1 Commit
dev ... s3

greirson
bdd80020a0 feat(storage): Implement S3 and local storage adapters with enhanced configuration
- Introduced a storage adapter factory to dynamically select between local and S3 storage based on the STORAGE_TYPE environment variable.
- Added S3 adapter for handling file operations on AWS S3, including multipart uploads and presigned URLs.
- Implemented local storage adapter for managing file operations on the local filesystem.
- Enhanced configuration validation to ensure proper setup for both storage types.
- Updated .env.example and README.md to document new storage configuration options and usage.

This commit improves the application's flexibility in handling file uploads by supporting both local and cloud storage backends, which broadens deployment options.
2025-05-05 21:52:22 -07:00
12 changed files with 1275 additions and 1119 deletions
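
The commit message above describes a storage adapter factory that selects a backend from the STORAGE_TYPE environment variable. A minimal sketch of what such a factory might look like is shown below; the module paths and config field names are illustrative assumptions, not the code introduced by this commit.

// Hypothetical sketch of a storage adapter factory keyed by STORAGE_TYPE.
// Module paths and config field names are assumptions for illustration.
const config = require('../config');

function createStorageAdapter() {
  switch (config.storageType) {
    case 's3':
      // Assumed S3-backed adapter (multipart uploads, presigned URLs)
      return require('./adapters/s3');
    case 'local':
    default:
      // Assumed filesystem-backed adapter writing under config.uploadDir
      return require('./adapters/local');
  }
}

module.exports = { createStorageAdapter };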

View File

@@ -106,23 +106,6 @@ AUTO_UPLOAD=false
# ALLOWED_IFRAME_ORIGINS=https://example.com,https://another.com
ALLOWED_IFRAME_ORIGINS=
# --- Docker Specific Settings ---
# User and Group IDs for file permissions
# Sets the user/group the application runs as inside the container.
# Files created in the mapped volume (e.g., ./local_uploads) will have this ownership.
# Set these to match your host user's ID/GID to avoid permission issues.
# Find your IDs with `id -u` and `id -g` on Linux/macOS.
# PUID=1000
# PGID=1000
# File Mode Creation Mask (Umask)
# Controls the default permissions for newly created files.
# 022 (default): Files 644 (rw-r--r--), Dirs 755 (rwxr-xr-x)
# 002: Files 664 (rw-rw-r--), Dirs 775 (rwxrwxr-x) - Good for group sharing
# 007: Files 660 (rw-rw----), Dirs 770 (rwxrwx---) - More restrictive
# 077: Files 600 (rw-------), Dirs 700 (rwx------) - Most restrictive
# UMASK=022
# Max number of retries for client-side chunk uploads (default: 5)
CLIENT_MAX_RETRIES=5
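
For context, CLIENT_MAX_RETRIES is read server-side and injected into the page as the {{MAX_RETRIES}} placeholder that public/index.html parses further below. A rough sketch of that injection, assuming an Express-style handler and a config field named clientMaxRetries (both assumptions), could look like this:

// Hypothetical sketch: injecting server config into the served HTML template.
// Route shape and config field names are assumptions for illustration.
const fs = require('fs');
const path = require('path');
const config = require('./config');

function serveIndex(req, res) {
  const template = fs.readFileSync(path.join(__dirname, '../public/index.html'), 'utf8');
  const html = template
    .replace('{{MAX_RETRIES}}', String(config.clientMaxRetries))
    .replace('{{AUTO_UPLOAD}}', String(config.autoUpload))
    .replace('{{FOOTER_CONTENT}}', config.footerContent || '');
  res.type('html').send(html);
}

module.exports = serveIndex;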

View File

@@ -1,16 +1,8 @@
# Base stage for shared configurations
FROM node:20-alpine as base
# Add user and group IDs as arguments with defaults
ARG PUID=1000
ARG PGID=1000
# Default umask (complement of 022 is 755 for dirs, 644 for files)
ARG UMASK=022
# Install necessary packages:
# - su-exec: lightweight sudo alternative
# - python3, pip: for apprise dependency
RUN apk add --no-cache su-exec python3 py3-pip && \
# Install python and create virtual environment with minimal dependencies
RUN apk add --no-cache python3 py3-pip && \
python3 -m venv /opt/venv && \
rm -rf /var/cache/apk/*
@@ -22,194 +14,52 @@ RUN . /opt/venv/bin/activate && \
# Add virtual environment to PATH
ENV PATH="/opt/venv/bin:$PATH"
# Create group and user with fallback to prevent build failures
# We use the ARG values here, but with a fallback mechanism to avoid build failures
RUN ( \
set -e; \
echo "Attempting to create/verify user with PUID=${PUID} and PGID=${PGID}..."; \
\
# Initialize variables \
TARGET_USER="nodeuser"; \
TARGET_GROUP="nodeuser"; \
NEW_GID="${PGID}"; \
NEW_UID="${PUID}"; \
\
# Step 1: Handle GID and group first \
echo "Setting up group for GID ${NEW_GID}..."; \
if getent group "${NEW_GID}" > /dev/null; then \
# GID exists, check which group has it \
EXISTING_GROUP=$(getent group "${NEW_GID}" | cut -d: -f1); \
echo "GID ${NEW_GID} is already used by group '${EXISTING_GROUP}'."; \
\
if [ "${EXISTING_GROUP}" = "${TARGET_GROUP}" ]; then \
echo "Group '${TARGET_GROUP}' already exists with correct GID ${NEW_GID}."; \
else \
# GID exists but used by a different group (likely 'node') \
echo "Will create '${TARGET_GROUP}' with a different GID to avoid conflict."; \
# Check if TARGET_GROUP exists but with wrong GID \
if getent group "${TARGET_GROUP}" > /dev/null; then \
echo "Group '${TARGET_GROUP}' exists but with wrong GID. Deleting it."; \
delgroup "${TARGET_GROUP}" || true; \
fi; \
# Create TARGET_GROUP with GID+1 (or find next available GID) \
NEXT_GID=$((${NEW_GID} + 1)); \
while getent group "${NEXT_GID}" > /dev/null; do \
NEXT_GID=$((${NEXT_GID} + 1)); \
done; \
echo "Creating group '${TARGET_GROUP}' with new GID ${NEXT_GID}."; \
addgroup -S -g "${NEXT_GID}" "${TARGET_GROUP}"; \
NEW_GID="${NEXT_GID}"; \
fi; \
else \
# GID does not exist - create group with desired GID \
echo "Creating group '${TARGET_GROUP}' with GID ${NEW_GID}."; \
addgroup -S -g "${NEW_GID}" "${TARGET_GROUP}"; \
fi; \
\
# Verify group was created \
echo "Verifying group '${TARGET_GROUP}' exists..."; \
getent group "${TARGET_GROUP}" || (echo "ERROR: Failed to find group '${TARGET_GROUP}'!"; exit 1); \
GID_FOR_USER=$(getent group "${TARGET_GROUP}" | cut -d: -f3); \
echo "Final group: '${TARGET_GROUP}' with GID ${GID_FOR_USER}"; \
\
# Step 2: Handle UID and user \
echo "Setting up user with UID ${NEW_UID}..."; \
if getent passwd "${NEW_UID}" > /dev/null; then \
# UID exists, check which user has it \
EXISTING_USER=$(getent passwd "${NEW_UID}" | cut -d: -f1); \
echo "UID ${NEW_UID} is already used by user '${EXISTING_USER}'."; \
\
if [ "${EXISTING_USER}" = "${TARGET_USER}" ]; then \
echo "User '${TARGET_USER}' already exists with correct UID ${NEW_UID}."; \
# Check if user needs group update \
CURRENT_GID=$(getent passwd "${TARGET_USER}" | cut -d: -f4); \
if [ "${CURRENT_GID}" != "${GID_FOR_USER}" ]; then \
echo "User '${TARGET_USER}' has wrong GID (${CURRENT_GID}). Modifying..."; \
deluser "${TARGET_USER}"; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
else \
# Another user has our UID (e.g., 'node'). Delete it. \
echo "Deleting existing user '${EXISTING_USER}' with UID ${NEW_UID}."; \
deluser "${EXISTING_USER}" || true; \
\
# Now check if TARGET_USER exists but with wrong UID \
if getent passwd "${TARGET_USER}" > /dev/null; then \
echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
deluser "${TARGET_USER}" || true; \
fi; \
\
# Create user \
echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
else \
# UID does not exist - check if user exists with wrong UID \
if getent passwd "${TARGET_USER}" > /dev/null; then \
echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
deluser "${TARGET_USER}" || true; \
fi; \
\
# Create user with desired UID \
echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
\
# Create and set permissions on home directory \
echo "Setting up home directory for ${TARGET_USER}..."; \
mkdir -p /home/${TARGET_USER} && \
chown -R ${TARGET_USER}:${TARGET_GROUP} /home/${TARGET_USER} && \
chmod 755 /home/${TARGET_USER}; \
\
# Verify user was created \
echo "Verifying user '${TARGET_USER}' exists..."; \
getent passwd "${TARGET_USER}" || (echo "ERROR: Failed to find user '${TARGET_USER}'!"; exit 1); \
\
# Clean up and verify system files \
echo "Ensuring root user definition is pristine..."; \
chown root:root /etc/passwd /etc/group && \
chmod 644 /etc/passwd /etc/group && \
getent passwd root || (echo "ERROR: root not found after user/group operations!"; exit 1); \
\
# Print final status \
echo "Final user/group setup:"; \
id "${TARGET_USER}"; \
)
WORKDIR /usr/src/app
# Set UMASK - this applies to processes run by the user created in this stage
# The entrypoint will also set it based on the ENV var at runtime.
RUN umask ${UMASK}
# Dependencies stage
FROM base as deps
# Change ownership early so npm cache is owned correctly
RUN chown nodeuser:nodeuser /usr/src/app
# Switch to nodeuser before running npm commands
USER nodeuser
COPY --chown=nodeuser:nodeuser package*.json ./
COPY package*.json ./
RUN npm ci --only=production && \
# Remove npm cache
npm cache clean --force
# Switch back to root for the next stages if needed
USER root
# Development stage
FROM deps as development
USER root
ENV NODE_ENV=development
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads
COPY --chown=nodeuser:nodeuser package*.json ./
# Install dev dependencies
RUN npm install && \
npm cache clean --force
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/
# Check whether __tests__ and dev exist in your project root; if they don't, these COPY lines will fail for the dev target
# COPY --chown=nodeuser:nodeuser __tests__/ ./__tests__/
# COPY --chown=nodeuser:nodeuser dev/ ./dev/
COPY --chown=nodeuser:nodeuser .eslintrc.json .eslintignore .prettierrc nodemon.json ./
# Create upload directory
RUN mkdir -p uploads
# Switch back to nodeuser for runtime
USER nodeuser
EXPOSE 3000
# Production stage
FROM deps as production
USER root
ENV NODE_ENV=production
ENV UPLOAD_DIR /app/uploads
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads
# Copy only necessary source files and ensure ownership
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/
# Copy the entrypoint script and make it executable
COPY --chown=root:root src/scripts/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Copy source with specific paths to avoid unnecessary files
COPY src/ ./src/
COPY public/ ./public/
COPY __tests__/ ./__tests__/
COPY dev/ ./dev/
COPY .eslintrc.json .eslintignore ./
# Expose port
EXPOSE 3000
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
CMD ["npm", "run", "dev"]
# Final user should be nodeuser for runtime
USER nodeuser
# Production stage
FROM deps as production
ENV NODE_ENV=production
ENV UPLOAD_DIR /app/uploads
# Create upload directory
# RUN mkdir -p uploads # No longer strictly needed here as volume mapping is expected, but harmless
# Copy only necessary source files
COPY src/ ./src/
COPY public/ ./public/
# Expose port
EXPOSE 3000
# Default command to run (passed to entrypoint)
CMD ["npm", "start"]

View File

@@ -254,4 +254,6 @@ See [Local Development (Recommended Quick Start)](LOCAL_DEVELOPMENT.md) for loca
Made with ❤️ by [DumbWare.io](https://dumbware.io)
## Future Features
- Camera Upload for Mobile
> Got an idea? [Open an issue](https://github.com/dumbwareio/dumbdrop/issues) or [submit a PR](https://github.com/dumbwareio/dumbdrop/pulls)
```

View File

@@ -19,24 +19,8 @@ services:
# FOOTER_LINKS: "My Site @ https://example.com,Docs @ https://docs.example.com" # Custom footer links
# PORT: 3000 # Server port (default: 3000)
# NODE_ENV: production # Node environment (development/production)
# DEBUG: false # Debug mode for verbose logging (default: false in production, true in development)
# APPRISE_URL: "" # Apprise notification URL for upload notifications (default: none)
# APPRISE_MESSAGE: "New file uploaded - {filename} ({size}), Storage used {storage}" # Notification message template with placeholders: {filename}, {size}, {storage}
# APPRISE_SIZE_UNIT: "Auto" # Size unit for notifications (B, KB, MB, GB, TB, or Auto)
# ALLOWED_EXTENSIONS: ".jpg,.jpeg,.png,.pdf,.doc,.docx,.txt" # Comma-separated list of allowed file extensions (default: all allowed)
# PUID: 1000 # User ID for file ownership (default: 1000)
# PGID: 1000 # Group ID for file ownership (default: 1000)
# UMASK: "000" # File permissions mask (default: 000)
restart: unless-stopped
# user: "${PUID}:${PGID}" # Don't set user here, entrypoint handles it
# Consider adding healthcheck
# healthcheck:
# test: ["CMD", "curl", "--fail", "http://localhost:3000/health"] # Assuming a /health endpoint exists
# interval: 30s
# timeout: 10s
# retries: 3
# healthcheck:
# test: ["CMD", "curl", "--fail", "http://localhost:3000/health"]
# interval: 30s
# timeout: 10s
# retries: 3
# start_period: 30s

View File

@@ -49,6 +49,9 @@
<div id="uploadProgress"></div> <!-- Original progress bar container -->
<div id="fileList" class="file-list"></div> <!-- Original file list container -->
<button id="uploadButton" class="upload-button" style="display: none;">Upload Files</button>
<footer>
{{FOOTER_CONTENT}}
</footer>
</div>
<script defer>
@@ -56,51 +59,67 @@
const CHUNK_SIZE = 1024 * 1024 * 5; // 5MB chunks
const RETRY_DELAY = 1000; // 1 second delay between retries
// Read MAX_RETRIES from the injected server value, with a fallback
const MAX_RETRIES_STR = '{{MAX_RETRIES}}';
let maxRetries = 5;
let maxRetries = 5; // Default value
if (MAX_RETRIES_STR && MAX_RETRIES_STR !== '{{MAX_RETRIES}}') {
const parsedRetries = parseInt(MAX_RETRIES_STR, 10);
if (!isNaN(parsedRetries) && parsedRetries >= 0) maxRetries = parsedRetries;
else console.warn(`Invalid MAX_RETRIES value "${MAX_RETRIES_STR}", defaulting to ${maxRetries}.`);
} else console.warn('MAX_RETRIES not injected by server, defaulting to 5.');
if (!isNaN(parsedRetries) && parsedRetries >= 0) {
maxRetries = parsedRetries;
} else {
console.warn(`Invalid MAX_RETRIES value "${MAX_RETRIES_STR}" received from server, defaulting to ${maxRetries}.`);
}
} else {
console.warn('MAX_RETRIES not injected by server, defaulting to 5.');
}
window.MAX_RETRIES = maxRetries;
console.log(`Max retries for chunk uploads: ${window.MAX_RETRIES}`);
const AUTO_UPLOAD_STR = '{{AUTO_UPLOAD}}';
const AUTO_UPLOAD = ['true', '1', 'yes'].includes(AUTO_UPLOAD_STR.toLowerCase());
// --- NEW: Variable to track active uploads ---
let activeUploadCount = 0;
// Utility function to generate a unique batch ID
function generateBatchId() {
return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
}
function generateBatchId() { return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`; }
function formatFileSize(bytes) { if (bytes === 0) return '0 Bytes'; const k = 1024; const sizes = ['Bytes', 'KB', 'MB', 'GB']; const i = Math.floor(Math.log(bytes) / Math.log(k)); return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; }
// Utility function to format file sizes
function formatFileSize(bytes) {
if (bytes === 0) return '0 Bytes';
const k = 1024;
const sizes = ['Bytes', 'KB', 'MB', 'GB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
class FileUploader {
constructor(file, batchId) {
this.file = file;
this.batchId = batchId;
this.uploadId = null;
this.uploadId = null; // Application's upload ID
this.position = 0;
this.progressElement = null;
this.chunkSize = CHUNK_SIZE;
this.lastUploadedBytes = 0;
this.lastUploadTime = null;
this.uploadRate = 0;
this.progressElement = null; // For the separate progress bar
this.chunkSize = CHUNK_SIZE; // Use constant
this.lastUploadedBytes = 0; // Used for rate calculation in original progress bar
this.lastUploadTime = null; // Used for rate calculation
this.uploadRate = 0; // Used for rate calculation
this.maxRetries = window.MAX_RETRIES;
this.retryDelay = RETRY_DELAY;
// *** ADDED for S3/Adapter logic ***
// *** ADDED partNumber for S3 ***
this.partNumber = 1;
// *** ADDED completed flag ***
this.completed = false;
}
async start() {
try {
this.createProgressElement(); // Original: create separate progress bar
this.updateProgress(0);
this.createProgressElement(); // Create the progress bar UI element
this.updateProgress(0); // Initial progress update to 0%
await this.initUpload();
// Handle zero-byte files completed during init
if (this.uploadId && this.uploadId.startsWith('zero-byte-')) {
console.log(`Zero-byte file ${this.file.name} handled by server init.`);
console.log(`Zero-byte file ${this.file.name} handled by server during init.`);
this.updateProgress(100);
this.completed = true;
return true;
@@ -109,10 +128,11 @@
if (this.uploadId && this.file.size > 0) {
await this.uploadChunks();
} else if (this.file.size === 0 && !this.completed) {
console.warn(`File ${this.file.name} is zero bytes, init didn't indicate completion. Assuming complete.`);
console.warn(`File ${this.file.name} is zero bytes, but init didn't indicate completion.`);
this.updateProgress(100);
this.completed = true;
}
// Return completion status
return this.completed;
} catch (error) {
console.error(`Upload failed for ${this.file.webkitRelativePath || this.file.name}:`, error);
@@ -120,178 +140,291 @@
this.progressElement.infoSpan.textContent = `Error: ${error.message}`;
this.progressElement.infoSpan.style.color = 'var(--danger-color)';
}
await this.cancelUploadOnServer();
await this.cancelUploadOnServer(); // Attempt cancellation
this.completed = false;
return false;
}
}
async initUpload() {
// (initUpload logic is identical to the previous version - uses fetch to /init)
const uploadPath = this.file.webkitRelativePath || this.file.name;
const consistentPath = uploadPath.replace(/\\/g, '/');
console.log(`[Uploader] Init for: ${consistentPath} (Size: ${this.file.size})`);
console.log(`[Uploader] Initializing upload for: ${consistentPath} (Size: ${this.file.size}, Batch: ${this.batchId})`);
const headers = { 'Content-Type': 'application/json' };
if (this.batchId) headers['X-Batch-ID'] = this.batchId;
const apiUrlPath = '/api/upload/init';
const fullApiUrl = window.BASE_URL + (apiUrlPath.startsWith('/') ? apiUrlPath.substring(1) : apiUrlPath);
const response = await fetch(fullApiUrl, {
method: 'POST', headers,
method: 'POST',
headers,
body: JSON.stringify({ filename: consistentPath, fileSize: this.file.size })
});
if (!response.ok) {
const errData = await response.json().catch(() => ({ error: `Server error ${response.status}` }));
throw new Error(errData.details || errData.error || `Init failed: ${response.status}`);
const errorData = await response.json().catch(() => ({ error: `Server error ${response.status}` }));
throw new Error(errorData.details || errorData.error || `Init failed: ${response.status}`);
}
const data = await response.json();
if (!data.uploadId) throw new Error('Server did not return uploadId');
this.uploadId = data.uploadId;
console.log(`[Uploader] Init success. App Upload ID: ${this.uploadId}`);
console.log(`[Uploader] Init successful. App Upload ID: ${this.uploadId}`);
}
async uploadChunks() {
if (!this.progressElement) this.createProgressElement(); // Ensure progress bar exists
// Create progress element if not already done (might happen if start didn't create it due to early exit/error)
if (!this.progressElement) this.createProgressElement();
while (this.position < this.file.size && !this.completed) {
while (this.position < this.file.size && !this.completed) { // Check completed flag
const chunkStartPosition = this.position;
const chunk = await this.readChunk();
const currentPartNumber = this.partNumber;
const chunk = await this.readChunk(); // Reads based on this.position, updates this.position
const currentPartNumber = this.partNumber; // *** Get current part number ***
try {
console.debug(`[Uploader] Attempting Part ${currentPartNumber}, Bytes ${chunkStartPosition}-${this.position-1}`);
// *** Pass partNumber to upload function ***
const result = await this.uploadChunkWithRetry(chunk, chunkStartPosition, currentPartNumber);
// Update original progress bar with server's progress
this.updateProgress(result.progress);
// *** Increment part number AFTER successful upload ***
this.partNumber++;
// *** Check if server response indicates completion ***
if (result.completed) {
console.log(`[Uploader] Server indicated completion after Part ${currentPartNumber}.`);
this.completed = true;
this.updateProgress(100); // Ensure it hits 100%
break;
this.updateProgress(100); // Update original progress bar
break; // Exit loop
}
} catch (error) {
console.error(`[Uploader] UploadChunks failed for Part ${this.partNumber}, File: ${this.file.name}`);
throw error;
console.error(`[Uploader] UploadChunks failed permanently after retries for Part ${this.partNumber}. File: ${this.file.webkitRelativePath || this.file.name}`);
throw error; // Propagate up
}
}
// Check completion after loop, same as before
if (!this.completed && this.position >= this.file.size) {
this.completed = true; this.updateProgress(100);
console.warn(`[Uploader] Reached end of file but not marked completed by server. Assuming complete.`);
this.completed = true;
this.updateProgress(100);
}
}
async readChunk() {
// (readChunk logic is identical)
const start = this.position;
const end = Math.min(this.position + this.chunkSize, this.file.size);
const blob = this.file.slice(start, end);
this.position = end;
this.position = end; // Update position *after* slicing
return await blob.arrayBuffer();
}
// *** MODIFIED: Added partNumber parameter ***
async uploadChunkWithRetry(chunk, chunkStartPosition, partNumber) {
const chunkApiUrlPath = `/api/upload/chunk/${this.uploadId}?partNumber=${partNumber}`; // *** ADDED partNumber ***
// *** MODIFIED: Append partNumber query parameter to URL ***
const chunkApiUrlPath = `/api/upload/chunk/${this.uploadId}?partNumber=${partNumber}`;
const fullChunkApiUrl = window.BASE_URL + (chunkApiUrlPath.startsWith('/') ? chunkApiUrlPath.substring(1) : chunkApiUrlPath);
let lastError = null;
for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
try {
if (attempt > 0) {
console.warn(`[Uploader] Retrying Part ${partNumber} (Attempt ${attempt}/${this.maxRetries})`);
console.warn(`[Uploader] Retrying Part ${partNumber} upload for ${this.file.webkitRelativePath || this.file.name} (Attempt ${attempt}/${this.maxRetries})...`);
this.updateProgressElementInfo(`Retrying attempt ${attempt}...`, 'var(--warning-color)');
} else if (this.progressElement) { // Update info for first attempt
this.updateProgressElementInfo(`uploading part ${partNumber}...`);
} else {
// Update status text for the current part (optional, depends if you want this level of detail)
// this.updateProgressElementInfo(`uploading part ${partNumber}...`);
}
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 60000); // 60s timeout
// Increase timeout slightly for S3 potentially
const timeoutId = setTimeout(() => controller.abort(), 60000);
const response = await fetch(fullChunkApiUrl, {
console.debug(`[Uploader] Sending Part ${partNumber} to ${fullChunkApiUrl}`);
const response = await fetch(fullChunkApiUrl, { // Use modified URL
method: 'POST',
headers: { 'Content-Type': 'application/octet-stream', 'X-Batch-ID': this.batchId },
body: chunk, signal: controller.signal
headers: {
'Content-Type': 'application/octet-stream',
'X-Batch-ID': this.batchId
},
body: chunk,
signal: controller.signal
});
clearTimeout(timeoutId);
if (response.ok) {
const data = await response.json(); // Contains { bytesReceived, progress, completed }
if (attempt > 0) console.log(`[Uploader] Part ${partNumber} success on retry ${attempt}.`);
if (attempt > 0) console.log(`[Uploader] Part ${partNumber} upload successful on retry attempt ${attempt}.`);
else console.debug(`[Uploader] Part ${partNumber} uploaded successfully.`);
if(this.progressElement) this.updateProgressElementInfo('uploading...'); // Reset info
return data; // *** RETURN server data (has 'completed' flag) ***
// *** Use server-provided progress for original progress bar ***
this.updateProgress(data.progress);
this.updateProgressElementInfo('uploading...'); // Reset info message
// *** Return the data which includes the 'completed' flag ***
return data;
} else {
// (Error handling logic for non-OK responses remains the same)
let errorText = `Server error ${response.status}`; try { errorText = (await response.json()).error || errorText } catch(e){}
if (response.status === 404 && attempt > 0) {
console.warn(`[Uploader] 404 on retry (Part ${partNumber}), assuming completed.`);
this.completed = true; // Mark as completed
// this.updateProgress(100); // updateProgress is called from uploadChunks
return { completed: true, progress: 100, bytesReceived: this.file.size };
console.warn(`[Uploader] Received 404 on retry, assuming completed.`);
this.completed = true;
this.updateProgress(100); // Update original progress bar
return { completed: true, progress: 100, bytesReceived: this.file.size }; // Simulate success
}
lastError = new Error(`Failed Part ${partNumber}: ${errorText}`);
console.error(`Attempt ${attempt} failed: ${lastError.message}`);
this.updateProgressElementInfo(`Attempt ${attempt} failed: ${response.statusText}`, 'var(--danger-color)');
}
} catch (error) {
// (Network/Abort error handling remains the same)
lastError = error;
if (error.name === 'AbortError') { console.error(`Part ${partNumber} Attempt ${attempt} timed out.`); this.updateProgressElementInfo(`Attempt ${attempt} timed out`, 'var(--danger-color)');}
else { console.error(`Part ${partNumber} Attempt ${attempt} network error: ${error.message}`); this.updateProgressElementInfo(`Attempt ${attempt} network error`, 'var(--danger-color)'); }
if (error.name === 'AbortError') { console.error(`Attempt ${attempt} timed out.`); this.updateProgressElementInfo(`Attempt ${attempt} timed out`, 'var(--danger-color)');}
else { console.error(`Attempt ${attempt} network error: ${error.message}`); this.updateProgressElementInfo(`Attempt ${attempt} network error`, 'var(--danger-color)'); }
}
// (Retry delay logic remains the same)
if (attempt < this.maxRetries) await new Promise(r => setTimeout(r, Math.min(this.retryDelay * Math.pow(2, attempt), 30000)));
}
console.error(`[Uploader] Part ${partNumber} failed permanently after ${this.maxRetries} retries.`);
} // End retry loop
console.error(`[Uploader] Part ${partNumber} upload failed permanently after ${this.maxRetries} retries.`);
this.updateProgressElementInfo(`Upload failed after ${this.maxRetries} retries`, 'var(--danger-color)');
throw lastError || new Error(`Part ${partNumber} failed after ${this.maxRetries} retries.`);
}
// --- Original Progress Bar UI Methods ---
// (These methods remain identical to the original file content)
createProgressElement() {
if (this.progressElement) return;
const container = document.createElement('div'); container.className = 'progress-container';
const label = document.createElement('div'); label.className = 'progress-label'; label.textContent = this.file.webkitRelativePath || this.file.name;
const progress = document.createElement('div'); progress.className = 'progress';
const bar = document.createElement('div'); bar.className = 'progress-bar';
const status = document.createElement('div'); status.className = 'progress-status';
const info = document.createElement('div'); info.className = 'progress-info'; info.textContent = 'initializing...';
const details = document.createElement('div'); details.className = 'progress-details'; details.textContent = `0 Bytes of ${formatFileSize(this.file.size)} (0.0%)`;
status.appendChild(info); status.appendChild(details); progress.appendChild(bar);
container.appendChild(label); container.appendChild(progress); container.appendChild(status);
if (this.progressElement) return; // Avoid duplicates if called multiple times
const container = document.createElement('div');
container.className = 'progress-container';
container.setAttribute('data-upload-id', this.uploadId || `pending-${this.file.name}`); // Use unique identifier
const label = document.createElement('div');
label.className = 'progress-label';
label.textContent = this.file.webkitRelativePath || this.file.name;
const progress = document.createElement('div');
progress.className = 'progress';
const bar = document.createElement('div');
bar.className = 'progress-bar';
const status = document.createElement('div');
status.className = 'progress-status';
const info = document.createElement('div');
info.className = 'progress-info';
info.textContent = 'initializing...';
const details = document.createElement('div');
details.className = 'progress-details';
details.textContent = `0 Bytes of ${formatFileSize(this.file.size)} (0.0%)`;
status.appendChild(info);
status.appendChild(details);
progress.appendChild(bar);
container.appendChild(label);
container.appendChild(progress);
container.appendChild(status);
document.getElementById('uploadProgress').appendChild(container);
this.progressElement = { container, bar, infoSpan: info, detailsSpan: details };
this.lastUploadTime = Date.now(); this.lastUploadedBytes = 0; this.uploadRate = 0;
}
updateProgress(percent) {
if (!this.progressElement) this.createProgressElement(); if (!this.progressElement) return;
const clampedPercent = Math.max(0, Math.min(100, percent));
this.progressElement.bar.style.width = `${clampedPercent}%`;
const currentTime = Date.now(); const timeDiff = (currentTime - (this.lastUploadTime || currentTime)) / 1000;
const bytesDiff = this.position - this.lastUploadedBytes; // Use this.position for rate too
if (timeDiff > 0.1 && bytesDiff > 0) { this.uploadRate = bytesDiff / timeDiff; this.lastUploadedBytes = this.position; this.lastUploadTime = currentTime; }
else if (timeDiff > 5) { this.uploadRate = 0; }
let rateText = 'Calculating...';
if (this.uploadRate > 0) { const u=['B/s','KB/s','MB/s','GB/s']; let i=0,r=this.uploadRate; while(r>=1024&&i<u.length-1){r/=1024;i++;} rateText=`${r.toFixed(1)} ${u[i]}`; }
else if (this.position > 0 || clampedPercent > 0) { rateText = '0.0 B/s'; }
const statusText = clampedPercent >= 100 ? 'complete' : 'uploading...';
if (!this.progressElement.infoSpan.textContent.startsWith('Retry') && !this.progressElement.infoSpan.textContent.startsWith('Attempt') && !this.progressElement.infoSpan.textContent.startsWith('Error')) {
this.updateProgressElementInfo(`${rateText} · ${statusText}`);
}
this.progressElement.detailsSpan.textContent = `${formatFileSize(this.position)} of ${formatFileSize(this.file.size)} (${clampedPercent.toFixed(1)}%)`;
if (clampedPercent === 100) {
this.progressElement.container.style.opacity = '0.5'; // Original had fade out
setTimeout(() => { if (this.progressElement && this.progressElement.container) { this.progressElement.container.remove(); this.progressElement = null; }}, 2000);
}
}
updateProgressElementInfo(message, color = '') { if (this.progressElement && this.progressElement.infoSpan) { this.progressElement.infoSpan.textContent = message; this.progressElement.infoSpan.style.color = color; }}
async cancelUploadOnServer() { if (!this.uploadId || this.completed || this.uploadId.startsWith('zero-byte-')) return; console.log(`[Uploader] Server cancel for ${this.uploadId}`); try { const p=`/api/upload/cancel/${this.uploadId}`; const u=window.BASE_URL+(p.startsWith('/')?p.substring(1):p); fetch(u,{method:'POST'}).catch(e=>console.warn('Cancel req failed:',e));}catch(e){console.warn('Cancel init err:',e);}}
this.lastUploadTime = Date.now(); // Initialize for rate calculation
this.lastUploadedBytes = 0;
}
updateProgress(percent) {
// Ensure element exists, create if necessary (though start() usually does)
if (!this.progressElement) this.createProgressElement();
if (!this.progressElement) return; // Still couldn't create it? Bail.
const clampedPercent = Math.max(0, Math.min(100, percent));
this.progressElement.bar.style.width = `${clampedPercent}%`;
// Calculate upload rate using server response bytes (as original)
// Note: For S3, data.bytesReceived might not perfectly reflect total uploaded bytes.
// We'll use this.position primarily for display bytes, but rate calculation follows original logic.
const currentTime = Date.now();
const timeDiff = (currentTime - (this.lastUploadTime || currentTime)) / 1000;
// Using this.position for rate might be visually smoother, but let's stick to original logic for now.
// We need the `bytesReceived` from the server response if we want to use it here...
// Let's fallback to using this.position for rate calculation as well, like the progress display.
const bytesDiff = this.position - this.lastUploadedBytes;
if (timeDiff > 0.1 && bytesDiff > 0) {
this.uploadRate = bytesDiff / timeDiff;
this.lastUploadedBytes = this.position;
this.lastUploadTime = currentTime;
} else if (timeDiff > 5) { // Reset rate if stalled
this.uploadRate = 0;
}
// Format rate (same as original)
let rateText = 'Calculating...';
if (this.uploadRate > 0) { /* ... format rate ... */ const units=['B/s','KB/s','MB/s','GB/s']; let i=0, r=this.uploadRate; while(r>=1024 && i<units.length-1){r/=1024;i++;} rateText=`${r.toFixed(1)} ${units[i]}`; }
else if (this.position > 0 || clampedPercent > 0) { rateText = '0.0 B/s'; }
// Update info/details (same as original)
const statusText = clampedPercent >= 100 ? 'complete' : 'uploading...';
if (!this.progressElement.infoSpan.textContent.startsWith('Retry') &&
!this.progressElement.infoSpan.textContent.startsWith('Attempt') &&
!this.progressElement.infoSpan.textContent.startsWith('Error')) {
this.updateProgressElementInfo(`${rateText} · ${statusText}`);
}
// Display progress using this.position (client's view) and clampedPercent
this.progressElement.detailsSpan.textContent =
`${formatFileSize(this.position)} of ${formatFileSize(this.file.size)} (${clampedPercent.toFixed(1)}%)`;
// Fade out (same as original)
if (clampedPercent === 100) {
this.progressElement.container.style.opacity = '0.5';
setTimeout(() => {
if (this.progressElement && this.progressElement.container) {
this.progressElement.container.remove();
this.progressElement = null;
}
}, 2000);
}
}
updateProgressElementInfo(message, color = '') {
// (Identical to original)
if (this.progressElement && this.progressElement.infoSpan) {
this.progressElement.infoSpan.textContent = message;
this.progressElement.infoSpan.style.color = color;
}
}
// --- Cancellation Logic ---
async cancelUploadOnServer() {
// (Identical to original, just ensure checks use this.completed and this.uploadId)
if (!this.uploadId || this.completed || this.uploadId.startsWith('zero-byte-')) return;
console.log(`[Uploader] Attempting server cancel for ${this.uploadId}`);
try {
const cancelApiUrlPath = `/api/upload/cancel/${this.uploadId}`;
const fullUrl = window.BASE_URL + (cancelApiUrlPath.startsWith('/') ? cancelApiUrlPath.substring(1) : cancelApiUrlPath);
fetch(fullUrl, { method: 'POST' }).catch(err => console.warn(`Cancel request failed:`, err));
} catch (e) { console.warn(`Error initiating cancel:`, e); }
}
} // End FileUploader Class
// --- Original UI Handlers and Logic ---
// (All the following code remains identical to the original file)
const dropZone = document.getElementById('dropZone');
const fileInput = document.getElementById('fileInput');
const folderInput = document.getElementById('folderInput');
const fileListDiv = document.getElementById('fileList'); // Original div for list
const fileList = document.getElementById('fileList'); // Refers to the original div#fileList
const uploadButton = document.getElementById('uploadButton');
let filesToUpload = []; // Use a different name than original `files` for clarity
let filesToUpload = []; // Renamed variable
async function getAllFileEntries(dataTransferItems) { /* ... (original implementation from previous message) ... */
// --- Drag and Drop Folder Handling (getAllFileEntries) ---
async function getAllFileEntries(dataTransferItems) {
// (Keep original implementation)
console.debug('Starting getAllFileEntries with items:', Array.from(dataTransferItems).map(item => ({ kind: item.kind, type: item.type })));
let fileEntries = []; let rootFolderName = null;
async function traverseEntry(entry, path = '') {
console.debug('Traversing entry:', { name: entry.name, isFile: entry.isFile, isDirectory: entry.isDirectory, currentPath: path });
if (entry.isFile) {
const file = await new Promise((resolve, reject) => entry.file(f => {
const fileWithPath = new File([f], entry.name, { type: f.type, lastModified: f.lastModified });
@@ -312,137 +445,137 @@
const entryPromises = Array.from(dataTransferItems).map(item => item.webkitGetAsEntry()).filter(Boolean).map(entry => traverseEntry(entry));
await Promise.all(entryPromises);
fileEntries.sort((a, b) => (a.webkitRelativePath || a.name).localeCompare(b.webkitRelativePath || b.name));
console.debug('getAllFileEntries result:', fileEntries.map(f=>f.webkitRelativePath || f.name));
return fileEntries;
} catch (error) { console.error('Error in getAllFileEntries:', error); throw error; }
}
['dragenter', 'dragover', 'dragleave', 'drop'].forEach(ev => { dropZone.addEventListener(ev, preventDefaults, false); document.body.addEventListener(ev, preventDefaults, false); });
['dragenter', 'dragover'].forEach(ev => dropZone.addEventListener(ev, highlight, false));
['dragleave', 'drop'].forEach(ev => dropZone.addEventListener(ev, unhighlight, false));
// --- Event Listeners (Original) ---
['dragenter', 'dragover', 'dragleave', 'drop'].forEach(ev => { dropZone.addEventListener(ev, preventDefaults); document.body.addEventListener(ev, preventDefaults); });
['dragenter', 'dragover'].forEach(ev => dropZone.addEventListener(ev, highlight));
['dragleave', 'drop'].forEach(ev => dropZone.addEventListener(ev, unhighlight));
dropZone.addEventListener('drop', handleDrop);
fileInput.addEventListener('change', handleFilesFromInput);
folderInput.addEventListener('change', handleFilesFromInput);
fileInput.addEventListener('change', handleFilesFromInput); // Use renamed handler
folderInput.addEventListener('change', handleFilesFromInput); // Use renamed handler
uploadButton.addEventListener('click', startUploads);
// --- Event Handler Functions (Original) ---
function preventDefaults(e) { e.preventDefault(); e.stopPropagation(); }
function highlight() { dropZone.classList.add('highlight'); }
function unhighlight() { dropZone.classList.remove('highlight'); }
function highlight(e) { dropZone.classList.add('highlight'); }
function unhighlight(e) { dropZone.classList.remove('highlight'); }
async function handleDrop(e) {
// Use original logic, just assign to filesToUpload
const items = e.dataTransfer.items;
fileListDiv.innerHTML = ''; // Clear old list display
uploadButton.style.display = 'none';
const loadingItem = document.createElement('div'); loadingItem.className = 'file-item loading'; loadingItem.textContent = 'Processing dropped items...'; fileListDiv.appendChild(loadingItem);
if (items && items.length > 0 && items[0].webkitGetAsEntry) {
const loadingItem = document.createElement('div'); loadingItem.className = 'file-item loading'; loadingItem.textContent = 'Processing dropped items...'; fileList.innerHTML = ''; fileList.appendChild(loadingItem); uploadButton.style.display = 'none';
try {
let newFiles;
if (items && items.length > 0 && items[0].webkitGetAsEntry) newFiles = await getAllFileEntries(items);
else newFiles = [...e.dataTransfer.files].filter(f => f.size >= 0);
if (newFiles.length === 0) { loadingItem.textContent = 'No files found.'; setTimeout(() => loadingItem.remove(), 2000); return; }
filesToUpload = newFiles; updateFileList();
if (AUTO_UPLOAD) startUploads(); else if (filesToUpload.length > 0) uploadButton.style.display = 'block';
} catch (error) { console.error('Error handling drop:', error); loadingItem.textContent = `Error: ${error.message}`; loadingItem.style.color = 'var(--danger-color)'; setTimeout(() => {loadingItem.remove(); updateFileList();}, 3000); filesToUpload = []; }
finally { if (loadingItem.parentNode === fileListDiv && filesToUpload.length > 0) loadingItem.remove(); } // Remove loading only if files are shown
const newFiles = await getAllFileEntries(items); if (newFiles.length === 0) throw new Error('No valid files found.');
filesToUpload = newFiles; updateFileList(); if (AUTO_UPLOAD) startUploads(); else uploadButton.style.display = 'block';
} catch (error) { console.error('Error processing dropped items:', error); loadingItem.textContent = `Error: ${error.message}`; loadingItem.style.color = 'var(--danger-color)'; setTimeout(() => loadingItem.remove(), 3000); filesToUpload = []; updateFileList(); }
finally { if (loadingItem.parentNode === fileList) loadingItem.remove(); }
} else {
filesToUpload = [...e.dataTransfer.files].filter(f => f.size >= 0); updateFileList(); if (AUTO_UPLOAD) startUploads(); else if (filesToUpload.length > 0) uploadButton.style.display = 'block';
}
}
function handleFilesFromInput(e) {
// Use original logic, just assign to filesToUpload
const input = e.target; const selectedFiles = [...input.files];
if (input.id === 'folderInput' && selectedFiles.length > 0 && !('webkitRelativePath' in selectedFiles[0])) { alert('Folder upload not fully supported.'); filesToUpload = []; }
else filesToUpload = selectedFiles.filter(f => f.size >= 0);
else { filesToUpload = selectedFiles.filter(f => f.size >= 0); if (input.id === 'folderInput') console.log('Folder files:', filesToUpload.map(f => ({ name: f.name, path: f.webkitRelativePath }))); }
updateFileList();
if (AUTO_UPLOAD && filesToUpload.length > 0) startUploads(); else if (filesToUpload.length > 0) uploadButton.style.display = 'block'; else uploadButton.style.display = 'none';
if (AUTO_UPLOAD && filesToUpload.length > 0) startUploads();
else if (filesToUpload.length > 0) uploadButton.style.display = 'block'; else uploadButton.style.display = 'none';
input.value = '';
}
function updateFileList() { // Original simple list display
// --- File List UI Update (Original Simple List) ---
function updateFileList() {
// Keep the original simpler list rendering
console.debug('Updating original file list UI for', filesToUpload.length, 'files');
fileListDiv.innerHTML = '';
fileList.innerHTML = ''; // Clear current list
if (filesToUpload.length === 0) {
fileListDiv.innerHTML = '<div class="file-item placeholder">No files selected.</div>';
fileList.innerHTML = '<div class="file-item placeholder">No files selected.</div>'; // Show placeholder in original div
uploadButton.style.display = 'none';
return;
}
filesToUpload.forEach(file => {
const fileItem = document.createElement('div');
fileItem.className = 'file-item';
fileItem.className = 'file-item'; // Use original class
const displayName = file.webkitRelativePath || file.name;
fileItem.innerHTML = `📄 ${displayName} (${formatFileSize(file.size)})`;
fileListDiv.appendChild(fileItem);
fileList.appendChild(fileItem);
});
uploadButton.style.display = (!AUTO_UPLOAD && filesToUpload.length > 0) ? 'block' : 'none';
}
// Add original styles for list items (if they were in the script, otherwise they are in styles.css)
const style = document.createElement('style');
style.textContent = `
.file-list { /* Original styles for the list container */ margin-top: 20px; display: flex; flex-direction: column; gap: 10px; }
.file-item { background: var(--container-bg); padding: 10px 15px; border-radius: 5px; text-align: left; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }
.file-item.placeholder { text-align: center; opacity: 0.6; box-shadow: none; background: transparent; border: none; } /* Ensure placeholder has no border if list had one */
.file-item.loading { text-align: center; padding: 15px; background: var(--container-bg); border-radius: 5px; animation: pulse 1.5s infinite; }
.file-item { background: var(--container-bg); padding: 10px 15px; border-radius: 5px; text-align: left; box-shadow: 0 2px 4px rgba(0,0,0,0.1); margin-bottom: 10px; }
.file-item.placeholder { text-align: center; opacity: 0.6; box-shadow: none; background: transparent; }
.file-item.loading { text-align: center; padding: 15px; background: var(--container-bg); border-radius: 5px; margin: 10px 0; animation: pulse 1.5s infinite; }
@keyframes pulse { 0% { opacity: 0.6; } 50% { opacity: 1; } 100% { opacity: 0.6; } }
/* Styles for the separate progress bars, from original */
#uploadProgress { margin: 20px 0; display: flex; flex-direction: column; gap: 15px; }
.progress-container { background: var(--container-bg); padding: 15px; border-radius: 5px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); transition: opacity 0.5s ease-out; }
.progress-label { text-align: left; margin-bottom: 8px; color: var(--text-color); font-size: 0.9rem; }
.progress-status { display: flex; justify-content: space-between; align-items: center; font-size: 0.8rem; color: var(--text-color); opacity: 0.8; margin-top: 8px; }
.progress-info { text-align: left; } .progress-details { text-align: right; }
.progress { background: var(--progress-bg); border-radius: 10px; height: 8px; overflow: hidden; margin-top: 8px; margin-bottom: 8px; }
.progress-bar { height: 100%; background: var(--highlight-color); transition: width 0.3s ease; }
/* Ensure progress bar styles don't conflict if somehow left over */
.progress-container { transition: opacity 0.5s ease-out; }
`;
document.head.appendChild(style);
// --- Upload Process (Original Structure) ---
async function startUploads() {
if (filesToUpload.length === 0) { Toastify({ text: "No files selected.", duration: 3000 }).showToast(); return; }
uploadButton.disabled = true; uploadButton.textContent = 'Uploading...';
document.getElementById('uploadProgress').innerHTML = ''; // Clear old progress bars
uploadButton.disabled = true;
uploadButton.textContent = 'Uploading...';
document.getElementById('uploadProgress').innerHTML = ''; // Clear the separate progress bar container
const batchId = generateBatchId();
let successfulUploads = 0, failedUploads = 0;
let successfulUploads = 0;
let failedUploads = 0;
// Process uploads sequentially (same loop as original)
for (const file of filesToUpload) {
// --- NEW: Increment active upload counter ---
activeUploadCount++;
const uploader = new FileUploader(file, batchId);
const uploader = new FileUploader(file, batchId); // Create uploader instance
try {
if (await uploader.start()) successfulUploads++;
else failedUploads++;
}
catch (error) {
console.error(`Unhandled error for ${file.name}:`, error);
const success = await uploader.start(); // Start the upload
if (success) successfulUploads++; else failedUploads++;
} catch (error) {
console.error(`Unhandled error during upload start for ${file.name}:`, error);
failedUploads++;
} finally {
// --- NEW: Decrement active upload counter ---
activeUploadCount--;
}
// Progress bar might show error via uploader's catch block
}
} // End for...of loop
// --- Show Summary Toast (Original logic) ---
const totalFiles = filesToUpload.length;
let msg = `Uploaded ${successfulUploads} of ${totalFiles} files`;
let bg = successfulUploads === totalFiles ? "#4CAF50" : (successfulUploads > 0 ? "#ff9800" : "#f44336");
Toastify({ text: msg, duration: 3000, gravity: "bottom", position: "right", style: { background: bg } }).showToast();
let toastMessage = `Uploaded ${successfulUploads} of ${totalFiles} files`;
let toastBackground = successfulUploads === totalFiles ? "#4CAF50" : "#f44336";
if (successfulUploads > 0 && failedUploads > 0) toastBackground = "#ff9800"; // Orange if partial success
Toastify({ text: toastMessage, duration: 3000, gravity: "bottom", position: "right", style: { background: toastBackground } }).showToast();
filesToUpload = []; updateFileList();
uploadButton.disabled = false; uploadButton.textContent = 'Upload Files'; uploadButton.style.display = 'none';
fileInput.value = ''; folderInput.value = '';
// --- Reset UI State (Original logic) ---
filesToUpload = []; // Clear the list of files
updateFileList(); // Clear the displayed file list
// Progress bars are removed automatically by the uploader on completion/error
uploadButton.disabled = false;
uploadButton.textContent = 'Upload Files';
uploadButton.style.display = 'none';
fileInput.value = '';
folderInput.value = '';
}
function setTheme(theme) { document.documentElement.setAttribute('data-theme', theme); localStorage.setItem('theme', theme); const m=document.querySelectorAll('.theme-toggle-icon .moon'); const s=document.querySelectorAll('.theme-toggle-icon .sun'); if(theme==='dark'){m.forEach(p=>p.style.display='none');s.forEach(p=>p.style.display='');}else{m.forEach(p=>p.style.display='');s.forEach(p=>p.style.display='none');} }
// --- Theme Management (Original) ---
function setTheme(theme) { document.documentElement.setAttribute('data-theme', theme); localStorage.setItem('theme', theme); const m=document.querySelectorAll('.moon'); const s=document.querySelectorAll('.sun'); if(theme==='dark'){m.forEach(p=>p.style.display='none');s.forEach(p=>p.style.display='');}else{m.forEach(p=>p.style.display='');s.forEach(p=>p.style.display='none');} }
function toggleTheme() { const c=document.documentElement.getAttribute('data-theme'); setTheme(c==='dark'?'light':'dark'); }
const savedTheme = localStorage.getItem('theme'); const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches; setTheme(savedTheme || (prefersDark ? 'dark' : 'light'));
updateFileList(); // Initialize list on load
const savedTheme = localStorage.getItem('theme') || (window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light'); setTheme(savedTheme);
// --- NEW: beforeunload event listener ---
window.addEventListener('beforeunload', function (e) {
if (activeUploadCount > 0) {
// Standard message for the confirmation dialog
const confirmationMessage = 'Uploads are in progress. If you leave this page, ongoing uploads will be interrupted. Are you sure you want to leave?';
// For modern browsers:
e.returnValue = confirmationMessage;
// For older browsers:
return confirmationMessage;
}
});
// --- Initial Setup ---
updateFileList(); // Initialize the simple file list display
</script>
<footer>
{{FOOTER_CONTENT}}
</footer>
</body>
</html>
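
The chunk upload path above sends a partNumber query parameter for each 5 MB chunk, and the commit message states that the S3 adapter maps chunks to multipart uploads. A minimal sketch of that server-side flow with @aws-sdk/client-s3 follows; the bucket/key handling and the surrounding adapter interface are assumptions, not the actual adapter code from this commit.

// Hypothetical sketch of the S3 multipart upload calls the adapter might make.
// Bucket, key, and the surrounding bookkeeping are illustrative assumptions.
const {
  S3Client,
  CreateMultipartUploadCommand,
  UploadPartCommand,
  CompleteMultipartUploadCommand,
} = require('@aws-sdk/client-s3');

const s3 = new S3Client({ region: process.env.S3_REGION });

// init: start a multipart upload and remember its UploadId
async function initUpload(bucket, key) {
  const { UploadId } = await s3.send(
    new CreateMultipartUploadCommand({ Bucket: bucket, Key: key })
  );
  return UploadId;
}

// chunk: upload one part (parts other than the last must be >= 5 MB,
// which matches the client's 5 MB CHUNK_SIZE)
async function uploadPart(bucket, key, uploadId, partNumber, body) {
  const { ETag } = await s3.send(
    new UploadPartCommand({
      Bucket: bucket,
      Key: key,
      UploadId: uploadId,
      PartNumber: partNumber,
      Body: body,
    })
  );
  return { ETag, PartNumber: partNumber };
}

// complete: finish the upload with the collected { ETag, PartNumber } pairs
async function completeUpload(bucket, key, uploadId, parts) {
  await s3.send(
    new CompleteMultipartUploadCommand({
      Bucket: bucket,
      Key: key,
      UploadId: uploadId,
      MultipartUpload: { Parts: parts },
    })
  );
}

module.exports = { initUpload, uploadPart, completeUpload };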

View File

@@ -39,7 +39,7 @@ body {
display: flex;
justify-content: center;
padding-top: 2rem;
padding-bottom: 150px;
padding-bottom: 80px;
color: var(--text-color);
transition: background-color 0.3s ease, color 0.3s ease;
}
@@ -47,7 +47,7 @@ body {
.container {
width: 100%;
max-width: 600px;
padding: 20px 20px 80px 20px;
padding: 20px;
text-align: center;
position: relative;
}
@@ -364,19 +364,20 @@ button:disabled {
/* Footer Styles */
footer {
position: fixed;
bottom: 0;
bottom: 10px;
left: 0;
right: 0;
width: 100%;
max-width: 600px;
margin-left: auto;
margin-right: auto;
padding: 15px;
text-align: center;
font-size: 0.85rem;
color: var(--text-color);
opacity: 0.9;
opacity: 0.7;
border-top: 1px solid var(--border-color);
transition: background-color 0.3s ease, color 0.3s ease;
background-color: var(--bg-color);
z-index: 100;
}
footer a {

View File

@@ -136,11 +136,6 @@ app.get('/login.html', (req, res) => {
}
});
// --- Health Check Endpoint ---
app.get('/health', (req, res) => {
res.status(200).json({ status: 'UP', message: 'Server is healthy' });
});
// --- Static File Serving ---
// Serve static files (CSS, JS, assets) from the 'public' directory
// Use express.static middleware, placed AFTER specific HTML routes

View File

@@ -1,13 +1,39 @@
// File: src/config/index.js
require('dotenv').config();
const { validatePin } = require('../utils/security');
const logger = require('../utils/logger');
const logger = require('../utils/logger'); // Use the default logger instance
const fs = require('fs');
const path = require('path');
// const { version } = require('../../package.json'); // version not currently used, can be removed or kept
const { version } = require('../../package.json'); // Get version from package.json
// --- Environment Variables Reference ---
/* (Comments listing all ENV vars - keep as is) */
/*
STORAGE_TYPE - Storage backend ('local' or 's3', default: 'local')
// --- Local Storage ---
UPLOAD_DIR - Directory for uploads (Docker/production, if STORAGE_TYPE=local)
LOCAL_UPLOAD_DIR - Directory for uploads (local dev, fallback: './local_uploads', if STORAGE_TYPE=local)
// --- S3 Storage ---
S3_REGION - AWS Region for S3 Bucket (required if STORAGE_TYPE=s3)
S3_BUCKET_NAME - Name of the S3 Bucket (required if STORAGE_TYPE=s3)
S3_ACCESS_KEY_ID - S3 Access Key ID (required if STORAGE_TYPE=s3)
S3_SECRET_ACCESS_KEY - S3 Secret Access Key (required if STORAGE_TYPE=s3)
S3_ENDPOINT_URL - Custom S3 endpoint URL (optional, for non-AWS S3)
S3_FORCE_PATH_STYLE - Force path-style access (true/false, optional, for non-AWS S3)
// --- Common ---
PORT - Port for the server (default: 3000)
NODE_ENV - Node environment (default: 'development')
BASE_URL - Base URL for the app (default: http://localhost:${PORT})
MAX_FILE_SIZE - Max upload size in MB (default: 1024)
AUTO_UPLOAD - Enable auto-upload (true/false, default: false)
DUMBDROP_PIN - Security PIN for uploads (required for protected endpoints)
DUMBDROP_TITLE - Site title (default: 'DumbDrop')
APPRISE_URL - Apprise notification URL (optional)
APPRISE_MESSAGE - Notification message template (default provided)
APPRISE_SIZE_UNIT - Size unit for notifications (optional)
ALLOWED_EXTENSIONS - Comma-separated list of allowed file extensions (optional)
ALLOWED_IFRAME_ORIGINS - Comma-separated list of allowed iframe origins (optional)
CLIENT_MAX_RETRIES - Max retries for client chunk uploads (default: 5)
DEMO_MODE - Enable demo mode (true/false, default: false)
*/
// --- Helper for clear configuration logging ---
const logConfig = (message, level = 'info') => {
@@ -18,7 +44,7 @@ const logConfig = (message, level = 'info') => {
// --- Default configurations ---
const DEFAULT_PORT = 3000;
const DEFAULT_SITE_TITLE = 'DumbDrop';
const DEFAULT_BASE_URL_PREFIX = 'http://localhost'; // Prefix, port added later
const DEFAULT_BASE_URL = 'http://localhost:3000';
const DEFAULT_CLIENT_MAX_RETRIES = 5;
const DEFAULT_STORAGE_TYPE = 'local';
@@ -28,62 +54,81 @@ const logAndReturn = (key, value, isDefault = false, sensitive = false) => {
return value;
};
// --- Utility to detect if running in local development mode ---
// (This helps decide whether to *create* LOCAL_UPLOAD_DIR, but doesn't affect UPLOAD_DIR usage in Docker)
function isLocalDevelopment() {
return process.env.NODE_ENV !== 'production' && !process.env.UPLOAD_DIR;
}
/**
* Determine the local upload directory path.
* Only relevant when STORAGE_TYPE is 'local'.
* @returns {string|null} The path, or null if storage is not local.
*/
function determineLocalUploadDirectory() {
if (process.env.STORAGE_TYPE && process.env.STORAGE_TYPE.toLowerCase() !== 'local') {
return null; // Not using local storage
}
let uploadDir;
if (process.env.UPLOAD_DIR) {
uploadDir = process.env.UPLOAD_DIR;
// logger.info(`[Local Storage] Upload directory set from UPLOAD_DIR: ${uploadDir}`); // Logger might not be fully init here
logger.info(`[Local Storage] Upload directory set from UPLOAD_DIR: ${uploadDir}`);
} else if (process.env.LOCAL_UPLOAD_DIR) {
uploadDir = process.env.LOCAL_UPLOAD_DIR;
// logger.warn(`[Local Storage] Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`);
logger.warn(`[Local Storage] Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`);
} else {
uploadDir = './local_uploads';
// logger.warn(`[Local Storage] Upload directory using default fallback: ${uploadDir}`);
uploadDir = './local_uploads'; // Default local path
logger.warn(`[Local Storage] Upload directory using default fallback: ${uploadDir}`);
}
// logger.info(`[Local Storage] Final upload directory path: ${path.resolve(uploadDir)}`);
return path.resolve(uploadDir); // Always resolve to absolute
logger.info(`[Local Storage] Final upload directory path: ${path.resolve(uploadDir)}`);
return uploadDir;
}
/**
* Ensure the local upload directory exists (if applicable and in local dev).
*/
function ensureLocalUploadDirExists(dirPath) {
if (!dirPath || !isLocalDevelopment()) {
return;
return; // Only create if using local storage in a local dev environment
}
try {
if (!fs.existsSync(dirPath)) {
fs.mkdirSync(dirPath, { recursive: true });
console.log(`[INFO] CONFIGURATION: [Local Storage] Created local upload directory: ${dirPath}`);
logger.info(`[Local Storage] Created local upload directory: ${dirPath}`);
} else {
console.log(`[INFO] CONFIGURATION: [Local Storage] Local upload directory exists: ${dirPath}`);
logger.info(`[Local Storage] Local upload directory exists: ${dirPath}`);
}
// Basic writability check
fs.accessSync(dirPath, fs.constants.W_OK);
console.log(`[SUCCESS] CONFIGURATION: [Local Storage] Local upload directory is writable: ${dirPath}`);
logger.success(`[Local Storage] Local upload directory is writable: ${dirPath}`);
} catch (err) {
console.error(`[ERROR] CONFIGURATION: [Local Storage] Failed to create or access local upload directory: ${dirPath}. Error: ${err.message}`);
logger.error(`[Local Storage] Failed to create or access local upload directory: ${dirPath}. Error: ${err.message}`);
throw new Error(`Upload directory "${dirPath}" is not accessible or writable.`);
}
}
// --- Determine Storage Type ---
const storageTypeInput = process.env.STORAGE_TYPE || DEFAULT_STORAGE_TYPE;
const storageType = ['local', 's3'].includes(storageTypeInput.toLowerCase())
? storageTypeInput.toLowerCase()
: DEFAULT_STORAGE_TYPE;
if (storageTypeInput.toLowerCase() !== storageType) {
console.warn(`[WARN] CONFIGURATION: Invalid STORAGE_TYPE "${storageTypeInput}", using default: "${storageType}"`);
logger.warn(`Invalid STORAGE_TYPE "${storageTypeInput}", using default: "${storageType}"`);
}
// Determine and potentially ensure the local upload directory
const resolvedLocalUploadDir = determineLocalUploadDirectory(); // Will be null if STORAGE_TYPE is 's3'
if (storageType === 'local' && resolvedLocalUploadDir) { // Only ensure if actually using local storage
ensureLocalUploadDirExists(resolvedLocalUploadDir);
}
/**
* Function to parse the FOOTER_LINKS environment variable
* @param {string} linksString - The input string containing links
* @returns {Array} - An array of objects containing text and URL
*/
const parseFooterLinks = (linksString) => {
if (!linksString) return [];
return linksString.split(',')
@@ -91,45 +136,85 @@ const parseFooterLinks = (linksString) => {
const parts = linkPair.split('@').map(part => part.trim());
if (parts.length === 2 && parts[0] && parts[1] && (parts[1].startsWith('http://') || parts[1].startsWith('https://'))) {
return { text: parts[0], url: parts[1] };
} else {
logger.warn(`Invalid format or URL in FOOTER_LINKS: "${linkPair}". Expected "Text @ http(s)://URL". Skipping.`);
return null;
}
})
.filter(link => link !== null);
};
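// Illustrative sketch (not part of the original source): expected behaviour of
// parseFooterLinks for a hypothetical FOOTER_LINKS value.
function exampleFooterLinks() {
  const input = 'Docs @ https://example.com/docs, Status @ https://status.example.com, broken-entry';
  // The first two entries parse; the third lacks the "Text @ URL" shape and is skipped with a warning.
  return parseFooterLinks(input);
  // -> [ { text: 'Docs', url: 'https://example.com/docs' },
  //      { text: 'Status', url: 'https://status.example.com' } ]
}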
/**
 * Application configuration
 * Loads and validates environment variables
 */
const config = {
// =====================
// Core Settings
// =====================
port: parseInt(process.env.PORT || DEFAULT_PORT, 10),
nodeEnv: process.env.NODE_ENV || 'development',
baseUrl: process.env.BASE_URL || `${DEFAULT_BASE_URL.replace(/:3000$/, '')}:${process.env.PORT || DEFAULT_PORT}/`, // Ensure trailing slash
isDemoMode: process.env.DEMO_MODE === 'true',
// =====================
// Storage Settings
// =====================
storageType: logAndReturn('STORAGE_TYPE', storageType, storageType === DEFAULT_STORAGE_TYPE),
/**
 * The primary directory for storing files or metadata.
 * If STORAGE_TYPE=local, this is where files are stored.
 * If STORAGE_TYPE=s3, this is where '.metadata' lives.
 * We default to the determined local path or a standard './uploads' if S3 is used.
 */
uploadDir: resolvedLocalUploadDir || path.resolve('./uploads'), // S3 needs a place for metadata too
// --- S3 Specific (only relevant if storageType is 's3') ---
s3Region: process.env.S3_REGION || null,
s3BucketName: process.env.S3_BUCKET_NAME || null,
s3AccessKeyId: process.env.S3_ACCESS_KEY_ID || null,
s3SecretAccessKey: process.env.S3_SECRET_ACCESS_KEY || null,
s3EndpointUrl: process.env.S3_ENDPOINT_URL || null, // Default to null (AWS default endpoint)
s3ForcePathStyle: process.env.S3_FORCE_PATH_STYLE === 'true', // Default to false
// =====================
// Upload Behavior
// =====================
maxFileSize: (() => {
const sizeInMB = parseInt(process.env.MAX_FILE_SIZE || '1024', 10);
if (isNaN(sizeInMB) || sizeInMB <= 0) {
logger.error('Invalid MAX_FILE_SIZE, must be a positive number. Using 1024MB.');
return 1024 * 1024 * 1024;
}
return sizeInMB * 1024 * 1024; // Convert MB to bytes
})(),
autoUpload: process.env.AUTO_UPLOAD === 'true',
allowedExtensions: process.env.ALLOWED_EXTENSIONS ?
process.env.ALLOWED_EXTENSIONS.split(',').map(ext => ext.trim().toLowerCase()).filter(Boolean).map(ext => (ext.startsWith('.') ? ext : `.${ext}`)) : // Ensure dot prefix
null,
clientMaxRetries: (() => {
const envValue = process.env.CLIENT_MAX_RETRIES;
const defaultValue = DEFAULT_CLIENT_MAX_RETRIES;
if (envValue === undefined) return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
const retries = parseInt(envValue, 10);
if (isNaN(retries) || retries < 0) {
logger.warn(`Invalid CLIENT_MAX_RETRIES value: "${envValue}". Using default: ${defaultValue}`);
return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
}
return logAndReturn('CLIENT_MAX_RETRIES', retries);
})(),
// =====================
// Security
// =====================
pin: validatePin(process.env.DUMBDROP_PIN), // validatePin uses logger, ensure logger is available
allowedIframeOrigins: process.env.ALLOWED_IFRAME_ORIGINS ?
process.env.ALLOWED_IFRAME_ORIGINS.split(',').map(origin => origin.trim()).filter(Boolean) :
null,
// =====================
// UI & Notifications
// =====================
siteTitle: process.env.DUMBDROP_TITLE || DEFAULT_SITE_TITLE,
footerLinks: parseFooterLinks(process.env.FOOTER_LINKS),
appriseUrl: process.env.APPRISE_URL || null,
@@ -137,86 +222,113 @@ const config = {
appriseSizeUnit: process.env.APPRISE_SIZE_UNIT || 'Auto',
};
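// Illustrative sketch (not part of the original source): a minimal set of environment
// variables for S3 mode, matching the keys read above. Values are placeholders.
//
//   STORAGE_TYPE=s3
//   S3_REGION=us-east-1
//   S3_BUCKET_NAME=my-dumbdrop-bucket
//   S3_ACCESS_KEY_ID=...                          (sensitive)
//   S3_SECRET_ACCESS_KEY=...                      (sensitive)
//   S3_ENDPOINT_URL=https://minio.internal:9000   (optional, for S3-compatible services)
//   S3_FORCE_PATH_STYLE=true                      (usually paired with a custom endpoint)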
// Moved logging to after config object is built, so logger is definitely available
logger.info(`--- Configuration Start ---`);
// --- Log Sensitive & Conditional Config ---
logConfig(`NODE_ENV: ${config.nodeEnv}`);
logConfig(`PORT: ${config.port}`);
logConfig(`BASE_URL: ${config.baseUrl}`);
logConfig(`DEMO_MODE: ${config.isDemoMode}`);
if (config.storageType === 'local') {
logConfig(`Upload Directory (Local): ${config.uploadDir}`);
} else {
logConfig(`Metadata Directory (S3 Mode): ${config.uploadDir}`); // Clarify role in S3 mode
logAndReturn('S3_REGION', config.s3Region);
logAndReturn('S3_BUCKET_NAME', config.s3BucketName);
logAndReturn('S3_ACCESS_KEY_ID', config.s3AccessKeyId, false, true); // Sensitive
logAndReturn('S3_SECRET_ACCESS_KEY', config.s3SecretAccessKey, false, true); // Sensitive
if (config.s3EndpointUrl) logAndReturn('S3_ENDPOINT_URL', config.s3EndpointUrl);
logAndReturn('S3_FORCE_PATH_STYLE', config.s3ForcePathStyle);
}
logConfig(`Max File Size: ${config.maxFileSize / (1024 * 1024)}MB`);
logConfig(`Auto Upload: ${config.autoUpload}`);
if (config.allowedExtensions) logConfig(`Allowed Extensions: ${config.allowedExtensions.join(', ')}`);
if (config.pin) logAndReturn('DUMBDROP_PIN', config.pin, false, true); // Sensitive
if (config.allowedIframeOrigins) logConfig(`Allowed Iframe Origins: ${config.allowedIframeOrigins.join(', ')}`);
if (config.appriseUrl) logAndReturn('APPRISE_URL', config.appriseUrl);
logger.info(`Client Max Retries: ${config.clientMaxRetries}`);
logger.info(`--- Configuration End ---`);
// --- Configuration Validation ---
function validateConfig() {
const errors = [];
if (!config.port || config.port <= 0 || config.port > 65535) {
errors.push('PORT must be a valid number between 1 and 65535');
}
if (config.maxFileSize <= 0) {
errors.push('MAX_FILE_SIZE must be greater than 0');
}
// Validate BASE_URL format and trailing slash
try {
new URL(config.baseUrl);
if (!config.baseUrl.endsWith('/')) {
errors.push('BASE_URL must end with a trailing slash ("/"). Current value: ' + config.baseUrl);
// Attempt to fix it for runtime, but still report error
// config.baseUrl = config.baseUrl + '/';
}
} catch (err) {
errors.push(`BASE_URL must be a valid URL. Error: ${err.message}`);
}
// Validate S3 configuration if STORAGE_TYPE is 's3'
if (config.storageType === 's3') {
if (!config.s3Region) errors.push('S3_REGION is required when STORAGE_TYPE is "s3"');
if (!config.s3BucketName) errors.push('S3_BUCKET_NAME is required when STORAGE_TYPE is "s3"');
if (!config.s3AccessKeyId) errors.push('S3_ACCESS_KEY_ID is required when STORAGE_TYPE is "s3"');
if (!config.s3SecretAccessKey) errors.push('S3_SECRET_ACCESS_KEY is required when STORAGE_TYPE is "s3"');
if (config.s3ForcePathStyle && !config.s3EndpointUrl) {
logger.warn('S3_FORCE_PATH_STYLE is true, but S3_ENDPOINT_URL is not set. This typically requires a custom endpoint.');
}
}
// Validate local storage dir only if type is local
if (config.storageType === 'local') {
if (!config.uploadDir) {
errors.push('Upload directory could not be determined for local storage.');
} else {
// Check existence and writability again (ensureLocalUploadDirExists might have failed)
try {
fs.accessSync(config.uploadDir, fs.constants.W_OK);
} catch (err) {
errors.push(`Local upload directory "${config.uploadDir}" is not writable or does not exist.`);
}
}
}
// Check metadata dir existence/writability regardless of storage type, as S3 uses it too
try {
const metadataParentDir = path.dirname(path.join(config.uploadDir, '.metadata'));
if (!fs.existsSync(metadataParentDir)) {
fs.mkdirSync(metadataParentDir, { recursive: true });
logger.info(`Created base directory for metadata: ${metadataParentDir}`);
}
fs.accessSync(metadataParentDir, fs.constants.W_OK);
} catch (err) {
errors.push(`Cannot access or create directory for metadata storage at "${config.uploadDir}". Error: ${err.message}`);
}
if (config.nodeEnv === 'production') {
if (!config.appriseUrl) {
logger.info('Apprise notifications disabled (APPRISE_URL not set).');
}
}
if (errors.length > 0) {
logger.error('--- CONFIGURATION ERRORS ---');
errors.forEach(err => logger.error(`- ${err}`));
logger.error('-----------------------------');
throw new Error('Configuration validation failed. Please check environment variables.');
}
logger.success('Configuration validated successfully.');
}
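// Illustrative sketch (not part of the original source): validateConfig() is intended to
// run once at startup (the app's initialize() is described as calling it) before the
// server starts listening; it throws if the environment is misconfigured.
function exampleStartupValidation() {
  try {
    validateConfig();
  } catch (err) {
    // Individual configuration errors have already been logged above.
    process.exitCode = 1;
    throw err;
  }
}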
// Freeze configuration to prevent modifications after initial load
Object.freeze(config);
module.exports = {
config,
validateConfig
};

View File

@@ -16,118 +16,157 @@ const { isDemoMode } = require('../utils/demoMode'); // Keep demo check for spec
// Initialize upload
router.post('/init', async (req, res) => {
// Note: Demo mode might bypass storage adapter logic via middleware or adapter factory itself.
// If specific demo responses are needed here, keep the check.
if (isDemoMode()) {
// Simplified Demo Response (assuming demoAdapter handles non-persistence)
const { filename = 'demo_file', fileSize = 0 } = req.body;
const demoUploadId = 'demo-' + Math.random().toString(36).substr(2, 9);
logger.info(`[DEMO] Init request for ${filename}, size ${fileSize}. Returning ID ${demoUploadId}`);
if (Number(fileSize) === 0) {
logger.success(`[DEMO] Simulated completion of zero-byte file: ${filename}`);
// Potentially call demoAdapter.completeUpload or similar mock logic if needed
}
return res.json({ uploadId: demoUploadId });
}
const { filename, fileSize } = req.body;
const clientBatchId = req.headers['x-batch-id']; // Adapter might use this
// --- Basic validations ---
if (!filename) return res.status(400).json({ error: 'Missing filename' });
if (fileSize === undefined || fileSize === null) return res.status(400).json({ error: 'Missing fileSize' });
const size = Number(fileSize);
if (isNaN(size) || size < 0) return res.status(400).json({ error: 'Invalid file size' });
// --- Max File Size Check ---
if (size > config.maxFileSize) {
logger.warn(`Upload rejected: File size ${size} exceeds limit ${config.maxFileSize} for ${filename}`);
return res.status(413).json({ error: 'File too large', limit: config.maxFileSize });
}
// --- Extension Check ---
// Perform extension check before handing off to adapter
if (config.allowedExtensions && config.allowedExtensions.length > 0) {
const fileExt = path.extname(filename).toLowerCase();
// Check if the extracted extension (including '.') is in the allowed list
if (!fileExt || !config.allowedExtensions.includes(fileExt)) {
logger.warn(`Upload rejected: File type not allowed: ${filename} (Extension: ${fileExt || 'none'})`);
return res.status(400).json({ error: 'File type not allowed', receivedExtension: fileExt || 'none' });
}
logger.debug(`File extension ${fileExt} allowed for ${filename}`);
}
try {
// Delegate initialization to the storage adapter
const result = await storageAdapter.initUpload(filename, size, clientBatchId);
// Respond with the uploadId generated by the adapter/system
res.json({ uploadId: result.uploadId });
} catch (err) {
logger.error(`[Route /init] Upload initialization failed: ${err.message}`, err.stack);
// Map common errors
let statusCode = 500;
let clientMessage = 'Failed to initialize upload.';
if (err.message.includes('Invalid batch ID format')) {
statusCode = 400;
clientMessage = err.message;
} else if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') { // S3 Specific
statusCode = 500; // Internal config error
clientMessage = 'Storage configuration error.';
} else if (err.code === 'EACCES' || err.code === 'EPERM' || err.message.includes('writable')) { // Local Specific
statusCode = 500;
clientMessage = 'Storage permission or access error.';
}
// Add more specific error mapping based on adapter exceptions if needed
res.status(statusCode).json({ error: clientMessage, details: err.message }); // Include details only for logging/debugging
}
});
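// Illustrative client-side sketch (not part of the original source): initializing an
// upload against the route above. Field and header names follow the checks in the
// handler; the '/api/upload' URL prefix is an assumption about where the router is mounted.
async function exampleInitUpload(file, batchId) {
  const res = await fetch('/api/upload/init', { // assumed mount point for this router
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'x-batch-id': batchId },
    body: JSON.stringify({ filename: file.name, fileSize: file.size })
  });
  if (!res.ok) throw new Error(`init failed: ${res.status}`);
  const { uploadId } = await res.json();
  return uploadId; // used for /chunk/:uploadId and /cancel/:uploadId
}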
// Upload chunk
router.post('/chunk/:uploadId', express.raw({
limit: config.maxFileSize + (10 * 1024 * 1024), // Allow slightly larger raw body than max file size
type: 'application/octet-stream'
}), async (req, res) => {
const { uploadId } = req.params;
const chunk = req.body;
// ** CRITICAL FOR S3: Get Part Number from client **
// Client needs to send this, e.g., ?partNumber=1, ?partNumber=2, ...
const partNumber = parseInt(req.query.partNumber || '1', 10);
if (isNaN(partNumber) || partNumber < 1) {
logger.error(`[Route /chunk] Invalid partNumber received: ${req.query.partNumber}`);
return res.status(400).json({ error: 'Missing or invalid partNumber query parameter (must be >= 1)' });
}
// Demo mode handling (simplified)
if (isDemoMode()) {
logger.debug(`[DEMO /chunk] Received chunk for ${uploadId}, part ${partNumber}, size ${chunk?.length || 0}`);
// Simulate progress - more sophisticated logic could go in a demoAdapter
const demoProgress = Math.min(100, Math.random() * 100);
const completed = demoProgress > 95; // Simulate completion occasionally
if (completed) {
logger.info(`[DEMO /chunk] Simulated completion for ${uploadId}`);
}
return res.json({ bytesReceived: 0, progress: demoProgress, completed }); // Approximate response
}
if (!chunk || chunk.length === 0) {
logger.warn(`[Route /chunk] Received empty chunk for uploadId: ${uploadId}, part ${partNumber}`);
return res.status(400).json({ error: 'Empty chunk received' });
}
try {
// Delegate chunk storage to the adapter
const result = await storageAdapter.storeChunk(uploadId, chunk, partNumber);
// If the adapter indicates completion after storing this chunk, finalize the upload
if (result.completed) {
logger.info(`[Route /chunk] Chunk ${partNumber} for ${uploadId} triggered completion. Finalizing...`);
try {
const completionResult = await storageAdapter.completeUpload(uploadId);
logger.success(`[Route /chunk] Successfully finalized upload ${uploadId}. Final path/key: ${completionResult.finalPath}`);
// Send final success response (ensure progress is 100)
return res.json({ bytesReceived: result.bytesReceived, progress: 100, completed: true });
} catch (completionError) {
logger.error(`[Route /chunk] CRITICAL: Failed to finalize completed upload ${uploadId} after storing chunk ${partNumber}: ${completionError.message}`, completionError.stack);
// What to return to client? The chunk was stored, but completion failed.
// Return 500, indicating server-side issue during finalization.
return res.status(500).json({ error: 'Upload chunk received, but failed to finalize.', details: completionError.message });
}
} else {
// Chunk stored, but upload not yet complete, return progress
res.json({ bytesReceived: result.bytesReceived, progress: result.progress, completed: false });
}
} catch (err) {
logger.error(`[Route /chunk] Chunk upload failed for ${uploadId}, part ${partNumber}: ${err.message}`, err.stack);
// Map common errors
let statusCode = 500;
let clientMessage = 'Failed to process chunk.';
if (err.message.includes('Upload session not found') || err.name === 'NoSuchUpload' || err.code === 'ENOENT') {
statusCode = 404;
clientMessage = 'Upload session not found or already completed/aborted.';
} else if (err.name === 'InvalidPart' || err.name === 'InvalidPartOrder') { // S3 Specific
statusCode = 400;
clientMessage = 'Invalid upload chunk sequence or data.';
} else if (err.name === 'SlowDown') { // S3 Throttling
statusCode = 429;
clientMessage = 'Upload rate limit exceeded by storage provider, please try again later.';
} else if (err.code === 'EACCES' || err.code === 'EPERM' ) { // Local specific
statusCode = 500;
clientMessage = 'Storage permission error while writing chunk.';
}
// Add more specific error mapping if needed
res.status(statusCode).json({ error: clientMessage, details: err.message });
}
});
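// Illustrative client-side sketch (not part of the original source): sending sequential
// parts to the route above. partNumber starts at 1, matching the query parameter the
// handler parses; the 5MB chunk size and the '/api/upload' prefix are assumptions.
async function exampleSendChunks(uploadId, file) {
  const CHUNK_SIZE = 5 * 1024 * 1024; // assumed client-side chunk size
  let partNumber = 1;
  for (let offset = 0; offset < file.size; offset += CHUNK_SIZE, partNumber++) {
    const chunk = file.slice(offset, offset + CHUNK_SIZE);
    const res = await fetch(`/api/upload/chunk/${uploadId}?partNumber=${partNumber}`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/octet-stream' },
      body: chunk
    });
    if (!res.ok) throw new Error(`chunk ${partNumber} failed: ${res.status}`);
    const { progress, completed } = await res.json();
    if (completed) return progress; // server finalized the upload after the last part
  }
}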
@@ -135,27 +174,27 @@ router.post('/chunk/:uploadId', express.raw({
router.post('/cancel/:uploadId', async (req, res) => {
const { uploadId } = req.params;
if (isDemoMode()) {
logger.info(`[DEMO /cancel] Request received for ${uploadId}`);
// Call demoAdapter.abortUpload(uploadId) if it exists?
return res.json({ message: 'Upload cancelled (Demo)' });
}
logger.info(`[Route /cancel] Received cancel request for upload: ${uploadId}`);
try {
// Delegate cancellation to the storage adapter
await storageAdapter.abortUpload(uploadId);
res.json({ message: 'Upload cancelled successfully or was already inactive.' });
} catch (err) {
// Abort errors are often less critical, log them but maybe return success anyway
logger.error(`[Route /cancel] Error during upload cancellation for ${uploadId}: ${err.message}`, err.stack);
// Don't necessarily send 500, as the goal is just to stop the upload client-side
// Maybe just return success but log the server-side issue?
// Or return 500 if S3 abort fails significantly? Let's return 500 for now.
res.status(500).json({ error: 'Failed to cancel upload on server.', details: err.message });
}
});
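// Illustrative client-side sketch (not part of the original source): aborting an upload
// that is no longer needed. The '/api/upload' prefix is assumed.
async function exampleCancelUpload(uploadId) {
  await fetch(`/api/upload/cancel/${uploadId}`, { method: 'POST' });
  // The server responds with a message either way; the client simply stops sending chunks.
}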
// Export the router, remove previous function exports
module.exports = { router };

View File

@@ -1,110 +0,0 @@
#!/bin/sh
# Simple entrypoint script to manage user permissions and execute CMD
# Exit immediately if a command exits with a non-zero status.
set -e
# Function to log messages
log_info() {
echo "[INFO] Entrypoint: $1"
}
log_warning() {
echo "[WARN] Entrypoint: $1"
}
log_error() {
echo "[ERROR] Entrypoint: $1" >&2
}
log_info "Starting entrypoint script..."
# Default user/group/umask values
DEFAULT_UID=1000
DEFAULT_GID=1000
DEFAULT_UMASK=022
# Default upload directory if not set by user (should align with Dockerfile/compose)
DEFAULT_UPLOAD_DIR="/usr/src/app/local_uploads"
# Check if PUID or PGID environment variables are set by the user
if [ -z "${PUID}" ] && [ -z "${PGID}" ]; then
# --- Run as Root ---
log_info "PUID/PGID not set, running as root."
# Set umask (use UMASK env var if provided, otherwise default)
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
log_info "Setting umask to ${CURRENT_UMASK}"
umask "${CURRENT_UMASK}"
# Execute the command passed to the entrypoint as root
log_info "Executing command as root: $@"
exec "$@"
else
# --- Run as Custom User (nodeuser with adjusted UID/GID) ---
log_info "PUID/PGID set, configuring user 'nodeuser'..."
# Use provided UID/GID or default if only one is set
CURRENT_UID=${PUID:-$DEFAULT_UID}
CURRENT_GID=${PGID:-$DEFAULT_GID}
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
# Read the upload directory from ENV var or use default
TARGET_UPLOAD_DIR=${UPLOAD_DIR:-$DEFAULT_UPLOAD_DIR}
log_info "Target UID: ${CURRENT_UID}, GID: ${CURRENT_GID}, UMASK: ${CURRENT_UMASK}"
log_info "Target Upload Dir: ${TARGET_UPLOAD_DIR}"
# Check if user/group exists (should exist from Dockerfile)
if ! getent group nodeuser > /dev/null 2>&1; then
log_warning "Group 'nodeuser' not found, creating with GID ${CURRENT_GID}..."
addgroup -g "${CURRENT_GID}" nodeuser
else
EXISTING_GID=$(getent group nodeuser | cut -d: -f3)
if [ "${EXISTING_GID}" != "${CURRENT_GID}" ]; then
log_info "Updating 'nodeuser' group GID from ${EXISTING_GID} to ${CURRENT_GID}..."
groupmod -o -g "${CURRENT_GID}" nodeuser
fi
fi
if ! getent passwd nodeuser > /dev/null 2>&1; then
log_warning "User 'nodeuser' not found, creating with UID ${CURRENT_UID}..."
adduser -u "${CURRENT_UID}" -G nodeuser -s /bin/sh -D nodeuser
else
EXISTING_UID=$(getent passwd nodeuser | cut -d: -f3)
if [ "${EXISTING_UID}" != "${CURRENT_UID}" ]; then
log_info "Updating 'nodeuser' user UID from ${EXISTING_UID} to ${CURRENT_UID}..."
usermod -o -u "${CURRENT_UID}" nodeuser
fi
fi
# Ensure the base application directory ownership is correct
log_info "Ensuring ownership of /usr/src/app..."
chown -R nodeuser:nodeuser /usr/src/app || log_warning "Could not chown /usr/src/app"
# Ensure the target upload directory exists and has correct ownership
if [ -n "${TARGET_UPLOAD_DIR}" ]; then
if [ ! -d "${TARGET_UPLOAD_DIR}" ]; then
log_info "Creating directory: ${TARGET_UPLOAD_DIR}"
# Use -p to create parent directories as needed
mkdir -p "${TARGET_UPLOAD_DIR}"
# Chown after creation
chown nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
else
# Directory exists, ensure ownership
log_info "Ensuring ownership of ${TARGET_UPLOAD_DIR}..."
chown -R nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
fi
else
log_warning "UPLOAD_DIR variable is not set or is empty, skipping ownership check for upload directory."
fi
# Set the umask
log_info "Setting umask to ${CURRENT_UMASK}"
umask "${CURRENT_UMASK}"
# Execute the command passed to the entrypoint using su-exec to drop privileges
log_info "Executing command as nodeuser (${CURRENT_UID}:${CURRENT_GID}): $@"
exec su-exec nodeuser "$@"
fi
log_info "Entrypoint script finished (should not reach here if exec worked)."

View File

@@ -1,110 +1,124 @@
/**
* Server entry point that starts the HTTP server and manages connections.
* Handles graceful shutdown, connection tracking, and server initialization.
* Provides development mode directory listing functionality.
*/
const { app, initialize, config } = require('./app'); // config is now also exported from app.js
const logger = require('./utils/logger');
const fs = require('fs'); // Keep for readdirSync if needed for local dev logging
const { executeCleanup } = require('./utils/cleanup');
const { generatePWAManifest } = require('./scripts/pwa-manifest-generator');
// Track open connections
const connections = new Set();
/**
* Start the server and initialize the application
* @returns {Promise<http.Server>} The HTTP server instance
*/
async function startServer() {
try {
// Initialize the application
await initialize(); // This will call validateConfig and load storage adapter via app.js
// Start the server
const server = app.listen(config.port, () => {
logger.info(`Server running at ${config.baseUrl}`);
// ** MODIFIED LOGGING **
logger.info(`Active Storage Type: ${config.storageType}`);
logger.info(`Data Directory (for uploads or metadata): ${config.uploadDir}`);
// List directory contents in development (local storage only)
if (config.nodeEnv === 'development' && config.storageType === 'local') {
try {
// Only list contents if it's local storage and dev mode
if (fs.existsSync(config.uploadDir)) {
const files = fs.readdirSync(config.uploadDir);
logger.info(`Current local upload directory contents (${config.uploadDir}):`);
files.forEach(file => logger.info(`- ${file}`));
} else {
logger.warn(`Local upload directory ${config.uploadDir} does not exist for listing.`);
}
} catch (err) {
logger.error(`Failed to list local upload directory contents: ${err.message}`);
}
}
});
// Dynamically generate PWA manifest into public folder
generatePWAManifest();
// Track new connections
server.on('connection', (connection) => {
connections.add(connection);
connection.on('close', () => connections.delete(connection));
});
// Shutdown handler function
let isShuttingDown = false; // Prevent multiple shutdowns
const shutdownHandler = async (signal) => {
if (isShuttingDown) return;
isShuttingDown = true;
logger.info(`${signal} received. Shutting down gracefully...`);
// Start a shorter force shutdown timer
const forceShutdownTimer = setTimeout(() => {
logger.error('Force shutdown initiated');
process.exit(1);
}, 3000); // 3 seconds maximum for total shutdown
try {
// 1. Stop accepting new connections immediately
server.unref();
// 2. Close all existing connections with a shorter timeout
const connectionClosePromises = Array.from(connections).map(conn => {
return new Promise(resolve => {
conn.end(() => {
connections.delete(conn);
resolve();
});
});
});
// Wait for connections to close with a timeout
await Promise.race([
Promise.all(connectionClosePromises),
new Promise(resolve => setTimeout(resolve, 1000)) // 1 second timeout for connections
]);
// 3. Close the server
await new Promise((resolve) => server.close(resolve));
logger.info('Server closed');
// 4. Run cleanup tasks with a shorter timeout
await executeCleanup(1000); // 1 second timeout for cleanup
// Clear the force shutdown timer since we completed gracefully
clearTimeout(forceShutdownTimer);
logger.info('Shutdown complete.');
process.exitCode = 0;
process.exit(0); // Ensure immediate exit
} catch (error) {
clearTimeout(forceShutdownTimer); // Clear timer on error too
logger.error(`Error during shutdown: ${error.message}`);
process.exit(1);
}
};
// Handle both SIGTERM and SIGINT
process.on('SIGTERM', () => shutdownHandler('SIGTERM'));
process.on('SIGINT', () => shutdownHandler('SIGINT'));
return server;
} catch (error) {
logger.error('Failed to start server:', error);
// Ensure process exits if startServer itself fails before listener setup
process.exitCode = 1;
throw error;
}
}
// Only start the server if this file is run directly
if (require.main === module) {
startServer().catch((error) => {
logger.error('Server failed to start:', error);
process.exitCode = 1;
throw error;
});
}
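// Illustrative sketch (not part of the original source): the graceful shutdown path above
// can be exercised in-process (for example from a test) by raising the same signals the
// handlers listen for.
function exampleTriggerGracefulShutdown() {
  // Equivalent to `kill -TERM <pid>` from a shell; the handler closes connections,
  // runs cleanup tasks, and exits with code 0 on success.
  process.kill(process.pid, 'SIGTERM');
}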

View File

@@ -3,7 +3,6 @@
* Handles file operations for storing files on AWS S3 or S3-compatible services.
* Implements the storage interface expected by the application routes.
* Uses local files in '.metadata' directory to track multipart upload progress.
* Attempts to make top-level folder prefixes unique per batch if collisions occur.
*/
const {
@@ -15,29 +14,29 @@ const {
ListObjectsV2Command,
GetObjectCommand,
DeleteObjectCommand,
PutObjectCommand, // Also used for zero-byte files
HeadObjectCommand
} = require('@aws-sdk/client-s3');
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
const fs = require('fs').promises;
const fsSync = require('fs'); // For synchronous checks
const path = require('path');
const crypto = require('crypto');
const util = require('util'); // For detailed error logging
const { config } = require('../config');
const logger = require('../utils/logger');
const {
sanitizePathPreserveDirs,
isValidBatchId,
formatFileSize // Keep for potential future use or consistency
} = require('../utils/fileUtils');
const { sendNotification } = require('../services/notifications'); // Needed for completion
// --- Constants ---
const METADATA_DIR = path.join(config.uploadDir, '.metadata'); // Use local dir for metadata state
const UPLOAD_TIMEOUT = 30 * 60 * 1000; // 30 minutes timeout for stale *local* metadata cleanup
// --- S3 Client Initialization ---
let s3Client;
try {
const s3ClientConfig = {
region: config.s3Region,
credentials: {
@@ -47,17 +46,27 @@ try {
...(config.s3EndpointUrl && { endpoint: config.s3EndpointUrl }),
...(config.s3ForcePathStyle && { forcePathStyle: true }),
};
if (s3ClientConfig.endpoint) {
logger.info(`[S3 Adapter] Configuring S3 client for endpoint: ${s3ClientConfig.endpoint}`);
}
if (s3ClientConfig.forcePathStyle) {
logger.info(`[S3 Adapter] Configuring S3 client with forcePathStyle: true`);
}
s3Client = new S3Client(s3ClientConfig);
logger.success('[S3 Adapter] S3 Client configured successfully.');
} catch (error) {
logger.error(`[S3 Adapter] Failed to configure S3 client: ${error.message}`);
// This is critical, throw an error to prevent the adapter from being used incorrectly
throw new Error('S3 Client configuration failed. Check S3 environment variables.');
}
// --- Metadata Helper Functions (Adapted for S3, store state locally) ---
async function ensureMetadataDirExists() {
// Reuse logic from local adapter - S3 adapter still needs local dir for state
try {
if (!fsSync.existsSync(METADATA_DIR)) {
await fs.mkdir(METADATA_DIR, { recursive: true });
@@ -68,9 +77,10 @@ async function ensureMetadataDirExists() {
logger.error(`[S3 Adapter] Local metadata directory error (${METADATA_DIR}): ${err.message}`);
throw new Error(`Failed to access or create local metadata directory for S3 adapter state: ${METADATA_DIR}`);
}
}
// Read/Write/Delete functions are identical to localAdapter as they manage local state files
async function readUploadMetadata(uploadId) {
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
logger.warn(`[S3 Adapter] Attempted to read metadata with invalid uploadId: ${uploadId}`);
return null;
@@ -78,24 +88,25 @@ async function readUploadMetadata(uploadId) {
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
try {
const data = await fs.readFile(metaFilePath, 'utf8');
// Ensure 'parts' is always an array on read
const metadata = JSON.parse(data);
metadata.parts = metadata.parts || [];
return metadata;
} catch (err) {
if (err.code === 'ENOENT') { return null; }
logger.error(`[S3 Adapter] Error reading metadata for ${uploadId}: ${err.message}`);
throw err;
}
}
async function writeUploadMetadata(uploadId, metadata) {
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
logger.error(`[S3 Adapter] Attempted to write metadata with invalid uploadId: ${uploadId}`);
return;
}
const metaFilePath = path.join(METADATA_DIR, `${uploadId}.meta`);
metadata.lastActivity = Date.now();
metadata.parts = metadata.parts || []; // Ensure parts array exists
try {
const tempMetaPath = `${metaFilePath}.${crypto.randomBytes(4).toString('hex')}.tmp`;
await fs.writeFile(tempMetaPath, JSON.stringify(metadata, null, 2));
@@ -105,9 +116,9 @@ async function writeUploadMetadata(uploadId, metadata) {
try { await fs.unlink(tempMetaPath); } catch (unlinkErr) {/* ignore */}
throw err;
}
}
async function deleteUploadMetadata(uploadId) {
if (!uploadId || typeof uploadId !== 'string' || uploadId.includes('..')) {
logger.warn(`[S3 Adapter] Attempted to delete metadata with invalid uploadId: ${uploadId}`);
return;
@@ -117,323 +128,465 @@ async function deleteUploadMetadata(uploadId) {
await fs.unlink(metaFilePath);
logger.debug(`[S3 Adapter] Deleted metadata file: ${uploadId}.meta`);
} catch (err) {
if (err.code !== 'ENOENT') {
logger.error(`[S3 Adapter] Error deleting metadata file ${uploadId}.meta: ${err.message}`);
}
}
ensureMetadataDirExists().catch(err => {
logger.error(`[S3 Adapter] Initialization failed (metadata dir): ${err.message}`);
process.exit(1);
});
// --- S3 Object/Prefix Utilities ---
const batchS3PrefixMappings = new Map(); // In-memory: originalTopLevelFolder-batchId -> actualS3Prefix
async function s3ObjectExists(key) {
logger.info(`[S3 Adapter] s3ObjectExists: Checking key "${key}"`);
try {
await s3Client.send(new HeadObjectCommand({ Bucket: config.s3BucketName, Key: key }));
logger.info(`[S3 Adapter] s3ObjectExists: HeadObject success for key "${key}". Key EXISTS.`);
return true;
} catch (error) {
// logger.error(`[S3 Adapter DEBUG] Full error object for HeadObject on key "${key}":\n`, util.inspect(error, { showHidden: false, depth: null, colors: false }));
if (error.name === 'NotFound' || error.name === 'NoSuchKey' || (error.$metadata && error.$metadata.httpStatusCode === 404)) {
logger.info(`[S3 Adapter] s3ObjectExists: Key "${key}" NOT found (404-like error).`);
return false;
}
if (error.name === '403' || (error.$metadata && error.$metadata.httpStatusCode === 403)) {
logger.warn(`[S3 Adapter] s3ObjectExists: Received 403 Forbidden for key "${key}". For unique key generation, treating this as 'likely does not exist'.`);
return false;
}
logger.error(`[S3 Adapter] s3ObjectExists: Unhandled error type "${error.name}" for key "${key}": ${error.message}`);
throw error;
}
}
async function getUniqueS3FolderPrefix(originalPrefix, batchId) {
if (!originalPrefix || !originalPrefix.endsWith('/')) {
logger.error("[S3 Adapter] getUniqueS3FolderPrefix: originalPrefix must be a non-empty string ending with '/'");
return originalPrefix; // Or throw error
}
const prefixMapKey = `${originalPrefix}-${batchId}`;
if (batchS3PrefixMappings.has(prefixMapKey)) {
return batchS3PrefixMappings.get(prefixMapKey);
}
let currentPrefixToCheck = originalPrefix;
let counter = 1;
const baseName = originalPrefix.slice(0, -1); // "MyFolder" from "MyFolder/"
async function prefixHasObjects(prefix) {
try {
const listResponse = await s3Client.send(new ListObjectsV2Command({
Bucket: config.s3BucketName, Prefix: prefix, MaxKeys: 1
}));
return listResponse.KeyCount > 0;
} catch (error) {
logger.error(`[S3 Adapter] Error listing objects for prefix check "${prefix}": ${error.message}`);
throw error; // Propagate error if listing fails for permission reasons etc.
}
}
while (await prefixHasObjects(currentPrefixToCheck)) {
logger.warn(`[S3 Adapter] S3 prefix "${currentPrefixToCheck}" is not empty. Generating unique prefix for base "${baseName}/".`);
currentPrefixToCheck = `${baseName}-${counter}/`; // Use hyphen for suffix
counter++;
}
if (currentPrefixToCheck !== originalPrefix) {
logger.info(`[S3 Adapter] Using unique S3 folder prefix: "${currentPrefixToCheck}" for original "${originalPrefix}" in batch "${batchId}"`);
}
batchS3PrefixMappings.set(prefixMapKey, currentPrefixToCheck);
return currentPrefixToCheck;
}
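// Illustrative sketch (not part of the original source): expected prefix resolution for a
// hypothetical batch when the requested top-level folder already holds objects in the bucket.
async function examplePrefixResolution() {
  // If "Photos/" already contains objects, the helper probes "Photos-1/", "Photos-2/", ...
  // until an empty prefix is found, and caches the mapping for the batch.
  const prefix = await getUniqueS3FolderPrefix('Photos/', 'batch-123'); // e.g. -> 'Photos-1/'
  return prefix;
}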
// --- Interface Implementation ---
async function initUpload(filename, fileSize, clientBatchId) {
await ensureMetadataDirExists();
// --- Interface Implementation ---
/**
* Initializes an S3 multipart upload session (or direct put for zero-byte).
* @param {string} filename - Original filename/path from client.
* @param {number} fileSize - Total size of the file.
* @param {string} clientBatchId - Optional batch ID from client.
* @returns {Promise<{uploadId: string}>} Object containing the application's upload ID.
*/
async function initUpload(filename, fileSize, clientBatchId) {
await ensureMetadataDirExists(); // Re-check before operation
const size = Number(fileSize);
const appUploadId = crypto.randomBytes(16).toString('hex');
const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
const appUploadId = crypto.randomBytes(16).toString('hex'); // Our internal ID
const originalSanitizedFullpath = sanitizePathPreserveDirs(filename); // e.g., "MyFolder/image.jpg" or "image.jpg"
let s3KeyStructure = path.normalize(originalSanitizedFullpath)
.replace(/^(\.\.(\/|\\|$))+/, '').replace(/\\/g, '/').replace(/^\/+/, '');
// --- Path handling and Sanitization for S3 Key ---
const sanitizedFilename = sanitizePathPreserveDirs(filename);
// S3 keys should not start with /
const s3Key = path.normalize(sanitizedFilename)
.replace(/^(\.\.(\/|\\|$))+/, '')
.replace(/\\/g, '/')
.replace(/^\/+/, '');
let effectiveBasePrefix = ""; // e.g., "MyFolder-1/" or ""
const pathParts = s3KeyStructure.split('/');
const isNestedPath = pathParts.length > 1;
let relativePathInFolder = s3KeyStructure;
if (isNestedPath) {
const originalTopLevelFolder = pathParts[0] + '/'; // "MyFolder/"
effectiveBasePrefix = await getUniqueS3FolderPrefix(originalTopLevelFolder, batchId);
relativePathInFolder = pathParts.slice(1).join('/'); // "SubFolder/image.jpg" or "image.jpg"
s3KeyStructure = effectiveBasePrefix + relativePathInFolder;
}
logger.info(`[S3 Adapter] Init: Original Full Path: "${originalSanitizedFullpath}", Effective Base Prefix: "${effectiveBasePrefix}", Relative Path In Folder: "${relativePathInFolder}"`);
let finalS3Key = s3KeyStructure;
let fileCounter = 1;
const fileDir = path.dirname(s3KeyStructure);
const fileExt = path.extname(s3KeyStructure);
const fileBaseName = path.basename(s3KeyStructure, fileExt);
while (await s3ObjectExists(finalS3Key)) {
logger.warn(`[S3 Adapter] S3 file key already exists: "${finalS3Key}". Generating unique file key.`);
finalS3Key = (fileDir === "." ? "" : fileDir + "/") + `${fileBaseName}-${fileCounter}${fileExt}`; // Use hyphen
fileCounter++;
}
if (finalS3Key !== s3KeyStructure) {
logger.info(`[S3 Adapter] Using unique S3 file key: "${finalS3Key}"`);
}
logger.info(`[S3 Adapter] Init request for S3 Key: ${s3Key}`);
// --- Handle Zero-Byte Files ---
if (size === 0) {
try {
await s3Client.send(new PutObjectCommand({
Bucket: config.s3BucketName, Key: finalS3Key, Body: '', ContentLength: 0
}));
logger.success(`[S3 Adapter] Completed zero-byte file: ${finalS3Key}`);
sendNotification(originalSanitizedFullpath, 0, config);
return { uploadId: `zero-byte-${appUploadId}` };
const putCommand = new PutObjectCommand({
Bucket: config.s3BucketName,
Key: s3Key,
Body: '', // Empty body
ContentLength: 0
});
await s3Client.send(putCommand);
logger.success(`[S3 Adapter] Completed zero-byte file upload directly: ${s3Key}`);
// No metadata needed for zero-byte files as they are completed atomically
sendNotification(filename, 0, config); // Send notification (use original filename)
// Return an uploadId that won't conflict or be processable by chunk/complete
return { uploadId: `zero-byte-${appUploadId}` }; // Or maybe return null/special status?
// Returning a unique ID might be safer for client state.
} catch (putErr) {
logger.error(`[S3 Adapter] Failed zero-byte PUT for ${finalS3Key}: ${putErr.message}`);
throw putErr;
logger.error(`[S3 Adapter] Failed to put zero-byte object ${s3Key}: ${putErr.message}`);
throw putErr; // Let the route handler deal with it
}
}
// --- Initiate Multipart Upload for Non-Zero Files ---
try {
const createCommand = new CreateMultipartUploadCommand({ Bucket: config.s3BucketName, Key: finalS3Key });
const createCommand = new CreateMultipartUploadCommand({
Bucket: config.s3BucketName,
Key: s3Key,
// TODO: Consider adding ContentType if available/reliable: metadata.contentType
// TODO: Consider adding Metadata: { 'original-filename': filename } ?
});
const response = await s3Client.send(createCommand);
const s3UploadId = response.UploadId;
if (!s3UploadId) throw new Error('S3 did not return UploadId');
logger.info(`[S3 Adapter] Multipart initiated for ${finalS3Key} (S3 UploadId: ${s3UploadId})`);
if (!s3UploadId) {
throw new Error('S3 did not return an UploadId');
}
logger.info(`[S3 Adapter] Initiated multipart upload for ${s3Key} (S3 UploadId: ${s3UploadId})`);
// --- Create and Persist Local Metadata ---
const batchId = clientBatchId || `${Date.now()}-${crypto.randomBytes(4).toString('hex').substring(0, 9)}`;
const metadata = {
appUploadId, s3UploadId, s3Key: finalS3Key,
originalFilename: originalSanitizedFullpath, // Use the full original path for notification
fileSize: size, bytesReceived: 0, parts: [], batchId,
createdAt: Date.now(), lastActivity: Date.now()
appUploadId: appUploadId, // Store our ID
s3UploadId: s3UploadId,
s3Key: s3Key,
originalFilename: filename, // Keep original for notifications etc.
fileSize: size,
bytesReceived: 0, // Track approximate bytes locally
parts: [], // Array to store { PartNumber, ETag }
batchId,
createdAt: Date.now(),
lastActivity: Date.now()
};
await writeUploadMetadata(appUploadId, metadata);
return { uploadId: appUploadId };
await writeUploadMetadata(appUploadId, metadata); // Write metadata keyed by our appUploadId
return { uploadId: appUploadId }; // Return OUR internal upload ID to the client
} catch (err) {
logger.error(`[S3 Adapter] Failed multipart init for ${finalS3Key}: ${err.message}`);
logger.error(`[S3 Adapter] Failed to initiate multipart upload for ${s3Key}: ${err.message}`);
// TODO: Map specific S3 errors (e.g., NoSuchBucket, AccessDenied) to better client messages
throw err;
}
}
}
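// Illustrative sketch (not part of the original source): the shape of the local
// '.metadata/<appUploadId>.meta' state file written by initUpload. All values are placeholders.
const exampleUploadMetadata = {
  appUploadId: 'placeholder-hex-id',       // internal ID returned to the client
  s3UploadId: 'placeholder-s3-upload-id',  // ID returned by CreateMultipartUploadCommand
  s3Key: 'Photos/trip/image.jpg',
  originalFilename: 'Photos/trip/image.jpg',
  fileSize: 10485760,
  bytesReceived: 0,
  parts: [],                               // filled with { PartNumber, ETag } as parts upload
  batchId: '1746500000000-ab12cd34',
  createdAt: 1746500000000,
  lastActivity: 1746500000000
};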
async function storeChunk(appUploadId, chunk, partNumber) {
/**
* Uploads a chunk as a part to S3.
* @param {string} appUploadId - The application's upload ID.
* @param {Buffer} chunk - The data chunk to store.
* @param {number} partNumber - The sequential number of this part (starting from 1).
* @returns {Promise<{bytesReceived: number, progress: number, completed: boolean}>} Upload status.
*/
async function storeChunk(appUploadId, chunk, partNumber) {
const chunkSize = chunk.length;
if (!chunkSize) throw new Error('Empty chunk received');
if (partNumber < 1) throw new Error('PartNumber must be 1 or greater');
const metadata = await readUploadMetadata(appUploadId);
if (!metadata || !metadata.s3UploadId) { // Check for s3UploadId presence
logger.warn(`[S3 Adapter] Metadata or S3 UploadId not found for chunk: ${appUploadId}. Upload might be complete, cancelled, or zero-byte.`);
throw new Error('Upload session not found or already completed');
}
// --- Sanity Check ---
// S3 handles duplicate part uploads gracefully (last one wins), so less critical than local append.
// We still track bytesReceived locally for progress approximation.
if (metadata.bytesReceived >= metadata.fileSize && metadata.fileSize > 0) {
logger.warn(`[S3 Adapter] Chunk for already completed upload ${appUploadId}. Ignoring.`);
return { bytesReceived: metadata.bytesReceived, progress: 100, completed: true };
logger.warn(`[S3 Adapter] Received chunk for already completed upload ${appUploadId}. Ignoring.`);
// Can't really finalize again easily without full parts list. Indicate completion based on local state.
const progress = metadata.fileSize > 0 ? 100 : 0;
return { bytesReceived: metadata.bytesReceived, progress, completed: true };
}
try {
const uploadPartCommand = new UploadPartCommand({
Bucket: config.s3BucketName,
Key: metadata.s3Key,
UploadId: metadata.s3UploadId,
Body: chunk,
PartNumber: partNumber,
ContentLength: chunkSize // Required for UploadPart
});
const response = await s3Client.send(uploadPartCommand);
const etag = response.ETag;
if (!etag) {
throw new Error(`S3 did not return an ETag for PartNumber ${partNumber}`);
}
// --- Update Local Metadata ---
// Ensure parts are stored correctly
metadata.parts = metadata.parts || [];
metadata.parts.push({ PartNumber: partNumber, ETag: etag });
// Sort parts just in case uploads happen out of order client-side (though unlikely with current client)
metadata.parts.sort((a, b) => a.PartNumber - b.PartNumber);
// Update approximate bytes received
metadata.bytesReceived = (metadata.bytesReceived || 0) + chunkSize;
// Cap bytesReceived at fileSize for progress calculation
metadata.bytesReceived = Math.min(metadata.bytesReceived, metadata.fileSize);
await writeUploadMetadata(appUploadId, metadata);
// --- Calculate Progress ---
const progress = metadata.fileSize === 0 ? 100 :
Math.min(Math.round((metadata.bytesReceived / metadata.fileSize) * 100), 100);
// Check for completion potential based on local byte tracking
const completed = metadata.bytesReceived >= metadata.fileSize;
logger.debug(`[S3 Adapter] Part ${partNumber} for ${appUploadId} (Key: ${metadata.s3Key}). ETag: ${etag}. Progress: ~${progress}%. Completed: ${completed}`);
if (completed) {
logger.info(`[S3 Adapter] Upload ${appUploadId} potentially complete based on bytes received.`);
}
return { bytesReceived: metadata.bytesReceived, progress, completed };
} catch (err) {
logger.error(`[S3 Adapter] Failed to upload part ${partNumber} for ${appUploadId} (Key: ${metadata.s3Key}): ${err.message}`);
// TODO: Map specific S3 errors (InvalidPart, SlowDown, etc.)
throw err;
}
}
}
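// Illustrative sketch (not part of the original source): after three parts have been
// uploaded, the locally tracked parts list looks like this; it is what completeUpload
// passes to CompleteMultipartUploadCommand. ETags are placeholders.
const examplePartsList = [
  { PartNumber: 1, ETag: '"etag-part-1"' },
  { PartNumber: 2, ETag: '"etag-part-2"' },
  { PartNumber: 3, ETag: '"etag-part-3"' }
];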
async function completeUpload(appUploadId) {
/**
* Finalizes a completed S3 multipart upload.
* @param {string} appUploadId - The application's upload ID.
* @returns {Promise<{filename: string, size: number, finalPath: string}>} Details of the completed file (finalPath is S3 Key).
*/
async function completeUpload(appUploadId) {
const metadata = await readUploadMetadata(appUploadId);
if (!metadata || !metadata.s3UploadId || !metadata.parts || metadata.parts.length === 0) {
logger.warn(`[S3 Adapter] completeUpload called for ${appUploadId}, but metadata, S3 UploadId, or parts list is missing/empty. Assuming already completed or invalid state.`);
// Check if object exists as a fallback? Risky.
throw new Error('Upload completion failed: Required metadata or parts list not found');
}
// Basic check if enough bytes were tracked locally (approximate check)
if (metadata.bytesReceived < metadata.fileSize) {
logger.warn(`[S3 Adapter] Attempting to complete upload ${appUploadId} but locally tracked bytes (${metadata.bytesReceived}) are less than expected size (${metadata.fileSize}). Proceeding anyway.`);
}
try {
const completeCommand = new CompleteMultipartUploadCommand({
Bucket: config.s3BucketName,
Key: metadata.s3Key,
UploadId: metadata.s3UploadId,
MultipartUpload: {
Parts: metadata.parts // Use the collected parts { PartNumber, ETag }
},
});
const response = await s3Client.send(completeCommand);
// Example response: { ETag: '"..."', Location: '...', Key: '...', Bucket: '...' }
logger.success(`[S3 Adapter] Finalized multipart upload: ${metadata.s3Key} (ETag: ${response.ETag})`);
// Clean up local metadata AFTER successful S3 completion
await deleteUploadMetadata(appUploadId);
// Send notification
sendNotification(metadata.originalFilename, metadata.fileSize, config);
// Return info consistent with local adapter where possible
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };
} catch (err) {
logger.error(`[S3 Adapter] Failed to complete multipart upload for ${appUploadId} (Key: ${metadata.s3Key}): ${err.message}`);
// Specific S3 errors like InvalidPartOrder, EntityTooSmall might occur here.
// If Complete fails, S3 *might* have already assembled it (rare).
// Check if the object now exists? If so, maybe delete metadata? Complex recovery.
// For now, just log the error and throw. The local metadata will persist.
if (err.Code === 'NoSuchUpload' || err.name === 'NoSuchUpload') { // SDK v3 surfaces the error code via err.name
logger.warn(`[S3 Adapter] CompleteMultipartUpload failed with NoSuchUpload for ${appUploadId}. Assuming already completed or aborted.`);
await deleteUploadMetadata(appUploadId).catch(()=>{}); // Attempt metadata cleanup
// Check if final object exists?
try {
// Use GetObject or HeadObject to check
await s3Client.send(new GetObjectCommand({ Bucket: config.s3BucketName, Key: metadata.s3Key }));
logger.info(`[S3 Adapter] Final object ${metadata.s3Key} exists after NoSuchUpload error. Treating as completed.`);
return { filename: metadata.originalFilename, size: metadata.fileSize, finalPath: metadata.s3Key };
} catch (headErr) {
// Final object doesn't exist either.
throw new Error('Completion failed: Upload session not found and final object does not exist.');
}
}
throw err;
}
}
/**
* Aborts an ongoing S3 multipart upload.
* @param {string} appUploadId - The application's upload ID.
* @returns {Promise<void>}
*/
async function abortUpload(appUploadId) {
const metadata = await readUploadMetadata(appUploadId);
if (!metadata || !metadata.s3UploadId) {
logger.warn(`[S3 Adapter] Abort request for non-existent or completed upload: ${appUploadId}`);
await deleteUploadMetadata(appUploadId); // Clean up local metadata if it exists anyway
return;
}
try {
const abortCommand = new AbortMultipartUploadCommand({
Bucket: config.s3BucketName,
Key: metadata.s3Key,
UploadId: metadata.s3UploadId,
});
await s3Client.send(abortCommand);
logger.info(`[S3 Adapter] Aborted multipart upload: ${appUploadId} (Key: ${metadata.s3Key})`);
} catch (err) {
if (err.name === 'NoSuchUpload') {
logger.warn(`[S3 Adapter] Multipart upload ${appUploadId} (Key: ${metadata.s3Key}) not found during abort. Already aborted or completed.`);
} else {
logger.error(`[S3 Adapter] Failed to abort multipart upload for ${appUploadId} (Key: ${metadata.s3Key}): ${err.message}`);
// Don't delete local metadata if abort failed, might be retryable or need manual cleanup
throw err; // Rethrow S3 error
}
}
// Delete local metadata AFTER successful abort or if NoSuchUpload
await deleteUploadMetadata(appUploadId);
}
/**
* Lists files in the S3 bucket.
* @returns {Promise<Array<{filename: string, size: number, formattedSize: string, uploadDate: Date}>>} List of files.
*/
async function listFiles() {
try {
    let isTruncated = true;
    let continuationToken;
    const allFiles = [];
    // Paginate: ListObjectsV2 returns at most 1000 keys per response
    while (isTruncated) {
      const command = new ListObjectsV2Command({
        Bucket: config.s3BucketName,
        // Optional: Add Prefix if you want to list within a specific 'folder'
        // Prefix: 'uploads/'
        ...(continuationToken ? { ContinuationToken: continuationToken } : {})
      });
      const response = await s3Client.send(command);
      (response.Contents || [])
        // Optional: Filter out objects that might represent folders if necessary
        // .filter(item => !(item.Key.endsWith('/') && item.Size === 0))
        .forEach(item => allFiles.push({
          filename: item.Key, // S3 Key is the filename/path
          size: item.Size,
          formattedSize: formatFileSize(item.Size), // Use utility
          uploadDate: item.LastModified
        }));
      isTruncated = response.IsTruncated;
      continuationToken = response.NextContinuationToken;
    }
    // Sort by date, newest first
    allFiles.sort((a, b) => b.uploadDate.getTime() - a.uploadDate.getTime());
    return allFiles;
  } catch (err) {
    logger.error(`[S3 Adapter] Failed to list objects in bucket ${config.s3BucketName}: ${err.message}`);
    throw err;
  }
}
/**
* Generates a presigned URL for downloading an S3 object.
* @param {string} s3Key - The S3 Key (filename/path) of the object.
* @returns {Promise<{type: string, value: string}>} Object indicating type ('url') and value (the presigned URL).
*/
async function getDownloadUrlOrStream(s3Key) {
// Input `s3Key` is assumed to be sanitized by the calling route/logic
if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) {
logger.error(`[S3 Adapter] Invalid S3 key detected for download: ${s3Key}`);
throw new Error('Invalid filename');
}
try {
const command = new GetObjectCommand({
Bucket: config.s3BucketName,
Key: s3Key,
// Optional: Override response headers like filename
// ResponseContentDisposition: `attachment; filename="${path.basename(s3Key)}"`
});
// Generate presigned URL (expires in 1 hour by default, adjustable)
const url = await getSignedUrl(s3Client, command, { expiresIn: 3600 });
logger.info(`[S3 Adapter] Generated presigned URL for ${s3Key}`);
return { type: 'url', value: url };
} catch (err) {
logger.error(`[S3 Adapter] Failed to generate presigned URL for ${s3Key}: ${err.message}`);
if (err.name === 'NoSuchKey') {
throw new Error('File not found in S3');
}
throw err; // Re-throw other S3 errors
}
}
/**
* Deletes an object from the S3 bucket.
* @param {string} s3Key - The S3 Key (filename/path) of the object to delete.
* @returns {Promise<void>}
*/
async function deleteFile(s3Key) {
// Input `s3Key` is assumed to be sanitized
if (!s3Key || s3Key.includes('..') || s3Key.startsWith('/')) {
logger.error(`[S3 Adapter] Invalid S3 key detected for delete: ${s3Key}`);
throw new Error('Invalid filename');
}
try {
const command = new DeleteObjectCommand({
Bucket: config.s3BucketName,
Key: s3Key,
});
await s3Client.send(command);
logger.info(`[S3 Adapter] Deleted object: ${s3Key}`);
} catch (err) {
// DeleteObject is idempotent, so NoSuchKey isn't typically an error unless you need to know.
logger.error(`[S3 Adapter] Failed to delete object ${s3Key}: ${err.message}`);
throw err;
}
}
/**
* Cleans up stale *local* metadata files for S3 uploads.
* Relies on S3 Lifecycle Policies for actual S3 cleanup.
* @returns {Promise<void>}
*/
async function cleanupStale() {
logger.info('[S3 Adapter] Running cleanup for stale local metadata files...');
let cleanedCount = 0;
let checkedCount = 0;
try {
await ensureMetadataDirExists(); // Re-check
const files = await fs.readdir(METADATA_DIR);
const now = Date.now();
for (const file of files) {
if (file.endsWith('.meta')) {
checkedCount++;
const appUploadId = file.replace('.meta', '');
const metaFilePath = path.join(METADATA_DIR, file);
try {
const data = await fs.readFile(metaFilePath, 'utf8');
const metadata = JSON.parse(data);
// Check inactivity based on local metadata timestamp
if (now - (metadata.lastActivity || metadata.createdAt || 0) > UPLOAD_TIMEOUT) {
logger.warn(`[S3 Adapter] Found stale local metadata: ${file}. Last activity: ${new Date(metadata.lastActivity || metadata.createdAt)}. S3 UploadId: ${metadata.s3UploadId || 'N/A'}`);
// Only delete the LOCAL metadata file. DO NOT ABORT S3 UPLOAD HERE.
await deleteUploadMetadata(appUploadId); // Use helper
cleanedCount++;
}
} catch (readErr) {
logger.error(`[S3 Adapter] Error reading/parsing local metadata ${metaFilePath} during cleanup: ${readErr.message}. Skipping.`);
await fs.unlink(metaFilePath).catch(()=>{ logger.warn(`[S3 Adapter] Failed to delete potentially corrupt local metadata file: ${metaFilePath}`) });
}
} else if (file.endsWith('.tmp')) {
// Clean up potential leftover temp metadata files (same as local adapter)
const tempMetaPath = path.join(METADATA_DIR, file);
try {
const stats = await fs.stat(tempMetaPath);
if (now - stats.mtime.getTime() > UPLOAD_TIMEOUT) {
logger.warn(`[S3 Adapter] Deleting stale temporary local metadata file: ${file}`);
await fs.unlink(tempMetaPath);
}
} catch (statErr) {
if (statErr.code !== 'ENOENT') {
logger.error(`[S3 Adapter] Error checking temp local metadata file ${tempMetaPath}: ${statErr.message}`);
}
}
}
}
if (checkedCount > 0 || cleanedCount > 0) {
logger.info(`[S3 Adapter] Local metadata cleanup finished. Checked: ${checkedCount}, Cleaned stale local files: ${cleanedCount}.`);
}
// Log the crucial recommendation
logger.warn(`[S3 Adapter] IMPORTANT: For S3 storage, configure Lifecycle Rules on your bucket (${config.s3BucketName}) or use provider-specific tools to automatically clean up incomplete multipart uploads after a few days. This adapter only cleans up local tracking files.`);
} catch (err) {
if (err.code === 'ENOENT' && err.path === METADATA_DIR) {
logger.warn('[S3 Adapter] Local metadata directory not found during cleanup scan.');
} else {
logger.error(`[S3 Adapter] Error during local metadata cleanup scan: ${err.message}`);
}
}
}
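// --- Editorial sketch (not part of the original adapter): bucket lifecycle rule ---
// As the warning above notes, this adapter only removes its local tracking files;
// the bucket itself should abort stale multipart uploads via a lifecycle rule.
// A minimal provisioning-time sketch using AWS SDK v3 follows. The rule ID and the
// 7-day window are illustrative assumptions, and PutBucketLifecycleConfiguration
// replaces any existing lifecycle configuration on the bucket, so merge rules if one exists.
const s3sdk = require('@aws-sdk/client-s3'); // required separately so the sketch stays self-contained

async function ensureAbortIncompleteUploadsRule(bucketName) {
  const client = new s3sdk.S3Client({}); // region/credentials assumed to come from the environment
  await client.send(new s3sdk.PutBucketLifecycleConfigurationCommand({
    Bucket: bucketName,
    LifecycleConfiguration: {
      Rules: [{
        ID: 'abort-incomplete-multipart-uploads', // hypothetical rule ID
        Status: 'Enabled',
        Filter: { Prefix: '' }, // apply to the whole bucket
        AbortIncompleteMultipartUpload: { DaysAfterInitiation: 7 } // assumed cleanup window
      }]
    }
  }));
}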
module.exports = {
initUpload,
storeChunk,
completeUpload,
abortUpload,
listFiles,
getDownloadUrlOrStream,
deleteFile,
cleanupStale
};
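For reference, a hedged sketch of how a download route elsewhere in the app might consume the { type: 'url', value } result returned by getDownloadUrlOrStream; the Express wiring, route path, and ./storage factory name are illustrative assumptions, not part of this diff:

// routes/files.js (hypothetical) — consuming the adapter's presigned-URL result
const express = require('express');
const storage = require('./storage'); // hypothetical factory that returns this adapter

const router = express.Router();

router.get('/files/:key/download', async (req, res) => {
  try {
    const result = await storage.getDownloadUrlOrStream(req.params.key);
    if (result.type === 'url') {
      return res.redirect(result.value); // S3 adapter: hand the client the presigned URL
    }
    return result.value.pipe(res); // other adapters may return a readable stream instead
  } catch (err) {
    const status = /not found/i.test(err.message) ? 404 : 500;
    res.status(status).json({ error: err.message });
  }
});

module.exports = router;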