1 Commit
dev ... s3

greirson
bdd80020a0 feat(storage): Implement S3 and local storage adapters with enhanced configuration
- Introduced a storage adapter factory to dynamically select between local and S3 storage based on the STORAGE_TYPE environment variable.
- Added S3 adapter for handling file operations on AWS S3, including multipart uploads and presigned URLs.
- Implemented local storage adapter for managing file operations on the local filesystem.
- Enhanced configuration validation to ensure proper setup for both storage types.
- Updated .env.example and README.md to document new storage configuration options and usage.

This commit significantly improves the application's flexibility in handling file uploads by supporting both local and cloud storage options, enhancing user experience and deployment versatility.
2025-05-05 21:52:22 -07:00
12 changed files with 1275 additions and 1119 deletions
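The storage adapter factory named in the commit message is not shown in full in this diff; the following is a minimal sketch of the selection logic it implies, with module paths and function names assumed rather than taken from this commit:

```js
// Hypothetical sketch of the STORAGE_TYPE-based adapter factory (names assumed).
const createLocalAdapter = require('./storage/local');
const createS3Adapter = require('./storage/s3');

function createStorageAdapter(config) {
  switch (config.storageType) {
    case 's3':
      // S3 adapter: AWS S3 file operations, multipart uploads, presigned URLs
      return createS3Adapter(config);
    case 'local':
    default:
      // Local adapter: filesystem operations under config.uploadDir
      return createLocalAdapter(config);
  }
}

module.exports = { createStorageAdapter };
```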

View File: .env.example

@@ -106,23 +106,6 @@ AUTO_UPLOAD=false
# ALLOWED_IFRAME_ORIGINS=https://example.com,https://another.com
ALLOWED_IFRAME_ORIGINS=
# --- Docker Specific Settings ---
# User and Group IDs for file permissions
# Sets the user/group the application runs as inside the container.
# Files created in the mapped volume (e.g., ./local_uploads) will have this ownership.
# Set these to match your host user's ID/GID to avoid permission issues.
# Find your IDs with `id -u` and `id -g` on Linux/macOS.
# PUID=1000
# PGID=1000
# File Mode Creation Mask (Umask)
# Controls the default permissions for newly created files.
# 022 (default): Files 644 (rw-r--r--), Dirs 755 (rwxr-xr-x)
# 002: Files 664 (rw-rw-r--), Dirs 775 (rwxrwxr-x) - Good for group sharing
# 007: Files 660 (rw-rw----), Dirs 770 (rwxrwx---) - More restrictive
# 077: Files 600 (rw-------), Dirs 700 (rwx------) - Most restrictive
# UMASK=022
# Max number of retries for client-side chunk uploads (default: 5)
CLIENT_MAX_RETRIES=5
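For context, `CLIENT_MAX_RETRIES` reaches the browser through the `{{MAX_RETRIES}}` placeholder that the client script later in this diff reads; a hedged sketch of the server-side substitution, with the render helper and config field names assumed:

```js
// Hypothetical sketch of the template injection (helper and field names assumed);
// the {{MAX_RETRIES}}, {{AUTO_UPLOAD}} and {{FOOTER_CONTENT}} placeholders all
// appear verbatim in the index.html diff below.
const fs = require('fs');

function renderIndexHtml(config, footerHtml) {
  return fs.readFileSync('public/index.html', 'utf8')
    .replace('{{MAX_RETRIES}}', String(config.clientMaxRetries))
    .replace('{{AUTO_UPLOAD}}', String(config.autoUpload))
    .replace('{{FOOTER_CONTENT}}', footerHtml);
}
```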

View File: Dockerfile

@@ -1,16 +1,8 @@
# Base stage for shared configurations
FROM node:20-alpine as base
# Add user and group IDs as arguments with defaults
ARG PUID=1000
ARG PGID=1000
# Default umask (complement of 022 is 755 for dirs, 644 for files)
ARG UMASK=022
# Install necessary packages:
# - su-exec: lightweight sudo alternative
# - python3, pip: for apprise dependency
RUN apk add --no-cache su-exec python3 py3-pip && \
# Install python and create virtual environment with minimal dependencies
RUN apk add --no-cache python3 py3-pip && \
python3 -m venv /opt/venv && \
rm -rf /var/cache/apk/*
@@ -22,194 +14,52 @@ RUN . /opt/venv/bin/activate && \
# Add virtual environment to PATH
ENV PATH="/opt/venv/bin:$PATH"
# Create group and user with fallback to prevent build failures
# We use the ARG values here, but with a fallback mechanism to avoid build failures
RUN ( \
set -e; \
echo "Attempting to create/verify user with PUID=${PUID} and PGID=${PGID}..."; \
\
# Initialize variables \
TARGET_USER="nodeuser"; \
TARGET_GROUP="nodeuser"; \
NEW_GID="${PGID}"; \
NEW_UID="${PUID}"; \
\
# Step 1: Handle GID and group first \
echo "Setting up group for GID ${NEW_GID}..."; \
if getent group "${NEW_GID}" > /dev/null; then \
# GID exists, check which group has it \
EXISTING_GROUP=$(getent group "${NEW_GID}" | cut -d: -f1); \
echo "GID ${NEW_GID} is already used by group '${EXISTING_GROUP}'."; \
\
if [ "${EXISTING_GROUP}" = "${TARGET_GROUP}" ]; then \
echo "Group '${TARGET_GROUP}' already exists with correct GID ${NEW_GID}."; \
else \
# GID exists but used by a different group (likely 'node') \
echo "Will create '${TARGET_GROUP}' with a different GID to avoid conflict."; \
# Check if TARGET_GROUP exists but with wrong GID \
if getent group "${TARGET_GROUP}" > /dev/null; then \
echo "Group '${TARGET_GROUP}' exists but with wrong GID. Deleting it."; \
delgroup "${TARGET_GROUP}" || true; \
fi; \
# Create TARGET_GROUP with GID+1 (or find next available GID) \
NEXT_GID=$((${NEW_GID} + 1)); \
while getent group "${NEXT_GID}" > /dev/null; do \
NEXT_GID=$((${NEXT_GID} + 1)); \
done; \
echo "Creating group '${TARGET_GROUP}' with new GID ${NEXT_GID}."; \
addgroup -S -g "${NEXT_GID}" "${TARGET_GROUP}"; \
NEW_GID="${NEXT_GID}"; \
fi; \
else \
# GID does not exist - create group with desired GID \
echo "Creating group '${TARGET_GROUP}' with GID ${NEW_GID}."; \
addgroup -S -g "${NEW_GID}" "${TARGET_GROUP}"; \
fi; \
\
# Verify group was created \
echo "Verifying group '${TARGET_GROUP}' exists..."; \
getent group "${TARGET_GROUP}" || (echo "ERROR: Failed to find group '${TARGET_GROUP}'!"; exit 1); \
GID_FOR_USER=$(getent group "${TARGET_GROUP}" | cut -d: -f3); \
echo "Final group: '${TARGET_GROUP}' with GID ${GID_FOR_USER}"; \
\
# Step 2: Handle UID and user \
echo "Setting up user with UID ${NEW_UID}..."; \
if getent passwd "${NEW_UID}" > /dev/null; then \
# UID exists, check which user has it \
EXISTING_USER=$(getent passwd "${NEW_UID}" | cut -d: -f1); \
echo "UID ${NEW_UID} is already used by user '${EXISTING_USER}'."; \
\
if [ "${EXISTING_USER}" = "${TARGET_USER}" ]; then \
echo "User '${TARGET_USER}' already exists with correct UID ${NEW_UID}."; \
# Check if user needs group update \
CURRENT_GID=$(getent passwd "${TARGET_USER}" | cut -d: -f4); \
if [ "${CURRENT_GID}" != "${GID_FOR_USER}" ]; then \
echo "User '${TARGET_USER}' has wrong GID (${CURRENT_GID}). Modifying..."; \
deluser "${TARGET_USER}"; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
else \
# Another user has our UID (e.g., 'node'). Delete it. \
echo "Deleting existing user '${EXISTING_USER}' with UID ${NEW_UID}."; \
deluser "${EXISTING_USER}" || true; \
\
# Now check if TARGET_USER exists but with wrong UID \
if getent passwd "${TARGET_USER}" > /dev/null; then \
echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
deluser "${TARGET_USER}" || true; \
fi; \
\
# Create user \
echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
else \
# UID does not exist - check if user exists with wrong UID \
if getent passwd "${TARGET_USER}" > /dev/null; then \
echo "User '${TARGET_USER}' exists but with wrong UID. Updating..."; \
deluser "${TARGET_USER}" || true; \
fi; \
\
# Create user with desired UID \
echo "Creating user '${TARGET_USER}' with UID ${NEW_UID} and group '${TARGET_GROUP}'."; \
adduser -S -D -u "${NEW_UID}" -G "${TARGET_GROUP}" -s /bin/sh "${TARGET_USER}"; \
fi; \
\
# Create and set permissions on home directory \
echo "Setting up home directory for ${TARGET_USER}..."; \
mkdir -p /home/${TARGET_USER} && \
chown -R ${TARGET_USER}:${TARGET_GROUP} /home/${TARGET_USER} && \
chmod 755 /home/${TARGET_USER}; \
\
# Verify user was created \
echo "Verifying user '${TARGET_USER}' exists..."; \
getent passwd "${TARGET_USER}" || (echo "ERROR: Failed to find user '${TARGET_USER}'!"; exit 1); \
\
# Clean up and verify system files \
echo "Ensuring root user definition is pristine..."; \
chown root:root /etc/passwd /etc/group && \
chmod 644 /etc/passwd /etc/group && \
getent passwd root || (echo "ERROR: root not found after user/group operations!"; exit 1); \
\
# Print final status \
echo "Final user/group setup:"; \
id "${TARGET_USER}"; \
)
WORKDIR /usr/src/app
# Set UMASK - this applies to processes run by the user created in this stage
# The entrypoint will also set it based on the ENV var at runtime.
RUN umask ${UMASK}
# Dependencies stage
FROM base as deps
# Change ownership early so npm cache is owned correctly
RUN chown nodeuser:nodeuser /usr/src/app
# Switch to nodeuser before running npm commands
USER nodeuser
COPY --chown=nodeuser:nodeuser package*.json ./
COPY package*.json ./
RUN npm ci --only=production && \
# Remove npm cache
npm cache clean --force
# Switch back to root for the next stages if needed
USER root
# Development stage
FROM deps as development
USER root
ENV NODE_ENV=development
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads
COPY --chown=nodeuser:nodeuser package*.json ./
# Install dev dependencies
RUN npm install && \
npm cache clean --force
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/
# Check if __tests__ and dev exist in your project root; if not, these COPY lines will fail for the dev target
# COPY --chown=nodeuser:nodeuser __tests__/ ./__tests__/
# COPY --chown=nodeuser:nodeuser dev/ ./dev/
COPY --chown=nodeuser:nodeuser .eslintrc.json .eslintignore .prettierrc nodemon.json ./
# Create upload directory
RUN mkdir -p uploads
# Switch back to nodeuser for runtime
USER nodeuser
EXPOSE 3000
# Production stage
FROM deps as production
USER root
ENV NODE_ENV=production
ENV UPLOAD_DIR /app/uploads
# Create and set up directories
RUN mkdir -p /usr/src/app/local_uploads /usr/src/app/uploads && \
chown -R nodeuser:nodeuser /usr/src/app/local_uploads /usr/src/app/uploads
# Copy only necessary source files and ensure ownership
COPY --chown=nodeuser:nodeuser src/ ./src/
COPY --chown=nodeuser:nodeuser public/ ./public/
# Copy the entrypoint script and make it executable
COPY --chown=root:root src/scripts/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Copy source with specific paths to avoid unnecessary files
COPY src/ ./src/
COPY public/ ./public/
COPY __tests__/ ./__tests__/
COPY dev/ ./dev/
COPY .eslintrc.json .eslintignore ./
# Expose port
EXPOSE 3000
# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
CMD ["npm", "run", "dev"]
# Final user should be nodeuser for runtime
USER nodeuser
# Production stage
FROM deps as production
ENV NODE_ENV=production
ENV UPLOAD_DIR /app/uploads
# Default command to run (passed to entrypoint)
CMD ["npm", "start"]
# Create upload directory
# RUN mkdir -p uploads # No longer strictly needed here as volume mapping is expected, but harmless
# Copy only necessary source files
COPY src/ ./src/
COPY public/ ./public/
# Expose port
EXPOSE 3000
CMD ["npm", "start"]

View File: README.md

@@ -254,4 +254,6 @@ See [Local Development (Recommended Quick Start)](LOCAL_DEVELOPMENT.md) for loca
Made with ❤️ by [DumbWare.io](https://dumbware.io)
## Future Features
- Camera Upload for Mobile
> Got an idea? [Open an issue](https://github.com/dumbwareio/dumbdrop/issues) or [submit a PR](https://github.com/dumbwareio/dumbdrop/pulls)

View File: docker-compose.yml

@@ -14,29 +14,13 @@ services:
DUMBDROP_PIN: 123456 # Optional PIN protection (4-10 digits, leave empty to disable)
AUTO_UPLOAD: true # Upload without clicking button
BASE_URL: http://localhost:3000 # The base URL for the application
# Additional available environment variables (commented out with defaults)
# FOOTER_LINKS: "My Site @ https://example.com,Docs @ https://docs.example.com" # Custom footer links
# PORT: 3000 # Server port (default: 3000)
# NODE_ENV: production # Node environment (development/production)
# DEBUG: false # Debug mode for verbose logging (default: false in production, true in development)
# APPRISE_URL: "" # Apprise notification URL for upload notifications (default: none)
# APPRISE_MESSAGE: "New file uploaded - {filename} ({size}), Storage used {storage}" # Notification message template with placeholders: {filename}, {size}, {storage}
# APPRISE_SIZE_UNIT: "Auto" # Size unit for notifications (B, KB, MB, GB, TB, or Auto)
# ALLOWED_EXTENSIONS: ".jpg,.jpeg,.png,.pdf,.doc,.docx,.txt" # Comma-separated list of allowed file extensions (default: all allowed)
# PUID: 1000 # User ID for file ownership (default: 1000)
# PGID: 1000 # Group ID for file ownership (default: 1000)
# UMASK: "000" # File permissions mask (default: 000)
restart: unless-stopped
# user: "${PUID}:${PGID}" # Don't set user here, entrypoint handles it
# Consider adding healthcheck
# healthcheck:
# test: ["CMD", "curl", "--fail", "http://localhost:3000/health"] # Assuming a /health endpoint exists
# interval: 30s
# timeout: 10s
# retries: 3
# healthcheck:
# test: ["CMD", "curl", "--fail", "http://localhost:3000/health"]
# interval: 30s
# timeout: 10s
# retries: 3
# start_period: 30s
# ALLOWED_EXTENSIONS: ".jpg,.jpeg,.png,.pdf,.doc,.docx,.txt" # Comma-separated list of allowed file extensions (default: all allowed)

View File: public/index.html

@@ -49,6 +49,9 @@
<div id="uploadProgress"></div> <!-- Original progress bar container -->
<div id="fileList" class="file-list"></div> <!-- Original file list container -->
<button id="uploadButton" class="upload-button" style="display: none;">Upload Files</button>
<footer>
{{FOOTER_CONTENT}}
</footer>
</div>
<script defer>
@@ -56,51 +59,67 @@
const CHUNK_SIZE = 1024 * 1024 * 5; // 5MB chunks
const RETRY_DELAY = 1000; // 1 second delay between retries
// Read MAX_RETRIES from the injected server value, with a fallback
const MAX_RETRIES_STR = '{{MAX_RETRIES}}';
let maxRetries = 5;
let maxRetries = 5; // Default value
if (MAX_RETRIES_STR && MAX_RETRIES_STR !== '{{MAX_RETRIES}}') {
const parsedRetries = parseInt(MAX_RETRIES_STR, 10);
if (!isNaN(parsedRetries) && parsedRetries >= 0) maxRetries = parsedRetries;
else console.warn(`Invalid MAX_RETRIES value "${MAX_RETRIES_STR}", defaulting to ${maxRetries}.`);
} else console.warn('MAX_RETRIES not injected by server, defaulting to 5.');
if (!isNaN(parsedRetries) && parsedRetries >= 0) {
maxRetries = parsedRetries;
} else {
console.warn(`Invalid MAX_RETRIES value "${MAX_RETRIES_STR}" received from server, defaulting to ${maxRetries}.`);
}
} else {
console.warn('MAX_RETRIES not injected by server, defaulting to 5.');
}
window.MAX_RETRIES = maxRetries;
console.log(`Max retries for chunk uploads: ${window.MAX_RETRIES}`);
const AUTO_UPLOAD_STR = '{{AUTO_UPLOAD}}';
const AUTO_UPLOAD = ['true', '1', 'yes'].includes(AUTO_UPLOAD_STR.toLowerCase());
// --- NEW: Variable to track active uploads ---
let activeUploadCount = 0;
// Utility function to generate a unique batch ID
function generateBatchId() {
return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
}
function generateBatchId() { return `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`; }
function formatFileSize(bytes) { if (bytes === 0) return '0 Bytes'; const k = 1024; const sizes = ['Bytes', 'KB', 'MB', 'GB']; const i = Math.floor(Math.log(bytes) / Math.log(k)); return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; }
// Utility function to format file sizes
function formatFileSize(bytes) {
if (bytes === 0) return '0 Bytes';
const k = 1024;
const sizes = ['Bytes', 'KB', 'MB', 'GB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
class FileUploader {
constructor(file, batchId) {
this.file = file;
this.batchId = batchId;
this.uploadId = null;
this.uploadId = null; // Application's upload ID
this.position = 0;
this.progressElement = null;
this.chunkSize = CHUNK_SIZE;
this.lastUploadedBytes = 0;
this.lastUploadTime = null;
this.uploadRate = 0;
this.progressElement = null; // For the separate progress bar
this.chunkSize = CHUNK_SIZE; // Use constant
this.lastUploadedBytes = 0; // Used for rate calculation in original progress bar
this.lastUploadTime = null; // Used for rate calculation
this.uploadRate = 0; // Used for rate calculation
this.maxRetries = window.MAX_RETRIES;
this.retryDelay = RETRY_DELAY;
// *** ADDED for S3/Adapter logic ***
// *** ADDED partNumber for S3 ***
this.partNumber = 1;
// *** ADDED completed flag ***
this.completed = false;
}
async start() {
try {
this.createProgressElement(); // Original: create separate progress bar
this.updateProgress(0);
this.createProgressElement(); // Create the progress bar UI element
this.updateProgress(0); // Initial progress update to 0%
await this.initUpload();
// Handle zero-byte files completed during init
if (this.uploadId && this.uploadId.startsWith('zero-byte-')) {
console.log(`Zero-byte file ${this.file.name} handled by server init.`);
console.log(`Zero-byte file ${this.file.name} handled by server during init.`);
this.updateProgress(100);
this.completed = true;
return true;
@@ -109,10 +128,11 @@
if (this.uploadId && this.file.size > 0) {
await this.uploadChunks();
} else if (this.file.size === 0 && !this.completed) {
console.warn(`File ${this.file.name} is zero bytes, init didn't indicate completion. Assuming complete.`);
console.warn(`File ${this.file.name} is zero bytes, but init didn't indicate completion.`);
this.updateProgress(100);
this.completed = true;
}
// Return completion status
return this.completed;
} catch (error) {
console.error(`Upload failed for ${this.file.webkitRelativePath || this.file.name}:`, error);
@@ -120,178 +140,291 @@
this.progressElement.infoSpan.textContent = `Error: ${error.message}`;
this.progressElement.infoSpan.style.color = 'var(--danger-color)';
}
await this.cancelUploadOnServer();
await this.cancelUploadOnServer(); // Attempt cancellation
this.completed = false;
return false;
}
}
async initUpload() {
// (initUpload logic is identical to the previous version - uses fetch to /init)
const uploadPath = this.file.webkitRelativePath || this.file.name;
const consistentPath = uploadPath.replace(/\\/g, '/');
console.log(`[Uploader] Init for: ${consistentPath} (Size: ${this.file.size})`);
console.log(`[Uploader] Initializing upload for: ${consistentPath} (Size: ${this.file.size}, Batch: ${this.batchId})`);
const headers = { 'Content-Type': 'application/json' };
if (this.batchId) headers['X-Batch-ID'] = this.batchId;
const apiUrlPath = '/api/upload/init';
const fullApiUrl = window.BASE_URL + (apiUrlPath.startsWith('/') ? apiUrlPath.substring(1) : apiUrlPath);
const response = await fetch(fullApiUrl, {
method: 'POST', headers,
method: 'POST',
headers,
body: JSON.stringify({ filename: consistentPath, fileSize: this.file.size })
});
if (!response.ok) {
const errData = await response.json().catch(() => ({ error: `Server error ${response.status}` }));
throw new Error(errData.details || errData.error || `Init failed: ${response.status}`);
const errorData = await response.json().catch(() => ({ error: `Server error ${response.status}` }));
throw new Error(errorData.details || errorData.error || `Init failed: ${response.status}`);
}
const data = await response.json();
if (!data.uploadId) throw new Error('Server did not return uploadId');
this.uploadId = data.uploadId;
console.log(`[Uploader] Init success. App Upload ID: ${this.uploadId}`);
console.log(`[Uploader] Init successful. App Upload ID: ${this.uploadId}`);
}
async uploadChunks() {
if (!this.progressElement) this.createProgressElement(); // Ensure progress bar exists
// Create progress element if not already done (might happen if start didn't create it due to early exit/error)
if (!this.progressElement) this.createProgressElement();
while (this.position < this.file.size && !this.completed) {
while (this.position < this.file.size && !this.completed) { // Check completed flag
const chunkStartPosition = this.position;
const chunk = await this.readChunk();
const currentPartNumber = this.partNumber;
const chunk = await this.readChunk(); // Reads based on this.position, updates this.position
const currentPartNumber = this.partNumber; // *** Get current part number ***
try {
console.debug(`[Uploader] Attempting Part ${currentPartNumber}, Bytes ${chunkStartPosition}-${this.position-1}`);
// *** Pass partNumber to upload function ***
const result = await this.uploadChunkWithRetry(chunk, chunkStartPosition, currentPartNumber);
// Update original progress bar with server's progress
this.updateProgress(result.progress);
// *** Increment part number AFTER successful upload ***
this.partNumber++;
// *** Check if server response indicates completion ***
if (result.completed) {
console.log(`[Uploader] Server indicated completion after Part ${currentPartNumber}.`);
this.completed = true;
this.updateProgress(100); // Ensure it hits 100%
break;
this.updateProgress(100); // Update original progress bar
break; // Exit loop
}
} catch (error) {
console.error(`[Uploader] UploadChunks failed for Part ${this.partNumber}, File: ${this.file.name}`);
throw error;
console.error(`[Uploader] UploadChunks failed permanently after retries for Part ${this.partNumber}. File: ${this.file.webkitRelativePath || this.file.name}`);
throw error; // Propagate up
}
}
// Check completion after loop, same as before
if (!this.completed && this.position >= this.file.size) {
this.completed = true; this.updateProgress(100);
console.warn(`[Uploader] Reached end of file but not marked completed by server. Assuming complete.`);
this.completed = true;
this.updateProgress(100);
}
}
async readChunk() {
// (readChunk logic is identical)
const start = this.position;
const end = Math.min(this.position + this.chunkSize, this.file.size);
const blob = this.file.slice(start, end);
this.position = end;
this.position = end; // Update position *after* slicing
return await blob.arrayBuffer();
}
}
// *** MODIFIED: Added partNumber parameter ***
async uploadChunkWithRetry(chunk, chunkStartPosition, partNumber) {
const chunkApiUrlPath = `/api/upload/chunk/${this.uploadId}?partNumber=${partNumber}`; // *** ADDED partNumber ***
// *** MODIFIED: Append partNumber query parameter to URL ***
const chunkApiUrlPath = `/api/upload/chunk/${this.uploadId}?partNumber=${partNumber}`;
const fullChunkApiUrl = window.BASE_URL + (chunkApiUrlPath.startsWith('/') ? chunkApiUrlPath.substring(1) : chunkApiUrlPath);
let lastError = null;
for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
try {
if (attempt > 0) {
console.warn(`[Uploader] Retrying Part ${partNumber} (Attempt ${attempt}/${this.maxRetries})`);
console.warn(`[Uploader] Retrying Part ${partNumber} upload for ${this.file.webkitRelativePath || this.file.name} (Attempt ${attempt}/${this.maxRetries})...`);
this.updateProgressElementInfo(`Retrying attempt ${attempt}...`, 'var(--warning-color)');
} else if (this.progressElement) { // Update info for first attempt
this.updateProgressElementInfo(`uploading part ${partNumber}...`);
} else {
// Update status text for the current part (optional, depends if you want this level of detail)
// this.updateProgressElementInfo(`uploading part ${partNumber}...`);
}
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 60000); // 60s timeout
// Increase timeout slightly for S3 potentially
const timeoutId = setTimeout(() => controller.abort(), 60000);
const response = await fetch(fullChunkApiUrl, {
console.debug(`[Uploader] Sending Part ${partNumber} to ${fullChunkApiUrl}`);
const response = await fetch(fullChunkApiUrl, { // Use modified URL
method: 'POST',
headers: { 'Content-Type': 'application/octet-stream', 'X-Batch-ID': this.batchId },
body: chunk, signal: controller.signal
headers: {
'Content-Type': 'application/octet-stream',
'X-Batch-ID': this.batchId
},
body: chunk,
signal: controller.signal
});
clearTimeout(timeoutId);
if (response.ok) {
const data = await response.json(); // Contains { bytesReceived, progress, completed }
if (attempt > 0) console.log(`[Uploader] Part ${partNumber} success on retry ${attempt}.`);
if (attempt > 0) console.log(`[Uploader] Part ${partNumber} upload successful on retry attempt ${attempt}.`);
else console.debug(`[Uploader] Part ${partNumber} uploaded successfully.`);
if(this.progressElement) this.updateProgressElementInfo('uploading...'); // Reset info
return data; // *** RETURN server data (has 'completed' flag) ***
// *** Use server-provided progress for original progress bar ***
this.updateProgress(data.progress);
this.updateProgressElementInfo('uploading...'); // Reset info message
// *** Return the data which includes the 'completed' flag ***
return data;
} else {
// (Error handling logic for non-OK responses remains the same)
let errorText = `Server error ${response.status}`; try { errorText = (await response.json()).error || errorText } catch(e){}
if (response.status === 404 && attempt > 0) {
console.warn(`[Uploader] 404 on retry (Part ${partNumber}), assuming completed.`);
this.completed = true; // Mark as completed
// this.updateProgress(100); // updateProgress is called from uploadChunks
return { completed: true, progress: 100, bytesReceived: this.file.size };
console.warn(`[Uploader] Received 404 on retry, assuming completed.`);
this.completed = true;
this.updateProgress(100); // Update original progress bar
return { completed: true, progress: 100, bytesReceived: this.file.size }; // Simulate success
}
lastError = new Error(`Failed Part ${partNumber}: ${errorText}`);
console.error(`Attempt ${attempt} failed: ${lastError.message}`);
this.updateProgressElementInfo(`Attempt ${attempt} failed: ${response.statusText}`, 'var(--danger-color)');
}
} catch (error) {
// (Network/Abort error handling remains the same)
lastError = error;
if (error.name === 'AbortError') { console.error(`Part ${partNumber} Attempt ${attempt} timed out.`); this.updateProgressElementInfo(`Attempt ${attempt} timed out`, 'var(--danger-color)');}
else { console.error(`Part ${partNumber} Attempt ${attempt} network error: ${error.message}`); this.updateProgressElementInfo(`Attempt ${attempt} network error`, 'var(--danger-color)'); }
if (error.name === 'AbortError') { console.error(`Attempt ${attempt} timed out.`); this.updateProgressElementInfo(`Attempt ${attempt} timed out`, 'var(--danger-color)');}
else { console.error(`Attempt ${attempt} network error: ${error.message}`); this.updateProgressElementInfo(`Attempt ${attempt} network error`, 'var(--danger-color)'); }
}
// (Retry delay logic remains the same)
if (attempt < this.maxRetries) await new Promise(r => setTimeout(r, Math.min(this.retryDelay * Math.pow(2, attempt), 30000)));
}
console.error(`[Uploader] Part ${partNumber} failed permanently after ${this.maxRetries} retries.`);
} // End retry loop
console.error(`[Uploader] Part ${partNumber} upload failed permanently after ${this.maxRetries} retries.`);
this.updateProgressElementInfo(`Upload failed after ${this.maxRetries} retries`, 'var(--danger-color)');
throw lastError || new Error(`Part ${partNumber} failed after ${this.maxRetries} retries.`);
}
// --- Original Progress Bar UI Methods ---
// (These methods remain identical to the original file content)
createProgressElement() {
if (this.progressElement) return;
const container = document.createElement('div'); container.className = 'progress-container';
const label = document.createElement('div'); label.className = 'progress-label'; label.textContent = this.file.webkitRelativePath || this.file.name;
const progress = document.createElement('div'); progress.className = 'progress';
const bar = document.createElement('div'); bar.className = 'progress-bar';
const status = document.createElement('div'); status.className = 'progress-status';
const info = document.createElement('div'); info.className = 'progress-info'; info.textContent = 'initializing...';
const details = document.createElement('div'); details.className = 'progress-details'; details.textContent = `0 Bytes of ${formatFileSize(this.file.size)} (0.0%)`;
status.appendChild(info); status.appendChild(details); progress.appendChild(bar);
container.appendChild(label); container.appendChild(progress); container.appendChild(status);
if (this.progressElement) return; // Avoid duplicates if called multiple times
const container = document.createElement('div');
container.className = 'progress-container';
container.setAttribute('data-upload-id', this.uploadId || `pending-${this.file.name}`); // Use unique identifier
const label = document.createElement('div');
label.className = 'progress-label';
label.textContent = this.file.webkitRelativePath || this.file.name;
const progress = document.createElement('div');
progress.className = 'progress';
const bar = document.createElement('div');
bar.className = 'progress-bar';
const status = document.createElement('div');
status.className = 'progress-status';
const info = document.createElement('div');
info.className = 'progress-info';
info.textContent = 'initializing...';
const details = document.createElement('div');
details.className = 'progress-details';
details.textContent = `0 Bytes of ${formatFileSize(this.file.size)} (0.0%)`;
status.appendChild(info);
status.appendChild(details);
progress.appendChild(bar);
container.appendChild(label);
container.appendChild(progress);
container.appendChild(status);
document.getElementById('uploadProgress').appendChild(container);
this.progressElement = { container, bar, infoSpan: info, detailsSpan: details };
this.lastUploadTime = Date.now(); this.lastUploadedBytes = 0; this.uploadRate = 0;
this.lastUploadTime = Date.now(); // Initialize for rate calculation
this.lastUploadedBytes = 0;
}
updateProgress(percent) {
if (!this.progressElement) this.createProgressElement(); if (!this.progressElement) return;
// Ensure element exists, create if necessary (though start() usually does)
if (!this.progressElement) this.createProgressElement();
if (!this.progressElement) return; // Still couldn't create it? Bail.
const clampedPercent = Math.max(0, Math.min(100, percent));
this.progressElement.bar.style.width = `${clampedPercent}%`;
const currentTime = Date.now(); const timeDiff = (currentTime - (this.lastUploadTime || currentTime)) / 1000;
const bytesDiff = this.position - this.lastUploadedBytes; // Use this.position for rate too
if (timeDiff > 0.1 && bytesDiff > 0) { this.uploadRate = bytesDiff / timeDiff; this.lastUploadedBytes = this.position; this.lastUploadTime = currentTime; }
else if (timeDiff > 5) { this.uploadRate = 0; }
// Calculate upload rate using server response bytes (as original)
// Note: For S3, data.bytesReceived might not perfectly reflect total uploaded bytes.
// We'll use this.position primarily for display bytes, but rate calculation follows original logic.
const currentTime = Date.now();
const timeDiff = (currentTime - (this.lastUploadTime || currentTime)) / 1000;
// Using this.position for rate might be visually smoother, but let's stick to original logic for now.
// We need the `bytesReceived` from the server response if we want to use it here...
// Let's fallback to using this.position for rate calculation as well, like the progress display.
const bytesDiff = this.position - this.lastUploadedBytes;
if (timeDiff > 0.1 && bytesDiff > 0) {
this.uploadRate = bytesDiff / timeDiff;
this.lastUploadedBytes = this.position;
this.lastUploadTime = currentTime;
} else if (timeDiff > 5) { // Reset rate if stalled
this.uploadRate = 0;
}
// Format rate (same as original)
let rateText = 'Calculating...';
if (this.uploadRate > 0) { const u=['B/s','KB/s','MB/s','GB/s']; let i=0,r=this.uploadRate; while(r>=1024&&i<u.length-1){r/=1024;i++;} rateText=`${r.toFixed(1)} ${u[i]}`; }
if (this.uploadRate > 0) { /* ... format rate ... */ const units=['B/s','KB/s','MB/s','GB/s']; let i=0, r=this.uploadRate; while(r>=1024 && i<units.length-1){r/=1024;i++;} rateText=`${r.toFixed(1)} ${units[i]}`; }
else if (this.position > 0 || clampedPercent > 0) { rateText = '0.0 B/s'; }
// Update info/details (same as original)
const statusText = clampedPercent >= 100 ? 'complete' : 'uploading...';
if (!this.progressElement.infoSpan.textContent.startsWith('Retry') && !this.progressElement.infoSpan.textContent.startsWith('Attempt') && !this.progressElement.infoSpan.textContent.startsWith('Error')) {
if (!this.progressElement.infoSpan.textContent.startsWith('Retry') &&
!this.progressElement.infoSpan.textContent.startsWith('Attempt') &&
!this.progressElement.infoSpan.textContent.startsWith('Error')) {
this.updateProgressElementInfo(`${rateText} · ${statusText}`);
}
this.progressElement.detailsSpan.textContent = `${formatFileSize(this.position)} of ${formatFileSize(this.file.size)} (${clampedPercent.toFixed(1)}%)`;
// Display progress using this.position (client's view) and clampedPercent
this.progressElement.detailsSpan.textContent =
`${formatFileSize(this.position)} of ${formatFileSize(this.file.size)} (${clampedPercent.toFixed(1)}%)`;
// Fade out (same as original)
if (clampedPercent === 100) {
this.progressElement.container.style.opacity = '0.5'; // Original had fade out
setTimeout(() => { if (this.progressElement && this.progressElement.container) { this.progressElement.container.remove(); this.progressElement = null; }}, 2000);
this.progressElement.container.style.opacity = '0.5';
setTimeout(() => {
if (this.progressElement && this.progressElement.container) {
this.progressElement.container.remove();
this.progressElement = null;
}
}, 2000);
}
}
updateProgressElementInfo(message, color = '') { if (this.progressElement && this.progressElement.infoSpan) { this.progressElement.infoSpan.textContent = message; this.progressElement.infoSpan.style.color = color; }}
async cancelUploadOnServer() { if (!this.uploadId || this.completed || this.uploadId.startsWith('zero-byte-')) return; console.log(`[Uploader] Server cancel for ${this.uploadId}`); try { const p=`/api/upload/cancel/${this.uploadId}`; const u=window.BASE_URL+(p.startsWith('/')?p.substring(1):p); fetch(u,{method:'POST'}).catch(e=>console.warn('Cancel req failed:',e));}catch(e){console.warn('Cancel init err:',e);}}
}
updateProgressElementInfo(message, color = '') {
// (Identical to original)
if (this.progressElement && this.progressElement.infoSpan) {
this.progressElement.infoSpan.textContent = message;
this.progressElement.infoSpan.style.color = color;
}
}
// --- Cancellation Logic ---
async cancelUploadOnServer() {
// (Identical to original, just ensure checks use this.completed and this.uploadId)
if (!this.uploadId || this.completed || this.uploadId.startsWith('zero-byte-')) return;
console.log(`[Uploader] Attempting server cancel for ${this.uploadId}`);
try {
const cancelApiUrlPath = `/api/upload/cancel/${this.uploadId}`;
const fullUrl = window.BASE_URL + (cancelApiUrlPath.startsWith('/') ? cancelApiUrlPath.substring(1) : cancelApiUrlPath);
fetch(fullUrl, { method: 'POST' }).catch(err => console.warn(`Cancel request failed:`, err));
} catch (e) { console.warn(`Error initiating cancel:`, e); }
}
} // End FileUploader Class
// --- Original UI Handlers and Logic ---
// (All the following code remains identical to the original file)
const dropZone = document.getElementById('dropZone');
const fileInput = document.getElementById('fileInput');
const folderInput = document.getElementById('folderInput');
const fileListDiv = document.getElementById('fileList'); // Original div for list
const fileList = document.getElementById('fileList'); // Refers to the original div#fileList
const uploadButton = document.getElementById('uploadButton');
let filesToUpload = []; // Use a different name than original `files` for clarity
let filesToUpload = []; // Renamed variable
async function getAllFileEntries(dataTransferItems) { /* ... (original implementation from previous message) ... */
console.debug('Starting getAllFileEntries with items:', Array.from(dataTransferItems).map(item => ({ kind: item.kind, type: item.type })));
// --- Drag and Drop Folder Handling (getAllFileEntries) ---
async function getAllFileEntries(dataTransferItems) {
// (Keep original implementation)
console.debug('Starting getAllFileEntries with items:', Array.from(dataTransferItems).map(item => ({ kind: item.kind, type: item.type })));
let fileEntries = []; let rootFolderName = null;
async function traverseEntry(entry, path = '') {
console.debug('Traversing entry:', { name: entry.name, isFile: entry.isFile, isDirectory: entry.isDirectory, currentPath: path });
if (entry.isFile) {
const file = await new Promise((resolve, reject) => entry.file(f => {
const fileWithPath = new File([f], entry.name, { type: f.type, lastModified: f.lastModified });
@@ -312,137 +445,137 @@
const entryPromises = Array.from(dataTransferItems).map(item => item.webkitGetAsEntry()).filter(Boolean).map(entry => traverseEntry(entry));
await Promise.all(entryPromises);
fileEntries.sort((a, b) => (a.webkitRelativePath || a.name).localeCompare(b.webkitRelativePath || b.name));
console.debug('getAllFileEntries result:', fileEntries.map(f=>f.webkitRelativePath || f.name));
return fileEntries;
} catch (error) { console.error('Error in getAllFileEntries:', error); throw error; }
}
['dragenter', 'dragover', 'dragleave', 'drop'].forEach(ev => { dropZone.addEventListener(ev, preventDefaults, false); document.body.addEventListener(ev, preventDefaults, false); });
['dragenter', 'dragover'].forEach(ev => dropZone.addEventListener(ev, highlight, false));
['dragleave', 'drop'].forEach(ev => dropZone.addEventListener(ev, unhighlight, false));
// --- Event Listeners (Original) ---
['dragenter', 'dragover', 'dragleave', 'drop'].forEach(ev => { dropZone.addEventListener(ev, preventDefaults); document.body.addEventListener(ev, preventDefaults); });
['dragenter', 'dragover'].forEach(ev => dropZone.addEventListener(ev, highlight));
['dragleave', 'drop'].forEach(ev => dropZone.addEventListener(ev, unhighlight));
dropZone.addEventListener('drop', handleDrop);
fileInput.addEventListener('change', handleFilesFromInput);
folderInput.addEventListener('change', handleFilesFromInput);
fileInput.addEventListener('change', handleFilesFromInput); // Use renamed handler
folderInput.addEventListener('change', handleFilesFromInput); // Use renamed handler
uploadButton.addEventListener('click', startUploads);
// --- Event Handler Functions (Original) ---
function preventDefaults(e) { e.preventDefault(); e.stopPropagation(); }
function highlight() { dropZone.classList.add('highlight'); }
function unhighlight() { dropZone.classList.remove('highlight'); }
function highlight(e) { dropZone.classList.add('highlight'); }
function unhighlight(e) { dropZone.classList.remove('highlight'); }
async function handleDrop(e) {
// Use original logic, just assign to filesToUpload
const items = e.dataTransfer.items;
fileListDiv.innerHTML = ''; // Clear old list display
uploadButton.style.display = 'none';
const loadingItem = document.createElement('div'); loadingItem.className = 'file-item loading'; loadingItem.textContent = 'Processing dropped items...'; fileListDiv.appendChild(loadingItem);
try {
let newFiles;
if (items && items.length > 0 && items[0].webkitGetAsEntry) newFiles = await getAllFileEntries(items);
else newFiles = [...e.dataTransfer.files].filter(f => f.size >= 0);
if (newFiles.length === 0) { loadingItem.textContent = 'No files found.'; setTimeout(() => loadingItem.remove(), 2000); return; }
filesToUpload = newFiles; updateFileList();
if (AUTO_UPLOAD) startUploads(); else if (filesToUpload.length > 0) uploadButton.style.display = 'block';
} catch (error) { console.error('Error handling drop:', error); loadingItem.textContent = `Error: ${error.message}`; loadingItem.style.color = 'var(--danger-color)'; setTimeout(() => {loadingItem.remove(); updateFileList();}, 3000); filesToUpload = []; }
finally { if (loadingItem.parentNode === fileListDiv && filesToUpload.length > 0) loadingItem.remove(); } // Remove loading only if files are shown
if (items && items.length > 0 && items[0].webkitGetAsEntry) {
const loadingItem = document.createElement('div'); loadingItem.className = 'file-item loading'; loadingItem.textContent = 'Processing dropped items...'; fileList.innerHTML = ''; fileList.appendChild(loadingItem); uploadButton.style.display = 'none';
try {
const newFiles = await getAllFileEntries(items); if (newFiles.length === 0) throw new Error('No valid files found.');
filesToUpload = newFiles; updateFileList(); if (AUTO_UPLOAD) startUploads(); else uploadButton.style.display = 'block';
} catch (error) { console.error('Error processing dropped items:', error); loadingItem.textContent = `Error: ${error.message}`; loadingItem.style.color = 'var(--danger-color)'; setTimeout(() => loadingItem.remove(), 3000); filesToUpload = []; updateFileList(); }
finally { if (loadingItem.parentNode === fileList) loadingItem.remove(); }
} else {
filesToUpload = [...e.dataTransfer.files].filter(f => f.size >= 0); updateFileList(); if (AUTO_UPLOAD) startUploads(); else if (filesToUpload.length > 0) uploadButton.style.display = 'block';
}
}
function handleFilesFromInput(e) {
// Use original logic, just assign to filesToUpload
const input = e.target; const selectedFiles = [...input.files];
if (input.id === 'folderInput' && selectedFiles.length > 0 && !('webkitRelativePath' in selectedFiles[0])) { alert('Folder upload not fully supported.'); filesToUpload = []; }
else filesToUpload = selectedFiles.filter(f => f.size >= 0);
else { filesToUpload = selectedFiles.filter(f => f.size >= 0); if (input.id === 'folderInput') console.log('Folder files:', filesToUpload.map(f => ({ name: f.name, path: f.webkitRelativePath }))); }
updateFileList();
if (AUTO_UPLOAD && filesToUpload.length > 0) startUploads(); else if (filesToUpload.length > 0) uploadButton.style.display = 'block'; else uploadButton.style.display = 'none';
if (AUTO_UPLOAD && filesToUpload.length > 0) startUploads();
else if (filesToUpload.length > 0) uploadButton.style.display = 'block'; else uploadButton.style.display = 'none';
input.value = '';
}
function updateFileList() { // Original simple list display
// --- File List UI Update (Original Simple List) ---
function updateFileList() {
// Keep the original simpler list rendering
console.debug('Updating original file list UI for', filesToUpload.length, 'files');
fileListDiv.innerHTML = '';
fileList.innerHTML = ''; // Clear current list
if (filesToUpload.length === 0) {
fileListDiv.innerHTML = '<div class="file-item placeholder">No files selected.</div>';
fileList.innerHTML = '<div class="file-item placeholder">No files selected.</div>'; // Show placeholder in original div
uploadButton.style.display = 'none';
return;
}
filesToUpload.forEach(file => {
const fileItem = document.createElement('div');
fileItem.className = 'file-item';
fileItem.className = 'file-item'; // Use original class
const displayName = file.webkitRelativePath || file.name;
fileItem.innerHTML = `📄 ${displayName} (${formatFileSize(file.size)})`;
fileListDiv.appendChild(fileItem);
fileList.appendChild(fileItem);
});
uploadButton.style.display = (!AUTO_UPLOAD && filesToUpload.length > 0) ? 'block' : 'none';
}
// Add original styles for list items (if they were in the script, otherwise they are in styles.css)
const style = document.createElement('style');
style.textContent = `
.file-list { /* Original styles for the list container */ margin-top: 20px; display: flex; flex-direction: column; gap: 10px; }
.file-item { background: var(--container-bg); padding: 10px 15px; border-radius: 5px; text-align: left; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }
.file-item.placeholder { text-align: center; opacity: 0.6; box-shadow: none; background: transparent; border: none; } /* Ensure placeholder has no border if list had one */
.file-item.loading { text-align: center; padding: 15px; background: var(--container-bg); border-radius: 5px; animation: pulse 1.5s infinite; }
.file-item { background: var(--container-bg); padding: 10px 15px; border-radius: 5px; text-align: left; box-shadow: 0 2px 4px rgba(0,0,0,0.1); margin-bottom: 10px; }
.file-item.placeholder { text-align: center; opacity: 0.6; box-shadow: none; background: transparent; }
.file-item.loading { text-align: center; padding: 15px; background: var(--container-bg); border-radius: 5px; margin: 10px 0; animation: pulse 1.5s infinite; }
@keyframes pulse { 0% { opacity: 0.6; } 50% { opacity: 1; } 100% { opacity: 0.6; } }
/* Styles for the separate progress bars, from original */
#uploadProgress { margin: 20px 0; display: flex; flex-direction: column; gap: 15px; }
.progress-container { background: var(--container-bg); padding: 15px; border-radius: 5px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); transition: opacity 0.5s ease-out; }
.progress-label { text-align: left; margin-bottom: 8px; color: var(--text-color); font-size: 0.9rem; }
.progress-status { display: flex; justify-content: space-between; align-items: center; font-size: 0.8rem; color: var(--text-color); opacity: 0.8; margin-top: 8px; }
.progress-info { text-align: left; } .progress-details { text-align: right; }
.progress { background: var(--progress-bg); border-radius: 10px; height: 8px; overflow: hidden; margin-top: 8px; margin-bottom: 8px; }
.progress-bar { height: 100%; background: var(--highlight-color); transition: width 0.3s ease; }
/* Ensure progress bar styles don't conflict if somehow left over */
.progress-container { transition: opacity 0.5s ease-out; }
`;
document.head.appendChild(style);
// --- Upload Process (Original Structure) ---
async function startUploads() {
if (filesToUpload.length === 0) { Toastify({ text: "No files selected.", duration: 3000 }).showToast(); return; }
uploadButton.disabled = true; uploadButton.textContent = 'Uploading...';
document.getElementById('uploadProgress').innerHTML = ''; // Clear old progress bars
uploadButton.disabled = true;
uploadButton.textContent = 'Uploading...';
document.getElementById('uploadProgress').innerHTML = ''; // Clear the separate progress bar container
const batchId = generateBatchId();
let successfulUploads = 0, failedUploads = 0;
let successfulUploads = 0;
let failedUploads = 0;
// Process uploads sequentially (same loop as original)
for (const file of filesToUpload) {
// --- NEW: Increment active upload counter ---
activeUploadCount++;
const uploader = new FileUploader(file, batchId);
const uploader = new FileUploader(file, batchId); // Create uploader instance
try {
if (await uploader.start()) successfulUploads++;
else failedUploads++;
const success = await uploader.start(); // Start the upload
if (success) successfulUploads++; else failedUploads++;
} catch (error) {
console.error(`Unhandled error during upload start for ${file.name}:`, error);
failedUploads++;
// Progress bar might show error via uploader's catch block
}
catch (error) {
console.error(`Unhandled error for ${file.name}:`, error);
failedUploads++;
} finally {
// --- NEW: Decrement active upload counter ---
activeUploadCount--;
}
}
const totalFiles = filesToUpload.length;
let msg = `Uploaded ${successfulUploads} of ${totalFiles} files`;
let bg = successfulUploads === totalFiles ? "#4CAF50" : (successfulUploads > 0 ? "#ff9800" : "#f44336");
Toastify({ text: msg, duration: 3000, gravity: "bottom", position: "right", style: { background: bg } }).showToast();
} // End for...of loop
filesToUpload = []; updateFileList();
uploadButton.disabled = false; uploadButton.textContent = 'Upload Files'; uploadButton.style.display = 'none';
fileInput.value = ''; folderInput.value = '';
// --- Show Summary Toast (Original logic) ---
const totalFiles = filesToUpload.length;
let toastMessage = `Uploaded ${successfulUploads} of ${totalFiles} files`;
let toastBackground = successfulUploads === totalFiles ? "#4CAF50" : "#f44336";
if (successfulUploads > 0 && failedUploads > 0) toastBackground = "#ff9800"; // Orange if partial success
Toastify({ text: toastMessage, duration: 3000, gravity: "bottom", position: "right", style: { background: toastBackground } }).showToast();
// --- Reset UI State (Original logic) ---
filesToUpload = []; // Clear the list of files
updateFileList(); // Clear the displayed file list
// Progress bars are removed automatically by the uploader on completion/error
uploadButton.disabled = false;
uploadButton.textContent = 'Upload Files';
uploadButton.style.display = 'none';
fileInput.value = '';
folderInput.value = '';
}
function setTheme(theme) { document.documentElement.setAttribute('data-theme', theme); localStorage.setItem('theme', theme); const m=document.querySelectorAll('.theme-toggle-icon .moon'); const s=document.querySelectorAll('.theme-toggle-icon .sun'); if(theme==='dark'){m.forEach(p=>p.style.display='none');s.forEach(p=>p.style.display='');}else{m.forEach(p=>p.style.display='');s.forEach(p=>p.style.display='none');} }
// --- Theme Management (Original) ---
function setTheme(theme) { document.documentElement.setAttribute('data-theme', theme); localStorage.setItem('theme', theme); const m=document.querySelectorAll('.moon'); const s=document.querySelectorAll('.sun'); if(theme==='dark'){m.forEach(p=>p.style.display='none');s.forEach(p=>p.style.display='');}else{m.forEach(p=>p.style.display='');s.forEach(p=>p.style.display='none');} }
function toggleTheme() { const c=document.documentElement.getAttribute('data-theme'); setTheme(c==='dark'?'light':'dark'); }
const savedTheme = localStorage.getItem('theme'); const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches; setTheme(savedTheme || (prefersDark ? 'dark' : 'light'));
updateFileList(); // Initialize list on load
const savedTheme = localStorage.getItem('theme') || (window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light'); setTheme(savedTheme);
// --- Initial Setup ---
updateFileList(); // Initialize the simple file list display
// --- NEW: beforeunload event listener ---
window.addEventListener('beforeunload', function (e) {
if (activeUploadCount > 0) {
// Standard message for the confirmation dialog
const confirmationMessage = 'Uploads are in progress. If you leave this page, ongoing uploads will be interrupted. Are you sure you want to leave?';
// For modern browsers:
e.returnValue = confirmationMessage;
// For older browsers:
return confirmationMessage;
}
});
</script>
<footer>
{{FOOTER_CONTENT}}
</footer>
</body>
</html>

View File: public/styles.css

@@ -39,7 +39,7 @@ body {
display: flex;
justify-content: center;
padding-top: 2rem;
padding-bottom: 150px;
padding-bottom: 80px;
color: var(--text-color);
transition: background-color 0.3s ease, color 0.3s ease;
}
@@ -47,7 +47,7 @@ body {
.container {
width: 100%;
max-width: 600px;
padding: 20px 20px 80px 20px;
padding: 20px;
text-align: center;
position: relative;
}
@@ -364,19 +364,20 @@ button:disabled {
/* Footer Styles */
footer {
position: fixed;
bottom: 0;
bottom: 10px;
left: 0;
right: 0;
width: 100%;
max-width: 600px;
margin-left: auto;
margin-right: auto;
padding: 15px;
text-align: center;
font-size: 0.85rem;
color: var(--text-color);
opacity: 0.9;
opacity: 0.7;
border-top: 1px solid var(--border-color);
transition: background-color 0.3s ease, color 0.3s ease;
background-color: var(--bg-color);
z-index: 100;
}
footer a {

View File: src/app.js

@@ -136,11 +136,6 @@ app.get('/login.html', (req, res) => {
}
});
// --- Health Check Endpoint ---
app.get('/health', (req, res) => {
res.status(200).json({ status: 'UP', message: 'Server is healthy' });
});
// --- Static File Serving ---
// Serve static files (CSS, JS, assets) from the 'public' directory
// Use express.static middleware, placed AFTER specific HTML routes

View File: src/config/index.js

@@ -1,13 +1,39 @@
// File: src/config/index.js
require('dotenv').config();
const { validatePin } = require('../utils/security');
const logger = require('../utils/logger');
const logger = require('../utils/logger'); // Use the default logger instance
const fs = require('fs');
const path = require('path');
// const { version } = require('../../package.json'); // version not currently used, can be removed or kept
const { version } = require('../../package.json'); // Get version from package.json
// --- Environment Variables Reference ---
/* (Comments listing all ENV vars - keep as is) */
/*
STORAGE_TYPE - Storage backend ('local' or 's3', default: 'local')
// --- Local Storage ---
UPLOAD_DIR - Directory for uploads (Docker/production, if STORAGE_TYPE=local)
LOCAL_UPLOAD_DIR - Directory for uploads (local dev, fallback: './local_uploads', if STORAGE_TYPE=local)
// --- S3 Storage ---
S3_REGION - AWS Region for S3 Bucket (required if STORAGE_TYPE=s3)
S3_BUCKET_NAME - Name of the S3 Bucket (required if STORAGE_TYPE=s3)
S3_ACCESS_KEY_ID - S3 Access Key ID (required if STORAGE_TYPE=s3)
S3_SECRET_ACCESS_KEY - S3 Secret Access Key (required if STORAGE_TYPE=s3)
S3_ENDPOINT_URL - Custom S3 endpoint URL (optional, for non-AWS S3)
S3_FORCE_PATH_STYLE - Force path-style access (true/false, optional, for non-AWS S3)
// --- Common ---
PORT - Port for the server (default: 3000)
NODE_ENV - Node environment (default: 'development')
BASE_URL - Base URL for the app (default: http://localhost:${PORT})
MAX_FILE_SIZE - Max upload size in MB (default: 1024)
AUTO_UPLOAD - Enable auto-upload (true/false, default: false)
DUMBDROP_PIN - Security PIN for uploads (required for protected endpoints)
DUMBDROP_TITLE - Site title (default: 'DumbDrop')
APPRISE_URL - Apprise notification URL (optional)
APPRISE_MESSAGE - Notification message template (default provided)
APPRISE_SIZE_UNIT - Size unit for notifications (optional)
ALLOWED_EXTENSIONS - Comma-separated list of allowed file extensions (optional)
ALLOWED_IFRAME_ORIGINS - Comma-separated list of allowed iframe origins (optional)
CLIENT_MAX_RETRIES - Max retries for client chunk uploads (default: 5)
DEMO_MODE - Enable demo mode (true/false, default: false)
*/
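// Illustrative sketch (not part of this commit): with the S3_* variables above, the
// S3 adapter would presumably construct its client roughly as follows, assuming the
// @aws-sdk/client-s3 package:
//
//   const { S3Client } = require('@aws-sdk/client-s3');
//   const s3 = new S3Client({
//     region: config.s3Region,
//     endpoint: config.s3EndpointUrl || undefined, // custom endpoint for non-AWS S3
//     forcePathStyle: config.s3ForcePathStyle,     // path-style access for non-AWS S3
//     credentials: {
//       accessKeyId: config.s3AccessKeyId,
//       secretAccessKey: config.s3SecretAccessKey,
//     },
//   });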
// --- Helper for clear configuration logging ---
const logConfig = (message, level = 'info') => {
@@ -18,7 +44,7 @@ const logConfig = (message, level = 'info') => {
// --- Default configurations ---
const DEFAULT_PORT = 3000;
const DEFAULT_SITE_TITLE = 'DumbDrop';
const DEFAULT_BASE_URL_PREFIX = 'http://localhost'; // Prefix, port added later
const DEFAULT_BASE_URL = 'http://localhost:3000';
const DEFAULT_CLIENT_MAX_RETRIES = 5;
const DEFAULT_STORAGE_TYPE = 'local';
@@ -28,62 +54,81 @@ const logAndReturn = (key, value, isDefault = false, sensitive = false) => {
return value;
};
// --- Utility to detect if running in local development mode ---
// (This helps decide whether to *create* LOCAL_UPLOAD_DIR, but doesn't affect UPLOAD_DIR usage in Docker)
function isLocalDevelopment() {
return process.env.NODE_ENV !== 'production' && !process.env.UPLOAD_DIR;
}
/**
* Determine the local upload directory path.
* Only relevant when STORAGE_TYPE is 'local'.
* @returns {string|null} The path, or null if storage is not local.
*/
function determineLocalUploadDirectory() {
if (process.env.STORAGE_TYPE && process.env.STORAGE_TYPE.toLowerCase() !== 'local') {
return null; // Not using local storage
}
let uploadDir;
if (process.env.UPLOAD_DIR) {
uploadDir = process.env.UPLOAD_DIR;
// logger.info(`[Local Storage] Upload directory set from UPLOAD_DIR: ${uploadDir}`); // Logger might not be fully init here
logger.info(`[Local Storage] Upload directory set from UPLOAD_DIR: ${uploadDir}`);
} else if (process.env.LOCAL_UPLOAD_DIR) {
uploadDir = process.env.LOCAL_UPLOAD_DIR;
// logger.warn(`[Local Storage] Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`);
logger.warn(`[Local Storage] Upload directory using LOCAL_UPLOAD_DIR fallback: ${uploadDir}`);
} else {
uploadDir = './local_uploads';
// logger.warn(`[Local Storage] Upload directory using default fallback: ${uploadDir}`);
uploadDir = './local_uploads'; // Default local path
logger.warn(`[Local Storage] Upload directory using default fallback: ${uploadDir}`);
}
// logger.info(`[Local Storage] Final upload directory path: ${path.resolve(uploadDir)}`);
return path.resolve(uploadDir); // Always resolve to absolute
logger.info(`[Local Storage] Final upload directory path: ${path.resolve(uploadDir)}`);
return uploadDir;
}
/**
* Ensure the local upload directory exists (if applicable and in local dev).
*/
function ensureLocalUploadDirExists(dirPath) {
if (!dirPath || !isLocalDevelopment()) {
return;
return; // Only create if using local storage in a local dev environment
}
try {
if (!fs.existsSync(dirPath)) {
fs.mkdirSync(dirPath, { recursive: true });
console.log(`[INFO] CONFIGURATION: [Local Storage] Created local upload directory: ${dirPath}`);
logger.info(`[Local Storage] Created local upload directory: ${dirPath}`);
} else {
console.log(`[INFO] CONFIGURATION: [Local Storage] Local upload directory exists: ${dirPath}`);
logger.info(`[Local Storage] Local upload directory exists: ${dirPath}`);
}
// Basic writability check
fs.accessSync(dirPath, fs.constants.W_OK);
console.log(`[SUCCESS] CONFIGURATION: [Local Storage] Local upload directory is writable: ${dirPath}`);
logger.success(`[Local Storage] Local upload directory is writable: ${dirPath}`);
} catch (err) {
console.error(`[ERROR] CONFIGURATION: [Local Storage] Failed to create or access local upload directory: ${dirPath}. Error: ${err.message}`);
logger.error(`[Local Storage] Failed to create or access local upload directory: ${dirPath}. Error: ${err.message}`);
throw new Error(`Upload directory "${dirPath}" is not accessible or writable.`);
}
}
// --- Determine Storage Type ---
const storageTypeInput = process.env.STORAGE_TYPE || DEFAULT_STORAGE_TYPE;
const storageType = ['local', 's3'].includes(storageTypeInput.toLowerCase())
? storageTypeInput.toLowerCase()
: DEFAULT_STORAGE_TYPE;
if (storageTypeInput.toLowerCase() !== storageType) {
logger.warn(`Invalid STORAGE_TYPE "${storageTypeInput}", using default: "${storageType}"`);
}
// Determine and potentially ensure the local upload directory
const resolvedLocalUploadDir = determineLocalUploadDirectory(); // Will be null if STORAGE_TYPE is 's3'
if (resolvedLocalUploadDir) {
ensureLocalUploadDirExists(resolvedLocalUploadDir);
}
/**
* Function to parse the FOOTER_LINKS environment variable
* @param {string} linksString - The input string containing links
* @returns {Array} - An array of objects containing text and URL
*/
const parseFooterLinks = (linksString) => {
if (!linksString) return [];
return linksString.split(',')
@@ -91,45 +136,85 @@ const parseFooterLinks = (linksString) => {
const parts = linkPair.split('@').map(part => part.trim());
if (parts.length === 2 && parts[0] && parts[1] && (parts[1].startsWith('http://') || parts[1].startsWith('https://'))) {
return { text: parts[0], url: parts[1] };
} else {
logger.warn(`Invalid format or URL in FOOTER_LINKS: "${linkPair}". Expected "Text @ http(s)://URL". Skipping.`);
return null;
}
})
.filter(link => link !== null);
};
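For reference, a quick sketch of the expected input and output (illustrative values only; the "Text @ http(s)://URL" format is what the parser above enforces):

// FOOTER_LINKS="Docs @ https://example.com/docs, Source @ https://example.com/repo"
// parseFooterLinks(process.env.FOOTER_LINKS) returns:
// [ { text: 'Docs', url: 'https://example.com/docs' },
//   { text: 'Source', url: 'https://example.com/repo' } ]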
/**
* Application configuration
* Loads and validates environment variables
*/
const config = {
// =====================
// Core Settings
// =====================
port: parseInt(process.env.PORT || DEFAULT_PORT, 10),
nodeEnv: process.env.NODE_ENV || 'development',
baseUrl: process.env.BASE_URL || `${DEFAULT_BASE_URL.replace(/:3000$/, '')}:${process.env.PORT || DEFAULT_PORT}/`, // Ensure trailing slash
isDemoMode: process.env.DEMO_MODE === 'true',
// =====================
// Storage Settings
// =====================
storageType: logAndReturn('STORAGE_TYPE', storageType, storageType === DEFAULT_STORAGE_TYPE),
/**
* The primary directory for storing files or metadata.
* If STORAGE_TYPE=local, this is where files are stored.
* If STORAGE_TYPE=s3, this is where '.metadata' lives.
* Defaults to the determined local path, or to './uploads' when S3 is used.
*/
uploadDir: resolvedLocalUploadDir || path.resolve('./uploads'), // S3 needs a place for metadata too
// --- S3 Specific (only relevant if storageType is 's3') ---
s3Region: process.env.S3_REGION || null,
s3BucketName: process.env.S3_BUCKET_NAME || null,
s3AccessKeyId: process.env.S3_ACCESS_KEY_ID || null,
s3SecretAccessKey: process.env.S3_SECRET_ACCESS_KEY || null,
s3EndpointUrl: process.env.S3_ENDPOINT_URL || null, // Defaults to null (standard AWS endpoint)
s3ForcePathStyle: process.env.S3_FORCE_PATH_STYLE === 'true', // Defaults to false
// =====================
// Upload Behavior
// =====================
maxFileSize: (() => {
const sizeInMB = parseInt(process.env.MAX_FILE_SIZE || '1024', 10);
if (isNaN(sizeInMB) || sizeInMB <= 0) {
logger.error('Invalid MAX_FILE_SIZE, must be a positive number. Using default of 1024MB.');
return 1024 * 1024 * 1024;
}
return sizeInMB * 1024 * 1024; // Convert MB to bytes
})(),
autoUpload: process.env.AUTO_UPLOAD === 'true',
allowedExtensions: process.env.ALLOWED_EXTENSIONS ?
process.env.ALLOWED_EXTENSIONS.split(',').map(ext => ext.trim().toLowerCase()).filter(Boolean).map(ext => (ext.startsWith('.') ? ext : `.${ext}`)) : // Normalize each entry to have a leading dot
null,
clientMaxRetries: (() => {
const envValue = process.env.CLIENT_MAX_RETRIES;
const defaultValue = DEFAULT_CLIENT_MAX_RETRIES;
if (envValue === undefined) return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
const retries = parseInt(envValue, 10);
if (isNaN(retries) || retries < 0) {
logger.warn(`Invalid CLIENT_MAX_RETRIES value: "${envValue}". Using default: ${defaultValue}`);
return logAndReturn('CLIENT_MAX_RETRIES', defaultValue, true);
}
return logAndReturn('CLIENT_MAX_RETRIES', retries);
})(),
// =====================
// Security
// =====================
pin: validatePin(process.env.DUMBDROP_PIN),
allowedIframeOrigins: process.env.ALLOWED_IFRAME_ORIGINS ?
process.env.ALLOWED_IFRAME_ORIGINS.split(',').map(origin => origin.trim()).filter(Boolean) :
null,
// =====================
// UI & Notifications
// =====================
siteTitle: process.env.DUMBDROP_TITLE || DEFAULT_SITE_TITLE,
footerLinks: parseFooterLinks(process.env.FOOTER_LINKS),
appriseUrl: process.env.APPRISE_URL || null,
@@ -137,86 +222,113 @@ const config = {
appriseSizeUnit: process.env.APPRISE_SIZE_UNIT || 'Auto',
};
// --- Log Configuration (after the config object is built, so the logger is definitely available) ---
logger.info(`--- Configuration Start ---`);
logConfig(`NODE_ENV: ${config.nodeEnv}`);
logConfig(`PORT: ${config.port}`);
logConfig(`BASE_URL: ${config.baseUrl}`);
logConfig(`DEMO_MODE: ${config.isDemoMode}`);
if (config.storageType === 'local') {
logConfig(`Upload Directory (Local): ${config.uploadDir}`);
} else {
logConfig(`Metadata Directory (S3 Mode): ${config.uploadDir}`); // Clarify role in S3 mode
logAndReturn('S3_REGION', config.s3Region);
logAndReturn('S3_BUCKET_NAME', config.s3BucketName);
logAndReturn('S3_ACCESS_KEY_ID', config.s3AccessKeyId, false, true); // Sensitive
logAndReturn('S3_SECRET_ACCESS_KEY', config.s3SecretAccessKey, false, true); // Sensitive
if (config.s3EndpointUrl) logAndReturn('S3_ENDPOINT_URL', config.s3EndpointUrl);
logAndReturn('S3_FORCE_PATH_STYLE', config.s3ForcePathStyle);
}
logConfig(`Max File Size: ${config.maxFileSize / (1024 * 1024)}MB`);
logConfig(`Auto Upload: ${config.autoUpload}`);
if (config.allowedExtensions) logConfig(`Allowed Extensions: ${config.allowedExtensions.join(', ')}`);
if (config.pin) logAndReturn('DUMBDROP_PIN', config.pin, false, true); // Sensitive
if (config.allowedIframeOrigins) logConfig(`Allowed Iframe Origins: ${config.allowedIframeOrigins.join(', ')}`);
if (config.appriseUrl) logAndReturn('APPRISE_URL', config.appriseUrl);
logConfig(`Client Max Retries: ${config.clientMaxRetries}`);
logger.info(`--- Configuration End ---`);
// --- Configuration Validation ---
function validateConfig() {
const errors = [];
if (!config.port || config.port <= 0 || config.port > 65535) {
errors.push('PORT must be a valid number between 1 and 65535');
}
if (config.maxFileSize <= 0) {
errors.push('MAX_FILE_SIZE must be greater than 0');
}
// Validate BASE_URL format and trailing slash
try {
new URL(config.baseUrl);
if (!config.baseUrl.endsWith('/')) {
errors.push('BASE_URL must end with a trailing slash ("/"). Current value: ' + config.baseUrl);
}
} catch (err) {
errors.push(`BASE_URL must be a valid URL. Error: ${err.message}`);
}
// Validate S3 configuration if STORAGE_TYPE is 's3'
if (config.storageType === 's3') {
if (!config.s3Region) errors.push('S3_REGION is required when STORAGE_TYPE is "s3"');
if (!config.s3BucketName) errors.push('S3_BUCKET_NAME is required when STORAGE_TYPE is "s3"');
if (!config.s3AccessKeyId) errors.push('S3_ACCESS_KEY_ID is required when STORAGE_TYPE is "s3"');
if (!config.s3SecretAccessKey) errors.push('S3_SECRET_ACCESS_KEY is required when STORAGE_TYPE is "s3"');
if (config.s3ForcePathStyle && !config.s3EndpointUrl) {
logger.warn('S3_FORCE_PATH_STYLE is true, but S3_ENDPOINT_URL is not set. This typically requires a custom endpoint.');
}
}
// Validate the local storage directory only if type is 'local'
if (config.storageType === 'local') {
if (!config.uploadDir) {
errors.push('Upload directory could not be determined for local storage.');
} else {
// Check existence and writability again (ensureLocalUploadDirExists may have been skipped or failed)
try {
fs.accessSync(config.uploadDir, fs.constants.W_OK);
} catch (err) {
errors.push(`Local upload directory "${config.uploadDir}" is not writable or does not exist.`);
}
}
}
// Check the metadata directory regardless of storage type, since S3 mode uses it too
try {
const metadataBase = path.resolve(config.uploadDir); // Parent directory of '.metadata'
if (!fs.existsSync(metadataBase)) {
fs.mkdirSync(metadataBase, { recursive: true });
logger.info(`Created base directory for metadata: ${metadataBase}`);
}
fs.accessSync(metadataBase, fs.constants.W_OK);
} catch (err) {
errors.push(`Cannot access or create directory for metadata storage at "${config.uploadDir}". Error: ${err.message}`);
}
if (config.nodeEnv === 'production' && !config.appriseUrl) {
logger.info('Apprise notifications disabled (APPRISE_URL not set).');
}
if (errors.length > 0) {
logger.error('--- CONFIGURATION ERRORS ---');
errors.forEach(err => logger.error(`- ${err}`));
logger.error('-----------------------------');
throw new Error('Configuration validation failed. Please check environment variables.');
}
logger.success('Configuration validated successfully.');
}
// Freeze configuration to prevent modifications after logging and validation
Object.freeze(config);
module.exports = { config, validateConfig };
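A minimal sketch of how these exports are typically consumed at startup (the require path is an assumption):

const { config, validateConfig } = require('./config');
validateConfig(); // Throws if required environment variables are missing or invalid
console.log(`Storage type: ${config.storageType}`);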

View File

@@ -16,118 +16,157 @@ const { isDemoMode } = require('../utils/demoMode'); // Keep demo check for spec
// Initialize upload
router.post('/init', async (req, res) => {
// Note: Demo mode might bypass storage adapter logic via middleware or the adapter factory itself.
// If specific demo responses are needed here, keep this check.
if (isDemoMode()) {
// Simplified demo response (assuming the demo adapter handles non-persistence)
const { filename = 'demo_file', fileSize = 0 } = req.body;
const demoUploadId = 'demo-' + Math.random().toString(36).substr(2, 9);
logger.info(`[DEMO] Init request for ${filename}, size ${fileSize}. Returning ID ${demoUploadId}`);
if (Number(fileSize) === 0) {
logger.success(`[DEMO] Simulated completion of zero-byte file: ${filename}`);
// Potentially call demoAdapter.completeUpload or similar mock logic if needed
}
return res.json({ uploadId: demoUploadId });
}
const { filename, fileSize } = req.body;
const clientBatchId = req.headers['x-batch-id']; // Adapter might use this
// --- Basic validations ---
if (!filename) return res.status(400).json({ error: 'Missing filename' });
if (fileSize === undefined || fileSize === null) return res.status(400).json({ error: 'Missing fileSize' });
const size = Number(fileSize);
if (isNaN(size) || size < 0) return res.status(400).json({ error: 'Invalid file size' });
// --- Max File Size Check ---
if (size > config.maxFileSize) {
logger.warn(`Upload rejected: File size ${size} exceeds limit ${config.maxFileSize} for ${filename}`);
return res.status(413).json({ error: 'File too large', limit: config.maxFileSize });
}
// --- Extension Check (performed before handing off to the adapter) ---
if (config.allowedExtensions && config.allowedExtensions.length > 0) {
const fileExt = path.extname(filename).toLowerCase();
// Check if the extracted extension (including '.') is in the allowed list
if (!fileExt || !config.allowedExtensions.includes(fileExt)) {
logger.warn(`Upload rejected: File type not allowed: ${filename} (Extension: ${fileExt || 'none'})`);
return res.status(400).json({ error: 'File type not allowed', receivedExtension: fileExt || 'none' });
}
logger.debug(`File extension ${fileExt} allowed for ${filename}`);
}
try {
// Delegate initialization to the storage adapter
const result = await storageAdapter.initUpload(filename, size, clientBatchId);
// Respond with the uploadId generated by the adapter/system
res.json({ uploadId: result.uploadId });
} catch (err) {
logger.error(`[Route /init] Upload initialization failed for "${filename}": ${err.name} - ${err.message}`, err.stack);
// Map common errors to client-safe responses
let statusCode = 500;
let clientMessage = 'Failed to initialize upload.';
if (err.message.includes('Invalid batch ID format')) {
statusCode = 400; clientMessage = err.message;
} else if (err.name === 'NoSuchBucket' || err.name === 'AccessDenied') { // S3-specific
statusCode = 500; clientMessage = 'Storage configuration error.';
} else if (err.code === 'EACCES' || err.code === 'EPERM' || err.message.includes('writable') || err.message.includes('metadata directory')) { // Local-specific
statusCode = 500; clientMessage = 'Storage permission or access error.';
} else if (err.message.includes('S3 Client configuration failed')) {
statusCode = 503; clientMessage = 'Storage service unavailable or misconfigured.';
}
// Only expose raw error details in development
res.status(statusCode).json({ error: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
}
});
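To make the request contract concrete, here is a rough client-side sketch of initializing an upload (the '/api/upload' mount point and the surrounding async function are assumptions; the JSON body fields match the handler above):

// Hypothetical client call, inside an async function
const initRes = await fetch('/api/upload/init', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ filename: file.name, fileSize: file.size }),
});
const { uploadId } = await initRes.json();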
// Upload chunk
router.post('/chunk/:uploadId', express.raw({
limit: config.maxFileSize + (10 * 1024 * 1024), // Allow the raw body to be slightly larger than the max file size
type: 'application/octet-stream'
}), async (req, res) => {
const { uploadId } = req.params;
const chunk = req.body;
const clientBatchId = req.headers['x-batch-id']; // Useful for logging context
// ** CRITICAL FOR S3: the client must send the part number **
// e.g. ?partNumber=1, ?partNumber=2, ...
const partNumber = parseInt(req.query.partNumber, 10);
if (isNaN(partNumber) || partNumber < 1) {
logger.error(`[Route /chunk] Invalid partNumber for ${uploadId}: ${req.query.partNumber}`);
return res.status(400).json({ error: 'Missing or invalid partNumber query parameter (must be >= 1)' });
}
// Demo mode handling (simplified)
if (isDemoMode()) {
logger.debug(`[DEMO /chunk] Received chunk for ${uploadId}, part ${partNumber}, size ${chunk?.length || 0}`);
// Simulate increasing progress; more sophisticated logic could live in a demo adapter
const demoProgress = Math.min(100, (Math.random() * 50) + (partNumber * 10));
const completed = demoProgress >= 100;
if (completed) logger.info(`[DEMO /chunk] Simulated completion for ${uploadId}`);
return res.json({ bytesReceived: 0, progress: demoProgress, completed }); // Approximate response
}
if (!chunk || chunk.length === 0) {
logger.warn(`[Route /chunk] Received empty chunk for ${uploadId}, part ${partNumber}`);
return res.status(400).json({ error: 'Empty chunk received' });
}
try {
// Delegate chunk storage to the adapter
const result = await storageAdapter.storeChunk(uploadId, chunk, partNumber);
// If the adapter indicates completion after storing this chunk, finalize the upload
if (result.completed) {
logger.info(`[Route /chunk] Chunk ${partNumber} for ${uploadId} triggered completion. Finalizing...`);
try {
const completionResult = await storageAdapter.completeUpload(uploadId);
logger.success(`[Route /chunk] Successfully finalized upload ${uploadId}. Final path/key: ${completionResult.finalPath}`);
// Send the final success response (ensure progress is 100)
return res.json({ bytesReceived: result.bytesReceived, progress: 100, completed: true });
} catch (completionError) {
logger.error(`[Route /chunk] CRITICAL: Failed to finalize upload ${uploadId} after storing chunk ${partNumber}: ${completionError.message}`, completionError.stack);
// The chunk was stored but finalization failed, so report a server-side error
return res.status(500).json({ error: 'Upload chunk received, but failed to finalize.', details: config.nodeEnv === 'development' ? completionError.message : undefined });
}
} else {
// Chunk stored, but upload not yet complete, return progress
res.json({ bytesReceived: result.bytesReceived, progress: result.progress, completed: false });
}
} catch (err) {
logger.error(`[Route /chunk] Chunk upload failed for ${uploadId}, part ${partNumber}: ${err.name} - ${err.message}`, err.stack);
// Map common errors to client-safe responses
let statusCode = 500;
let clientMessage = 'Failed to process chunk.';
if (err.message.includes('Upload session not found') || err.name === 'NoSuchUpload' || err.code === 'ENOENT' || err.name === 'NotFound' || err.name === 'NoSuchKey') {
statusCode = 404; clientMessage = 'Upload session not found or already completed/aborted.';
} else if (err.name === 'InvalidPart' || err.name === 'InvalidPartOrder') { // S3-specific
statusCode = 400; clientMessage = 'Invalid upload chunk sequence or data.';
} else if (err.name === 'SlowDown' || (err.$metadata && err.$metadata.httpStatusCode === 503)) { // S3 throttling
statusCode = 429; clientMessage = 'Storage provider rate limit exceeded, please try again later.';
} else if (err.code === 'EACCES' || err.code === 'EPERM') { // Local-specific
statusCode = 500; clientMessage = 'Storage permission error while writing chunk.';
}
// Only expose raw error details in development
res.status(statusCode).json({ error: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
}
});
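A matching client-side chunk loop might look like the sketch below (the chunk size, mount point, and absence of retry logic are illustrative assumptions; the partNumber query parameter and octet-stream body are what the route above expects):

// Hypothetical client: slice the file and send numbered parts, inside an async function
const CHUNK_SIZE = 5 * 1024 * 1024; // e.g. 5MB parts (the S3 multipart minimum part size)
let part = 1;
for (let offset = 0; offset < file.size; offset += CHUNK_SIZE, part++) {
  const res = await fetch(`/api/upload/chunk/${uploadId}?partNumber=${part}`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/octet-stream' },
    body: file.slice(offset, offset + CHUNK_SIZE),
  });
  const { progress, completed } = await res.json();
  if (completed) break; // The server finalizes the upload after the last part
}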
@@ -135,27 +174,27 @@ router.post('/chunk/:uploadId', express.raw({
router.post('/cancel/:uploadId', async (req, res) => {
const { uploadId } = req.params;
if (isDemoMode()) {
logger.info(`[DEMO /cancel] Request received for ${uploadId}`);
// Call demoAdapter.abortUpload(uploadId) here if such a mock exists
return res.json({ message: 'Upload cancelled (Demo)' });
}
logger.info(`[Route /cancel] Received cancel request for upload: ${uploadId}`);
try {
// Delegate cancellation to the storage adapter
await storageAdapter.abortUpload(uploadId);
res.json({ message: 'Upload cancelled successfully or was already inactive.' });
} catch (err) {
logger.error(`[Route /cancel] Error during cancellation for ${uploadId}: ${err.name} - ${err.message}`, err.stack);
// If the upload is already gone, treat the cancel as a success from the client's
// perspective; only surface a 500 for genuine server-side failures.
let statusCode = err.name === 'NoSuchUpload' ? 200 : 500;
let clientMessage = err.name === 'NoSuchUpload' ? 'Upload already inactive or not found.' : 'Failed to cancel upload on server.';
if (err.name === 'AccessDenied' || err.name === 'NoSuchBucket') {
statusCode = 500;
clientMessage = 'Storage configuration error during cancel.';
}
res.status(statusCode).json({ message: clientMessage, details: config.nodeEnv === 'development' ? err.message : undefined });
}
});
module.exports = { router }; // Only export the router; previous function exports were removed
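For orientation, the routes above only assume the following adapter surface (method names and return shapes are taken from the calls above; anything beyond that is up to the concrete adapter):

// Shape of a storage adapter as consumed by these routes
// initUpload(filename, size, clientBatchId) -> { uploadId }
// storeChunk(uploadId, chunk, partNumber)   -> { bytesReceived, progress, completed }
// completeUpload(uploadId)                  -> { finalPath }
// abortUpload(uploadId)                     -> void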

View File

@@ -1,110 +0,0 @@
#!/bin/sh
# Simple entrypoint script to manage user permissions and execute CMD
# Exit immediately if a command exits with a non-zero status.
set -e
# Function to log messages
log_info() {
echo "[INFO] Entrypoint: $1"
}
log_warning() {
echo "[WARN] Entrypoint: $1"
}
log_error() {
echo "[ERROR] Entrypoint: $1" >&2
}
log_info "Starting entrypoint script..."
# Default user/group/umask values
DEFAULT_UID=1000
DEFAULT_GID=1000
DEFAULT_UMASK=022
# Default upload directory if not set by user (should align with Dockerfile/compose)
DEFAULT_UPLOAD_DIR="/usr/src/app/local_uploads"
# Check if PUID or PGID environment variables are set by the user
if [ -z "${PUID}" ] && [ -z "${PGID}" ]; then
# --- Run as Root ---
log_info "PUID/PGID not set, running as root."
# Set umask (use UMASK env var if provided, otherwise default)
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
log_info "Setting umask to ${CURRENT_UMASK}"
umask "${CURRENT_UMASK}"
# Execute the command passed to the entrypoint as root
log_info "Executing command as root: $@"
exec "$@"
else
# --- Run as Custom User (nodeuser with adjusted UID/GID) ---
log_info "PUID/PGID set, configuring user 'nodeuser'..."
# Use provided UID/GID or default if only one is set
CURRENT_UID=${PUID:-$DEFAULT_UID}
CURRENT_GID=${PGID:-$DEFAULT_GID}
CURRENT_UMASK=${UMASK:-$DEFAULT_UMASK}
# Read the upload directory from ENV var or use default
TARGET_UPLOAD_DIR=${UPLOAD_DIR:-$DEFAULT_UPLOAD_DIR}
log_info "Target UID: ${CURRENT_UID}, GID: ${CURRENT_GID}, UMASK: ${CURRENT_UMASK}"
log_info "Target Upload Dir: ${TARGET_UPLOAD_DIR}"
# Check if user/group exists (should exist from Dockerfile)
if ! getent group nodeuser > /dev/null 2>&1; then
log_warning "Group 'nodeuser' not found, creating with GID ${CURRENT_GID}..."
addgroup -g "${CURRENT_GID}" nodeuser
else
EXISTING_GID=$(getent group nodeuser | cut -d: -f3)
if [ "${EXISTING_GID}" != "${CURRENT_GID}" ]; then
log_info "Updating 'nodeuser' group GID from ${EXISTING_GID} to ${CURRENT_GID}..."
groupmod -o -g "${CURRENT_GID}" nodeuser
fi
fi
if ! getent passwd nodeuser > /dev/null 2>&1; then
log_warning "User 'nodeuser' not found, creating with UID ${CURRENT_UID}..."
adduser -u "${CURRENT_UID}" -G nodeuser -s /bin/sh -D nodeuser
else
EXISTING_UID=$(getent passwd nodeuser | cut -d: -f3)
if [ "${EXISTING_UID}" != "${CURRENT_UID}" ]; then
log_info "Updating 'nodeuser' user UID from ${EXISTING_UID} to ${CURRENT_UID}..."
usermod -o -u "${CURRENT_UID}" nodeuser
fi
fi
# Ensure the base application directory ownership is correct
log_info "Ensuring ownership of /usr/src/app..."
chown -R nodeuser:nodeuser /usr/src/app || log_warning "Could not chown /usr/src/app"
# Ensure the target upload directory exists and has correct ownership
if [ -n "${TARGET_UPLOAD_DIR}" ]; then
if [ ! -d "${TARGET_UPLOAD_DIR}" ]; then
log_info "Creating directory: ${TARGET_UPLOAD_DIR}"
# Use -p to create parent directories as needed
mkdir -p "${TARGET_UPLOAD_DIR}"
# Chown after creation
chown nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
else
# Directory exists, ensure ownership
log_info "Ensuring ownership of ${TARGET_UPLOAD_DIR}..."
chown -R nodeuser:nodeuser "${TARGET_UPLOAD_DIR}" || log_warning "Could not chown ${TARGET_UPLOAD_DIR}"
fi
else
log_warning "UPLOAD_DIR variable is not set or is empty, skipping ownership check for upload directory."
fi
# Set the umask
log_info "Setting umask to ${CURRENT_UMASK}"
umask "${CURRENT_UMASK}"
# Execute the command passed to the entrypoint using su-exec to drop privileges
log_info "Executing command as nodeuser (${CURRENT_UID}:${CURRENT_GID}): $@"
exec su-exec nodeuser "$@"
fi
log_info "Entrypoint script finished (should not reach here if exec worked)."

View File

@@ -1,111 +1,125 @@
/**
* Server entry point that starts the HTTP server and manages connections.
* Handles graceful shutdown, connection tracking, and server initialization.
* Provides development mode directory listing functionality.
*/
const { app, initialize, config } = require('./app'); // config is also exported from app.js
const logger = require('./utils/logger');
const fs = require('fs');
const { executeCleanup } = require('./utils/cleanup');
const { generatePWAManifest } = require('./scripts/pwa-manifest-generator');
// Track open connections
const connections = new Set();
/**
* Start the server and initialize the application
* @returns {Promise<http.Server>} The HTTP server instance
*/
async function startServer() {
try {
// Initialize the application (runs validateConfig and loads the storage adapter via app.js)
await initialize();
// Start the server
const server = app.listen(config.port, () => {
logger.info(`Server running at ${config.baseUrl}`);
logger.info(`Active Storage Type: ${config.storageType}`);
logger.info(`Data Directory (for uploads or metadata): ${config.uploadDir}`);
// List upload directory contents in development (local storage only)
if (config.nodeEnv === 'development' && config.storageType === 'local') {
try {
if (fs.existsSync(config.uploadDir)) {
const files = fs.readdirSync(config.uploadDir);
logger.info(`Current local upload directory contents (${config.uploadDir}):`);
files.forEach(file => logger.info(`- ${file}`));
} else {
logger.warn(`Local upload directory ${config.uploadDir} does not exist for listing.`);
}
} catch (err) {
logger.error(`Failed to list local upload directory contents: ${err.message}`);
}
}
});
// Dynamically generate PWA manifest into public folder
generatePWAManifest();
// Track new connections
server.on('connection', (connection) => {
connections.add(connection);
connection.on('close', () => connections.delete(connection));
});
let isShuttingDown = false; // Prevent multiple shutdowns
const shutdownHandler = async (signal) => {
if (isShuttingDown) return;
isShuttingDown = true;
logger.info(`${signal} received. Shutting down gracefully...`);
// Force shutdown if the graceful path takes too long
const forceShutdownTimer = setTimeout(() => {
logger.error('Force shutdown due to timeout.');
process.exit(1);
}, 5000);
try {
// Stop accepting new connections; close idle keep-alive connections (Node 18+)
server.closeIdleConnections?.();
server.unref();
// Actively destroy remaining connections, waiting at most 2s
const closePromises = Array.from(connections).map(conn => new Promise(resolve => {
conn.on('close', resolve); // Ensure the 'close' event resolves the promise
conn.destroy();
}));
await Promise.race([
Promise.all(closePromises),
new Promise(resolve => setTimeout(resolve, 2000))
]);
connections.clear();
// Close the server
await new Promise((resolve, reject) => {
server.close((err) => {
if (err) return reject(err);
logger.info('Server closed.');
resolve();
});
});
// Run cleanup tasks, waiting at most 1.5s
await executeCleanup(1500);
// Clear the force shutdown timer since we completed gracefully
clearTimeout(forceShutdownTimer);
logger.info('Shutdown complete.');
process.exit(0);
} catch (error) {
clearTimeout(forceShutdownTimer); // Clear the timer on error too
logger.error(`Error during shutdown: ${error.message}`);
process.exit(1);
}
};
// Handle both SIGTERM and SIGINT
process.on('SIGTERM', () => shutdownHandler('SIGTERM'));
process.on('SIGINT', () => shutdownHandler('SIGINT'));
return server;
} catch (error) {
logger.error('Failed to start server:', error);
// Ensure process exits if startServer itself fails before listener setup
process.exitCode = 1;
throw error;
}
}
// Only start the server if this file is run directly
if (require.main === module) {
startServer().catch(() => {
// startServer already logged the error; just ensure a non-zero exit code
// instead of rethrowing into an unhandled rejection
process.exitCode = 1;
});
}
module.exports = { app, startServer };
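A small usage sketch (hypothetical, for manually exercising the graceful shutdown path; assumes only the exports above):

// Start programmatically, then trigger the SIGTERM handler after a delay
const { startServer } = require('./server');
startServer().then(() => {
  setTimeout(() => process.kill(process.pid, 'SIGTERM'), 5000);
});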

File diff suppressed because it is too large Load Diff