Mirror of https://github.com/kyantech/Palmr.git (synced 2025-11-04 05:53:23 +00:00)
	feat: migrate storage system from filesystem to S3-compatible storage
- Implemented S3 storage integration, replacing the previous filesystem storage.
- Added an automatic migration script for existing files from filesystem to S3.
- Updated the Docker configuration to include MinIO for local S3 emulation.
- Removed obsolete filesystem-related code and endpoints.
- Reworked upload and download to use presigned URLs for S3.
- Updated the environment configuration to support S3 settings.

This change significantly simplifies file management and improves scalability.
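With this migration, uploads follow the standard two-step presigned flow: the client asks the API for a presigned PUT URL, then sends the file bytes directly to storage. A minimal client-side sketch of that flow, assuming the `filename`/`extension` query parameters and `{ url, objectName }` response shape visible in the controller diff below; the `/files/presigned-url` path, `apiBase`, and bearer-token header are illustrative assumptions, not confirmed by this commit:

// Hypothetical client-side upload via the presigned URL flow.
// Endpoint path and auth header are assumptions for illustration.
async function uploadViaPresignedUrl(apiBase: string, token: string, file: File): Promise<string> {
  const name = file.name.replace(/\.[^.]+$/, "");
  const ext = file.name.split(".").pop() ?? "bin";

  // Step 1: ask the API for a presigned PUT URL.
  const res = await fetch(
    `${apiBase}/files/presigned-url?filename=${encodeURIComponent(name)}&extension=${encodeURIComponent(ext)}`,
    { headers: { Authorization: `Bearer ${token}` } }
  );
  if (!res.ok) throw new Error(`Failed to get presigned URL: ${res.status}`);
  const { url, objectName } = (await res.json()) as { url: string; objectName: string };

  // Step 2: upload the bytes straight to the S3-compatible storage.
  const put = await fetch(url, { method: "PUT", body: file });
  if (!put.ok) throw new Error(`Upload failed: ${put.status}`);

  return objectName; // the caller registers this objectName with the API afterwards
}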
Changed files: Dockerfile (56 lines changed)

							@@ -5,11 +5,21 @@ RUN apk add --no-cache \
  gcompat \
  supervisor \
  curl \
  wget \
  openssl \
  su-exec

# Enable pnpm
RUN corepack enable pnpm

# Install storage system for S3-compatible storage
COPY infra/install-minio.sh /tmp/install-minio.sh
RUN chmod +x /tmp/install-minio.sh && /tmp/install-minio.sh

# Install storage client (mc)
RUN wget https://dl.min.io/client/mc/release/linux-amd64/mc -O /usr/local/bin/mc && \
  chmod +x /usr/local/bin/mc

# Set working directory
WORKDIR /app

@@ -119,11 +129,14 @@ RUN mkdir -p /etc/supervisor/conf.d

# Copy server start script and configuration files
COPY infra/server-start.sh /app/server-start.sh
COPY infra/start-minio.sh /app/start-minio.sh
COPY infra/minio-setup.sh /app/minio-setup.sh
COPY infra/load-minio-credentials.sh /app/load-minio-credentials.sh
COPY infra/configs.json /app/infra/configs.json
COPY infra/providers.json /app/infra/providers.json
COPY infra/check-missing.js /app/infra/check-missing.js
RUN chmod +x /app/server-start.sh
RUN chown -R palmr:nodejs /app/server-start.sh /app/infra
RUN chmod +x /app/server-start.sh /app/start-minio.sh /app/minio-setup.sh /app/load-minio-credentials.sh
RUN chown -R palmr:nodejs /app/server-start.sh /app/start-minio.sh /app/minio-setup.sh /app/load-minio-credentials.sh /app/infra

# Copy supervisor configuration
COPY infra/supervisord.conf /etc/supervisor/conf.d/supervisord.conf

@@ -144,9 +157,42 @@ export DATABASE_URL="file:/app/server/prisma/palmr.db"

export NEXT_PUBLIC_DEFAULT_LANGUAGE=\${DEFAULT_LANGUAGE:-en-US}

# Ensure /app/server directory exists for bind mounts
mkdir -p /app/server/uploads /app/server/temp-uploads /app/server/prisma
mkdir -p /app/server/uploads /app/server/temp-uploads /app/server/prisma /app/server/minio-data

echo "Data directories ready for first run..."
# CRITICAL: Fix permissions BEFORE starting any services
# This runs on EVERY startup to handle updates and corrupted metadata
echo "🔐 Fixing permissions for internal storage..."

# DYNAMIC: Detect palmr user's actual UID and GID
# Works with any Docker --user configuration
PALMR_UID=\$(id -u palmr 2>/dev/null || echo "1001")
PALMR_GID=\$(id -g palmr 2>/dev/null || echo "1001")
echo "   Target user: palmr (UID:\$PALMR_UID, GID:\$PALMR_GID)"

# ALWAYS remove storage system metadata to prevent corruption issues
# This is safe - storage system recreates it automatically
# User data (files) are NOT in .minio.sys, they're safe
if [ -d "/app/server/minio-data/.minio.sys" ]; then
    echo "   🧹 Cleaning storage system metadata (safe, auto-regenerated)..."
    rm -rf /app/server/minio-data/.minio.sys 2>/dev/null || true
fi

# Fix ownership and permissions (safe for updates)
echo "   🔧 Setting ownership and permissions..."
chown -R \$PALMR_UID:\$PALMR_GID /app/server 2>/dev/null || echo "   ⚠️  chown skipped"
chmod -R 755 /app/server 2>/dev/null || echo "   ⚠️  chmod skipped"

# Verify critical directories are writable
if touch /app/server/.test-write 2>/dev/null; then
    rm -f /app/server/.test-write
    echo "   ✅ Storage directory is writable"
else
    echo "   ❌ FATAL: /app/server is NOT writable!"
    echo "   Check Docker volume permissions"
    ls -la /app/server 2>/dev/null || true
fi

echo "✅ Storage ready, starting services..."

# Start supervisor
exec /usr/bin/supervisord -c /etc/supervisor/conf.d/supervisord.conf

@@ -158,7 +204,7 @@ RUN chmod +x /app/start.sh

VOLUME ["/app/server"]

# Expose ports
EXPOSE 3333 5487
EXPOSE 3333 5487 9379 9378

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \

@@ -1,10 +1,57 @@
import * as fs from "fs";
import process from "node:process";
import { S3Client } from "@aws-sdk/client-s3";

import { env } from "../env";
import { StorageConfig } from "../types/storage";

export const storageConfig: StorageConfig = {
/**
 * Load internal storage credentials if they exist
 * This provides S3-compatible storage automatically when ENABLE_S3=false
 */
function loadInternalStorageCredentials(): Partial<StorageConfig> | null {
  const credentialsPath = "/app/server/.minio-credentials";

  try {
    if (fs.existsSync(credentialsPath)) {
      const content = fs.readFileSync(credentialsPath, "utf-8");
      const credentials: any = {};

      content.split("\n").forEach((line) => {
        const [key, value] = line.split("=");
        if (key && value) {
          credentials[key.trim()] = value.trim();
        }
      });

      console.log("[STORAGE] Using internal storage system");

      return {
        endpoint: credentials.S3_ENDPOINT || "127.0.0.1",
        port: parseInt(credentials.S3_PORT || "9379", 10),
        useSSL: credentials.S3_USE_SSL === "true",
        accessKey: credentials.S3_ACCESS_KEY,
        secretKey: credentials.S3_SECRET_KEY,
        region: credentials.S3_REGION || "default",
        bucketName: credentials.S3_BUCKET_NAME || "palmr-files",
        forcePathStyle: true,
      };
    }
  } catch (error) {
    console.warn("[STORAGE] Could not load internal storage credentials:", error);
  }

  return null;
}

/**
 * Storage configuration:
 * - Default (ENABLE_S3=false or not set): Internal storage (auto-configured, zero config)
 * - ENABLE_S3=true: External S3 (AWS, S3-compatible, etc) using env vars
 */
const internalStorageConfig = env.ENABLE_S3 === "true" ? null : loadInternalStorageCredentials();

export const storageConfig: StorageConfig = (internalStorageConfig as StorageConfig) || {
  endpoint: env.S3_ENDPOINT || "",
  port: env.S3_PORT ? Number(env.S3_PORT) : undefined,
  useSSL: env.S3_USE_SSL === "true",

@@ -23,21 +70,76 @@ if (storageConfig.useSSL && env.S3_REJECT_UNAUTHORIZED === "false") {
  }
}

export const s3Client =
  env.ENABLE_S3 === "true"
    ? new S3Client({
        endpoint: storageConfig.useSSL
          ? `https://${storageConfig.endpoint}${storageConfig.port ? `:${storageConfig.port}` : ""}`
          : `http://${storageConfig.endpoint}${storageConfig.port ? `:${storageConfig.port}` : ""}`,
        region: storageConfig.region,
        credentials: {
          accessKeyId: storageConfig.accessKey,
          secretAccessKey: storageConfig.secretKey,
        },
        forcePathStyle: storageConfig.forcePathStyle,
      })
    : null;
/**
 * Storage is ALWAYS S3-compatible:
 * - ENABLE_S3=false → Internal storage (automatic)
 * - ENABLE_S3=true  → External S3 (AWS, S3-compatible, etc)
 */
const hasValidConfig = storageConfig.endpoint && storageConfig.accessKey && storageConfig.secretKey;

export const s3Client = hasValidConfig
  ? new S3Client({
      endpoint: storageConfig.useSSL
        ? `https://${storageConfig.endpoint}${storageConfig.port ? `:${storageConfig.port}` : ""}`
        : `http://${storageConfig.endpoint}${storageConfig.port ? `:${storageConfig.port}` : ""}`,
      region: storageConfig.region,
      credentials: {
        accessKeyId: storageConfig.accessKey,
        secretAccessKey: storageConfig.secretKey,
      },
      forcePathStyle: storageConfig.forcePathStyle,
    })
  : null;

export const bucketName = storageConfig.bucketName;

export const isS3Enabled = env.ENABLE_S3 === "true";
/**
 * Storage is always S3-compatible
 * ENABLE_S3=true means EXTERNAL S3, otherwise uses internal storage
 */
export const isS3Enabled = s3Client !== null;
export const isExternalS3 = env.ENABLE_S3 === "true";
export const isInternalStorage = s3Client !== null && env.ENABLE_S3 !== "true";

/**
 * Creates a public S3 client for presigned URL generation.
 * - Internal storage (ENABLE_S3=false): Uses STORAGE_URL (e.g., https://syrg.palmr.com)
 * - External S3 (ENABLE_S3=true): Uses the original S3 endpoint configuration
 *
 * @returns S3Client configured with public endpoint, or null if S3 is disabled
 */
export function createPublicS3Client(): S3Client | null {
  if (!s3Client) {
    return null;
  }

  let publicEndpoint: string;

  if (isInternalStorage) {
    // Internal storage: use STORAGE_URL
    if (!env.STORAGE_URL) {
      throw new Error(
        "[STORAGE] STORAGE_URL environment variable is required when using internal storage (ENABLE_S3=false). " +
          "Set STORAGE_URL to your public storage URL with protocol (e.g., https://syrg.palmr.com or http://192.168.1.100:9379)"
      );
    }
    publicEndpoint = env.STORAGE_URL;
  } else {
    // External S3: use the original endpoint configuration
    publicEndpoint = storageConfig.useSSL
      ? `https://${storageConfig.endpoint}${storageConfig.port ? `:${storageConfig.port}` : ""}`
      : `http://${storageConfig.endpoint}${storageConfig.port ? `:${storageConfig.port}` : ""}`;
  }

  console.log(`[STORAGE] Creating public S3 client with endpoint: ${publicEndpoint}`);

  return new S3Client({
    endpoint: publicEndpoint,
    region: storageConfig.region,
    credentials: {
      accessKeyId: storageConfig.accessKey,
      secretAccessKey: storageConfig.secretKey,
    },
    forcePathStyle: storageConfig.forcePathStyle,
  });
}
 
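How the presigned URLs actually get signed is not part of this diff, but with the AWS SDK v3 `S3Client` above the standard mechanism is `getSignedUrl` from `@aws-sdk/s3-request-presigner`. A sketch of what the S3 storage provider presumably does with `createPublicS3Client()`; the function names and import path here are assumptions for illustration, not the provider's real code:

import { GetObjectCommand, PutObjectCommand } from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";

import { bucketName, createPublicS3Client } from "../../config/storage.config";

// Presigned PUT: lets a client upload directly to the bucket.
export async function presignPut(objectName: string, expiresIn: number): Promise<string> {
  const client = createPublicS3Client();
  if (!client) throw new Error("S3 client is not configured");
  return getSignedUrl(client, new PutObjectCommand({ Bucket: bucketName, Key: objectName }), { expiresIn });
}

// Presigned GET: time-limited download link, with a friendly filename.
export async function presignGet(objectName: string, expiresIn: number, fileName?: string): Promise<string> {
  const client = createPublicS3Client();
  if (!client) throw new Error("S3 client is not configured");
  return getSignedUrl(
    client,
    new GetObjectCommand({
      Bucket: bucketName,
      Key: objectName,
      ResponseContentDisposition: fileName ? `attachment; filename="${encodeURIComponent(fileName)}"` : undefined,
    }),
    { expiresIn }
  );
}

Using the public client here matters: the URL's signature covers the host, so signing against the internal 127.0.0.1 endpoint would produce links a browser cannot use, which is why STORAGE_URL is mandatory for internal storage.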
@@ -1,9 +1,8 @@
import { z } from "zod";

const envSchema = z.object({
  // Storage configuration
  ENABLE_S3: z.union([z.literal("true"), z.literal("false")]).default("false"),
  ENCRYPTION_KEY: z.string().optional(),
  DISABLE_FILESYSTEM_ENCRYPTION: z.union([z.literal("true"), z.literal("false")]).default("true"),
  S3_ENDPOINT: z.string().optional(),
  S3_PORT: z.string().optional(),
  S3_USE_SSL: z.string().optional(),

@@ -13,26 +12,16 @@ const envSchema = z.object({
  S3_BUCKET_NAME: z.string().optional(),
  S3_FORCE_PATH_STYLE: z.union([z.literal("true"), z.literal("false")]).default("false"),
  S3_REJECT_UNAUTHORIZED: z.union([z.literal("true"), z.literal("false")]).default("true"),

  // Legacy encryption vars (kept for backward compatibility but not used with S3/Garage)
  ENCRYPTION_KEY: z.string().optional(),
  DISABLE_FILESYSTEM_ENCRYPTION: z.union([z.literal("true"), z.literal("false")]).default("true"),

  // Application configuration
  PRESIGNED_URL_EXPIRATION: z.string().optional().default("3600"),
  SECURE_SITE: z.union([z.literal("true"), z.literal("false")]).default("false"),
  STORAGE_URL: z.string().optional(), // Storage URL for internal storage presigned URLs (required when ENABLE_S3=false, e.g., https://syrg.palmr.com or http://192.168.1.100:9379)
  DATABASE_URL: z.string().optional().default("file:/app/server/prisma/palmr.db"),
  DOWNLOAD_MAX_CONCURRENT: z
    .string()
    .optional()
    .transform((val) => (val ? parseInt(val, 10) : undefined)),
  DOWNLOAD_MEMORY_THRESHOLD_MB: z
    .string()
    .optional()
    .transform((val) => (val ? parseInt(val, 10) : undefined)),
  DOWNLOAD_QUEUE_SIZE: z
    .string()
    .optional()
    .transform((val) => (val ? parseInt(val, 10) : undefined)),
  DOWNLOAD_AUTO_SCALE: z.union([z.literal("true"), z.literal("false")]).default("true"),
  DOWNLOAD_MIN_FILE_SIZE_GB: z
    .string()
    .optional()
    .transform((val) => (val ? parseFloat(val) : undefined)),
  CUSTOM_PATH: z.string().optional(),
});
 
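The schema above is plain zod: boolean-ish vars stay as strings constrained to "true"/"false", while the DOWNLOAD_* vars are coerced to numbers at parse time via `.transform`. A sketch of the consuming side, assuming the module exports the parsed result as `env` (which the `import { env } from "../env"` statements elsewhere in this commit suggest, though the export statement itself is outside this diff):

// Assumed export at the bottom of env.ts: parse once at startup.
// An invalid value throws a ZodError listing the offending keys.
export const env = envSchema.parse(process.env);

// After parsing, transformed fields are numbers or undefined rather than strings:
//   env.DOWNLOAD_MAX_CONCURRENT  -> number | undefined
//   env.PRESIGNED_URL_EXPIRATION -> string ("3600" by default), hence parseInt() at call sites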
@@ -1,4 +1,3 @@
import { isS3Enabled } from "../../config/storage.config";
import { prisma } from "../../shared/prisma";
import { ConfigService } from "../config/service";

@@ -23,8 +22,8 @@ export class AppService {

  async getSystemInfo() {
    return {
      storageProvider: isS3Enabled ? "s3" : "filesystem",
      s3Enabled: isS3Enabled,
      storageProvider: "s3",
      s3Enabled: true,
    };
  }
 
@@ -1,4 +1,3 @@
import * as fs from "fs";
import bcrypt from "bcryptjs";
import { FastifyReply, FastifyRequest } from "fastify";

@@ -29,31 +28,32 @@ export class FileController {
  private fileService = new FileService();
  private configService = new ConfigService();

  async getPresignedUrl(request: FastifyRequest, reply: FastifyReply) {
    try {
      const { filename, extension } = request.query as {
        filename?: string;
        extension?: string;
      };
      if (!filename || !extension) {
        return reply.status(400).send({
          error: "The 'filename' and 'extension' parameters are required.",
        });
      }
  async getPresignedUrl(request: FastifyRequest, reply: FastifyReply): Promise<void> {
    const { filename, extension } = request.query as { filename: string; extension: string };

    if (!filename || !extension) {
      return reply.status(400).send({ error: "filename and extension are required" });
    }

    try {
      // JWT already verified by preValidation in routes.ts
      const userId = (request as any).user?.userId;
      if (!userId) {
        return reply.status(401).send({ error: "Unauthorized: a valid token is required to access this resource." });
        return reply.status(401).send({ error: "Unauthorized" });
      }

      const objectName = `${userId}/${Date.now()}-${filename}.${extension}`;
      // Generate unique object name
      const objectName = `${userId}/${Date.now()}-${Math.random().toString(36).substring(7)}-${filename}.${extension}`;
      const expires = parseInt(env.PRESIGNED_URL_EXPIRATION);

      console.log(`[PRESIGNED] Generating upload URL using STORAGE_URL: ${env.STORAGE_URL || "from S3 config"}`);

      const url = await this.fileService.getPresignedPutUrl(objectName, expires);
      return reply.send({ url, objectName });

      return reply.status(200).send({ url, objectName });
    } catch (error) {
      console.error("Error in getPresignedUrl:", error);
      return reply.status(500).send({ error: "Internal server error." });
      return reply.status(500).send({ error: "Internal server error" });
    }
  }

@@ -264,6 +264,8 @@ export class FileController {

      const fileName = fileRecord.name;
      const expires = parseInt(env.PRESIGNED_URL_EXPIRATION);

      // Always use presigned URLs (works for both internal and external storage)
      const url = await this.fileService.getPresignedGetUrl(objectName, expires, fileName);
      return reply.send({ url, expiresIn: expires });
    } catch (error) {
@@ -309,16 +311,14 @@ export class FileController {
            return reply.status(401).send({ error: "Unauthorized access to file." });
          }

          const storageProvider = (this.fileService as any).storageProvider;
          const filePath = storageProvider.getFilePath(objectName);

          // Stream from S3/storage system
          const stream = await this.fileService.getObjectStream(objectName);
          const contentType = getContentType(reverseShareFile.name);
          const fileName = reverseShareFile.name;

          reply.header("Content-Type", contentType);
          reply.header("Content-Disposition", `inline; filename="${encodeURIComponent(fileName)}"`);

          const stream = fs.createReadStream(filePath);
          return reply.send(stream);
        }

@@ -367,16 +367,14 @@ export class FileController {
        return reply.status(401).send({ error: "Unauthorized access to file." });
      }

      const storageProvider = (this.fileService as any).storageProvider;
      const filePath = storageProvider.getFilePath(objectName);

      // Stream from S3/MinIO
      const stream = await this.fileService.getObjectStream(objectName);
      const contentType = getContentType(fileRecord.name);
      const fileName = fileRecord.name;

      reply.header("Content-Type", contentType);
      reply.header("Content-Disposition", `inline; filename="${encodeURIComponent(fileName)}"`);

      const stream = fs.createReadStream(filePath);
      return reply.send(stream);
    } catch (error) {
      console.error("Error in downloadFile:", error);
@@ -612,9 +610,8 @@ export class FileController {
        });
      }

      const storageProvider = (this.fileService as any).storageProvider;
      const filePath = storageProvider.getFilePath(fileRecord.objectName);

      // Stream from S3/MinIO
      const stream = await this.fileService.getObjectStream(fileRecord.objectName);
      const contentType = getContentType(fileRecord.name);
      const fileName = fileRecord.name;

@@ -622,7 +619,6 @@ export class FileController {
      reply.header("Content-Disposition", `inline; filename="${encodeURIComponent(fileName)}"`);
      reply.header("Cache-Control", "public, max-age=31536000"); // Cache for 1 year

      const stream = fs.createReadStream(filePath);
      return reply.send(stream);
    } catch (error) {
      console.error("Error in embedFile:", error);
 
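Every download path above swaps `fs.createReadStream(filePath)` for `this.fileService.getObjectStream(objectName)`. The `S3StorageProvider` internals are not shown in this commit, but with AWS SDK v3 the natural implementation streams the `Body` of a `GetObjectCommand` response, which is a Node `Readable`. A sketch under that assumption; the function shape is illustrative:

import { Readable } from "stream";

import { GetObjectCommand } from "@aws-sdk/client-s3";

import { bucketName, s3Client } from "../../config/storage.config";

export async function getObjectStream(objectName: string): Promise<NodeJS.ReadableStream> {
  if (!s3Client) throw new Error("S3 client is not configured");

  const response = await s3Client.send(new GetObjectCommand({ Bucket: bucketName, Key: objectName }));
  if (!response.Body) throw new Error(`Object not found: ${objectName}`);

  // In Node.js runtimes the SDK returns the body as a Readable stream.
  return response.Body as Readable;
}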
@@ -1,5 +1,3 @@
import { isS3Enabled } from "../../config/storage.config";
import { FilesystemStorageProvider } from "../../providers/filesystem-storage.provider";
import { S3StorageProvider } from "../../providers/s3-storage.provider";
import { StorageProvider } from "../../types/storage";

@@ -7,29 +5,16 @@ export class FileService {
  private storageProvider: StorageProvider;

  constructor() {
    if (isS3Enabled) {
      this.storageProvider = new S3StorageProvider();
    } else {
      this.storageProvider = FilesystemStorageProvider.getInstance();
    }
    // Always use S3 (Garage internal or external S3)
    this.storageProvider = new S3StorageProvider();
  }

  async getPresignedPutUrl(objectName: string, expires: number): Promise<string> {
    try {
      return await this.storageProvider.getPresignedPutUrl(objectName, expires);
    } catch (err) {
      console.error("Error in presignedPutObject:", err);
      throw err;
    }
  async getPresignedPutUrl(objectName: string, expires: number = 3600): Promise<string> {
    return await this.storageProvider.getPresignedPutUrl(objectName, expires);
  }

  async getPresignedGetUrl(objectName: string, expires: number, fileName?: string): Promise<string> {
    try {
      return await this.storageProvider.getPresignedGetUrl(objectName, expires, fileName);
    } catch (err) {
      console.error("Error in presignedGetObject:", err);
      throw err;
    }
  async getPresignedGetUrl(objectName: string, expires: number = 3600, fileName?: string): Promise<string> {
    return await this.storageProvider.getPresignedGetUrl(objectName, expires, fileName);
  }

  async deleteObject(objectName: string): Promise<void> {
@@ -41,7 +26,12 @@ export class FileService {
    }
  }

  isFilesystemMode(): boolean {
    return !isS3Enabled;
  async getObjectStream(objectName: string): Promise<NodeJS.ReadableStream> {
    try {
      return await this.storageProvider.getObjectStream(objectName);
    } catch (err) {
      console.error("Error getting object stream:", err);
      throw err;
    }
  }
}
 
@@ -1,345 +0,0 @@
import * as fs from "fs";
import * as path from "path";

import { getTempFilePath } from "../../config/directories.config";
import { FilesystemStorageProvider } from "../../providers/filesystem-storage.provider";

export interface ChunkMetadata {
  fileId: string;
  chunkIndex: number;
  totalChunks: number;
  chunkSize: number;
  totalSize: number;
  fileName: string;
  isLastChunk: boolean;
}

export interface ChunkInfo {
  fileId: string;
  fileName: string;
  totalSize: number;
  totalChunks: number;
  uploadedChunks: Set<number>;
  tempPath: string;
  createdAt: number;
}

export class ChunkManager {
  private static instance: ChunkManager;
  private activeUploads = new Map<string, ChunkInfo>();
  private finalizingUploads = new Set<string>(); // Track uploads currently being finalized
  private cleanupInterval: NodeJS.Timeout;

  private constructor() {
    // Cleanup expired uploads every 30 minutes
    this.cleanupInterval = setInterval(
      () => {
        this.cleanupExpiredUploads();
      },
      30 * 60 * 1000
    );
  }

  public static getInstance(): ChunkManager {
    if (!ChunkManager.instance) {
      ChunkManager.instance = new ChunkManager();
    }
    return ChunkManager.instance;
  }

  /**
   * Process a chunk upload with streaming
   */
  async processChunk(
    metadata: ChunkMetadata,
    inputStream: NodeJS.ReadableStream,
    originalObjectName: string
  ): Promise<{ isComplete: boolean; finalPath?: string }> {
    const startTime = Date.now();
    const { fileId, chunkIndex, totalChunks, fileName, totalSize, isLastChunk } = metadata;

    console.log(`Processing chunk ${chunkIndex + 1}/${totalChunks} for file ${fileName} (${fileId})`);

    let chunkInfo = this.activeUploads.get(fileId);
    if (!chunkInfo) {
      if (chunkIndex !== 0) {
        throw new Error("First chunk must be chunk 0");
      }

      const tempPath = getTempFilePath(fileId);
      chunkInfo = {
        fileId,
        fileName,
        totalSize,
        totalChunks,
        uploadedChunks: new Set(),
        tempPath,
        createdAt: Date.now(),
      };
      this.activeUploads.set(fileId, chunkInfo);
      console.log(`Created new upload session for ${fileName} at ${tempPath}`);
    }

    console.log(
      `Validating chunk ${chunkIndex} (total: ${totalChunks}, uploaded: ${Array.from(chunkInfo.uploadedChunks).join(",")})`
    );

    if (chunkIndex < 0 || chunkIndex >= totalChunks) {
      throw new Error(`Invalid chunk index: ${chunkIndex} (must be 0-${totalChunks - 1})`);
    }

    if (chunkInfo.uploadedChunks.has(chunkIndex)) {
      console.log(`Chunk ${chunkIndex} already uploaded, treating as success`);

      if (isLastChunk && chunkInfo.uploadedChunks.size === totalChunks) {
        if (this.finalizingUploads.has(fileId)) {
          console.log(`Upload ${fileId} is already being finalized, waiting...`);
          return { isComplete: false };
        }

        console.log(`All chunks uploaded, finalizing ${fileName}`);
        return await this.finalizeUpload(chunkInfo, metadata, originalObjectName);
      }

      return { isComplete: false };
    }

    const tempDir = path.dirname(chunkInfo.tempPath);
    await fs.promises.mkdir(tempDir, { recursive: true });
    console.log(`Temp directory ensured: ${tempDir}`);

    await this.writeChunkToFile(chunkInfo.tempPath, inputStream, chunkIndex === 0);

    chunkInfo.uploadedChunks.add(chunkIndex);

    try {
      const stats = await fs.promises.stat(chunkInfo.tempPath);
      const processingTime = Date.now() - startTime;
      console.log(
        `Chunk ${chunkIndex + 1}/${totalChunks} uploaded successfully in ${processingTime}ms. Temp file size: ${stats.size} bytes`
      );
    } catch (error) {
      console.warn(`Could not get temp file stats:`, error);
    }

    console.log(
      `Checking completion: isLastChunk=${isLastChunk}, uploadedChunks.size=${chunkInfo.uploadedChunks.size}, totalChunks=${totalChunks}`
    );

    if (isLastChunk && chunkInfo.uploadedChunks.size === totalChunks) {
      if (this.finalizingUploads.has(fileId)) {
        console.log(`Upload ${fileId} is already being finalized, waiting...`);
        return { isComplete: false };
      }

      console.log(`All chunks uploaded, finalizing ${fileName}`);

      const uploadedChunksArray = Array.from(chunkInfo.uploadedChunks).sort((a, b) => a - b);
      console.log(`Uploaded chunks in order: ${uploadedChunksArray.join(", ")}`);

      const expectedChunks = Array.from({ length: totalChunks }, (_, i) => i);
      const missingChunks = expectedChunks.filter((chunk) => !chunkInfo.uploadedChunks.has(chunk));

      if (missingChunks.length > 0) {
        throw new Error(`Missing chunks: ${missingChunks.join(", ")}`);
      }

      return await this.finalizeUpload(chunkInfo, metadata, originalObjectName);
    } else {
      console.log(
        `Not ready for finalization: isLastChunk=${isLastChunk}, uploadedChunks.size=${chunkInfo.uploadedChunks.size}, totalChunks=${totalChunks}`
      );
    }

    return { isComplete: false };
  }

  /**
   * Write chunk to file using streaming
   */
  private async writeChunkToFile(
    filePath: string,
    inputStream: NodeJS.ReadableStream,
    isFirstChunk: boolean
  ): Promise<void> {
    return new Promise((resolve, reject) => {
      console.log(`Writing chunk to ${filePath} (first: ${isFirstChunk})`);

      if (isFirstChunk) {
        const writeStream = fs.createWriteStream(filePath, {
          highWaterMark: 64 * 1024 * 1024, // 64MB buffer for better performance
        });
        writeStream.on("error", (error) => {
          console.error("Write stream error:", error);
          reject(error);
        });
        writeStream.on("finish", () => {
          console.log("Write stream finished successfully");
          resolve();
        });
        inputStream.pipe(writeStream);
      } else {
        const writeStream = fs.createWriteStream(filePath, {
          flags: "a",
          highWaterMark: 64 * 1024 * 1024, // 64MB buffer for better performance
        });
        writeStream.on("error", (error) => {
          console.error("Write stream error:", error);
          reject(error);
        });
        writeStream.on("finish", () => {
          console.log("Write stream finished successfully");
          resolve();
        });
        inputStream.pipe(writeStream);
      }
    });
  }

  /**
   * Finalize upload by moving temp file to final location and encrypting (if enabled)
   */
  private async finalizeUpload(
    chunkInfo: ChunkInfo,
    metadata: ChunkMetadata,
    originalObjectName: string
  ): Promise<{ isComplete: boolean; finalPath: string }> {
    // Mark as finalizing to prevent race conditions
    this.finalizingUploads.add(chunkInfo.fileId);

    try {
      console.log(`Finalizing upload for ${chunkInfo.fileName}`);

      const tempStats = await fs.promises.stat(chunkInfo.tempPath);
      console.log(`Temp file size: ${tempStats.size} bytes, expected: ${chunkInfo.totalSize} bytes`);

      if (tempStats.size !== chunkInfo.totalSize) {
        console.warn(`Size mismatch! Temp: ${tempStats.size}, Expected: ${chunkInfo.totalSize}`);
      }

      const provider = FilesystemStorageProvider.getInstance();
      const finalObjectName = originalObjectName;
      const filePath = provider.getFilePath(finalObjectName);
      const dir = path.dirname(filePath);

      console.log(`Starting finalization: ${finalObjectName}`);

      await fs.promises.mkdir(dir, { recursive: true });

      const tempReadStream = fs.createReadStream(chunkInfo.tempPath, {
        highWaterMark: 64 * 1024 * 1024, // 64MB buffer for better performance
      });
      const writeStream = fs.createWriteStream(filePath, {
        highWaterMark: 64 * 1024 * 1024,
      });
      const encryptStream = provider.createEncryptStream();

      await new Promise<void>((resolve, reject) => {
        const startTime = Date.now();

        tempReadStream
          .pipe(encryptStream)
          .pipe(writeStream)
          .on("finish", () => {
            const duration = Date.now() - startTime;
            console.log(`File processed and saved to: ${filePath} in ${duration}ms`);
            resolve();
          })
          .on("error", (error) => {
            console.error("Error during processing:", error);
            reject(error);
          });
      });

      console.log(`File successfully uploaded and processed: ${finalObjectName}`);

      await this.cleanupTempFile(chunkInfo.tempPath);

      this.activeUploads.delete(chunkInfo.fileId);
      this.finalizingUploads.delete(chunkInfo.fileId);

      return { isComplete: true, finalPath: finalObjectName };
    } catch (error) {
      console.error("Error during finalization:", error);
      await this.cleanupTempFile(chunkInfo.tempPath);
      this.activeUploads.delete(chunkInfo.fileId);
      this.finalizingUploads.delete(chunkInfo.fileId);
      throw error;
    }
  }

  /**
   * Cleanup temporary file
   */
  private async cleanupTempFile(tempPath: string): Promise<void> {
    try {
      await fs.promises.access(tempPath);
      await fs.promises.unlink(tempPath);
      console.log(`Temp file cleaned up: ${tempPath}`);
    } catch (error: any) {
      if (error.code === "ENOENT") {
        console.log(`Temp file already cleaned up: ${tempPath}`);
      } else {
        console.warn(`Failed to cleanup temp file ${tempPath}:`, error);
      }
    }
  }

  /**
   * Cleanup expired uploads (older than 2 hours)
   */
  private async cleanupExpiredUploads(): Promise<void> {
    const now = Date.now();
    const maxAge = 2 * 60 * 60 * 1000; // 2 hours

    for (const [fileId, chunkInfo] of this.activeUploads.entries()) {
      if (now - chunkInfo.createdAt > maxAge) {
        console.log(`Cleaning up expired upload: ${fileId}`);
        await this.cleanupTempFile(chunkInfo.tempPath);
        this.activeUploads.delete(fileId);
        this.finalizingUploads.delete(fileId);
      }
    }
  }

  /**
   * Get upload progress
   */
  getUploadProgress(fileId: string): { uploaded: number; total: number; percentage: number } | null {
    const chunkInfo = this.activeUploads.get(fileId);
    if (!chunkInfo) return null;

    return {
      uploaded: chunkInfo.uploadedChunks.size,
      total: chunkInfo.totalChunks,
      percentage: Math.round((chunkInfo.uploadedChunks.size / chunkInfo.totalChunks) * 100),
    };
  }

  /**
   * Cancel upload
   */
  async cancelUpload(fileId: string): Promise<void> {
    const chunkInfo = this.activeUploads.get(fileId);
    if (chunkInfo) {
      await this.cleanupTempFile(chunkInfo.tempPath);
      this.activeUploads.delete(fileId);
      this.finalizingUploads.delete(fileId);
    }
  }

  /**
   * Cleanup on shutdown
   */
  destroy(): void {
    if (this.cleanupInterval) {
      clearInterval(this.cleanupInterval);
    }

    for (const [fileId, chunkInfo] of this.activeUploads.entries()) {
      this.cleanupTempFile(chunkInfo.tempPath);
    }
    this.activeUploads.clear();
    this.finalizingUploads.clear();
  }
}
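The ChunkManager just removed reassembled browser-side chunks into a temp file on local disk before encrypting and moving it into place. With S3-compatible storage that job maps onto native multipart uploads, which is presumably why the class can be deleted outright. For reference, a sketch of a server-side streaming upload using the SDK's standard `Upload` helper from `@aws-sdk/lib-storage`; this helper is not something the commit adds, just the conventional replacement for hand-rolled chunk assembly:

import { Upload } from "@aws-sdk/lib-storage";

import { bucketName, s3Client } from "../../config/storage.config";

// Streams a body to S3 as a multipart upload, chunked and retried by the SDK.
export async function uploadStream(objectName: string, body: NodeJS.ReadableStream): Promise<void> {
  if (!s3Client) throw new Error("S3 client is not configured");

  const upload = new Upload({
    client: s3Client,
    params: { Bucket: bucketName, Key: objectName, Body: body },
    partSize: 8 * 1024 * 1024, // 8MB parts; each part is retried independently
    queueSize: 4, // concurrent part uploads
  });

  await upload.done();
}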
@@ -1,444 +0,0 @@
 | 
			
		||||
import * as fs from "fs";
 | 
			
		||||
import { pipeline } from "stream/promises";
 | 
			
		||||
import { FastifyReply, FastifyRequest } from "fastify";
 | 
			
		||||
 | 
			
		||||
import { FilesystemStorageProvider } from "../../providers/filesystem-storage.provider";
 | 
			
		||||
import { DownloadCancelResponse, QueueClearResponse, QueueStatusResponse } from "../../types/download-queue";
 | 
			
		||||
import { DownloadMemoryManager } from "../../utils/download-memory-manager";
 | 
			
		||||
import { getContentType } from "../../utils/mime-types";
 | 
			
		||||
import { ChunkManager, ChunkMetadata } from "./chunk-manager";
 | 
			
		||||
 | 
			
		||||
export class FilesystemController {
 | 
			
		||||
  private chunkManager = ChunkManager.getInstance();
 | 
			
		||||
  private memoryManager = DownloadMemoryManager.getInstance();
 | 
			
		||||
 | 
			
		||||
  /**
 | 
			
		||||
   * Check if a character is valid in an HTTP token (RFC 2616)
 | 
			
		||||
   * Tokens can contain: alphanumeric and !#$%&'*+-.^_`|~
 | 
			
		||||
   * Must exclude separators: ()<>@,;:\"/[]?={} and space/tab
 | 
			
		||||
   */
 | 
			
		||||
  private isTokenChar(char: string): boolean {
 | 
			
		||||
    const code = char.charCodeAt(0);
 | 
			
		||||
    // Basic ASCII range check
 | 
			
		||||
    if (code < 33 || code > 126) return false;
 | 
			
		||||
    // Exclude separator characters per RFC 2616
 | 
			
		||||
    const separators = '()<>@,;:\\"/[]?={} \t';
 | 
			
		||||
    return !separators.includes(char);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  private encodeFilenameForHeader(filename: string): string {
 | 
			
		||||
    if (!filename || filename.trim() === "") {
 | 
			
		||||
      return 'attachment; filename="download"';
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    let sanitized = filename
 | 
			
		||||
      .replace(/"/g, "'")
 | 
			
		||||
      .replace(/[\r\n\t\v\f]/g, "")
 | 
			
		||||
      .replace(/[\\|/]/g, "-")
 | 
			
		||||
      .replace(/[<>:|*?]/g, "");
 | 
			
		||||
 | 
			
		||||
    sanitized = sanitized
 | 
			
		||||
      .split("")
 | 
			
		||||
      .filter((char) => {
 | 
			
		||||
        const code = char.charCodeAt(0);
 | 
			
		||||
        return code >= 32 && !(code >= 127 && code <= 159);
 | 
			
		||||
      })
 | 
			
		||||
      .join("")
 | 
			
		||||
      .trim();
 | 
			
		||||
 | 
			
		||||
    if (!sanitized) {
 | 
			
		||||
      return 'attachment; filename="download"';
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // Create ASCII-safe version with only valid token characters
 | 
			
		||||
    const asciiSafe = sanitized
 | 
			
		||||
      .split("")
 | 
			
		||||
      .filter((char) => this.isTokenChar(char))
 | 
			
		||||
      .join("");
 | 
			
		||||
 | 
			
		||||
    if (asciiSafe && asciiSafe.trim()) {
 | 
			
		||||
      const encoded = encodeURIComponent(sanitized);
 | 
			
		||||
      return `attachment; filename="${asciiSafe}"; filename*=UTF-8''${encoded}`;
 | 
			
		||||
    } else {
 | 
			
		||||
      const encoded = encodeURIComponent(sanitized);
 | 
			
		||||
      return `attachment; filename*=UTF-8''${encoded}`;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  async upload(request: FastifyRequest, reply: FastifyReply) {
 | 
			
		||||
    try {
 | 
			
		||||
      const { token } = request.params as { token: string };
 | 
			
		||||
 | 
			
		||||
      const provider = FilesystemStorageProvider.getInstance();
 | 
			
		||||
 | 
			
		||||
      const tokenData = provider.validateUploadToken(token);
 | 
			
		||||
 | 
			
		||||
      if (!tokenData) {
 | 
			
		||||
        return reply.status(400).send({ error: "Invalid or expired upload token" });
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      const chunkMetadata = this.extractChunkMetadata(request);
 | 
			
		||||
 | 
			
		||||
      if (chunkMetadata) {
 | 
			
		||||
        try {
 | 
			
		||||
          const result = await this.handleChunkedUpload(request, chunkMetadata, tokenData.objectName);
 | 
			
		||||
 | 
			
		||||
          if (result.isComplete) {
 | 
			
		||||
            reply.status(200).send({
 | 
			
		||||
              message: "File uploaded successfully",
 | 
			
		||||
              objectName: result.finalPath,
 | 
			
		||||
              finalObjectName: result.finalPath,
 | 
			
		||||
            });
 | 
			
		||||
          } else {
 | 
			
		||||
            reply.status(200).send({
 | 
			
		||||
              message: "Chunk uploaded successfully",
 | 
			
		||||
              progress: this.chunkManager.getUploadProgress(chunkMetadata.fileId),
 | 
			
		||||
            });
 | 
			
		||||
          }
 | 
			
		||||
        } catch (chunkError: any) {
 | 
			
		||||
          return reply.status(400).send({
 | 
			
		||||
            error: chunkError.message || "Chunked upload failed",
 | 
			
		||||
            details: chunkError.toString(),
 | 
			
		||||
          });
 | 
			
		||||
        }
 | 
			
		||||
      } else {
 | 
			
		||||
        await this.uploadFileStream(request, provider, tokenData.objectName);
 | 
			
		||||
        reply.status(200).send({ message: "File uploaded successfully" });
 | 
			
		||||
      }
 | 
			
		||||
    } catch (error) {
 | 
			
		||||
      return reply.status(500).send({ error: "Internal server error" });
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  private async uploadFileStream(request: FastifyRequest, provider: FilesystemStorageProvider, objectName: string) {
 | 
			
		||||
    await provider.uploadFileFromStream(objectName, request.raw);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  private extractChunkMetadata(request: FastifyRequest): ChunkMetadata | null {
 | 
			
		||||
    const fileId = request.headers["x-file-id"] as string;
 | 
			
		||||
    const chunkIndex = request.headers["x-chunk-index"] as string;
 | 
			
		||||
    const totalChunks = request.headers["x-total-chunks"] as string;
 | 
			
		||||
    const chunkSize = request.headers["x-chunk-size"] as string;
 | 
			
		||||
    const totalSize = request.headers["x-total-size"] as string;
 | 
			
		||||
    const encodedFileName = request.headers["x-file-name"] as string;
 | 
			
		||||
    const isLastChunk = request.headers["x-is-last-chunk"] as string;
 | 
			
		||||
 | 
			
		||||
    if (!fileId || !chunkIndex || !totalChunks || !chunkSize || !totalSize || !encodedFileName) {
 | 
			
		||||
      return null;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // Decode the base64-encoded filename to handle UTF-8 characters
 | 
			
		||||
    let fileName: string;
 | 
			
		||||
    try {
 | 
			
		||||
      fileName = decodeURIComponent(escape(Buffer.from(encodedFileName, "base64").toString("binary")));
 | 
			
		||||
    } catch (error) {
 | 
			
		||||
      // Fallback to the encoded value if decoding fails (for backward compatibility)
 | 
			
		||||
      fileName = encodedFileName;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    const metadata = {
 | 
			
		||||
      fileId,
 | 
			
		||||
      chunkIndex: parseInt(chunkIndex, 10),
 | 
			
		||||
      totalChunks: parseInt(totalChunks, 10),
 | 
			
		||||
      chunkSize: parseInt(chunkSize, 10),
 | 
			
		||||
      totalSize: parseInt(totalSize, 10),
 | 
			
		||||
      fileName,
 | 
			
		||||
      isLastChunk: isLastChunk === "true",
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    return metadata;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  private async handleChunkedUpload(request: FastifyRequest, metadata: ChunkMetadata, originalObjectName: string) {
 | 
			
		||||
    const stream = request.raw;
 | 
			
		||||
 | 
			
		||||
    stream.on("error", (error) => {
 | 
			
		||||
      console.error("Request stream error:", error);
 | 
			
		||||
    });
 | 
			
		||||
 | 
			
		||||
    return await this.chunkManager.processChunk(metadata, stream, originalObjectName);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  async getUploadProgress(request: FastifyRequest, reply: FastifyReply) {
 | 
			
		||||
    try {
 | 
			
		||||
      const { fileId } = request.params as { fileId: string };
 | 
			
		||||
 | 
			
		||||
      const progress = this.chunkManager.getUploadProgress(fileId);
 | 
			
		||||
 | 
			
		||||
      if (!progress) {
 | 
			
		||||
        return reply.status(404).send({ error: "Upload not found" });
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      reply.status(200).send(progress);
 | 
			
		||||
    } catch (error) {
 | 
			
		||||
      return reply.status(500).send({ error: "Internal server error" });
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  async cancelUpload(request: FastifyRequest, reply: FastifyReply) {
 | 
			
		||||
    try {
 | 
			
		||||
      const { fileId } = request.params as { fileId: string };
 | 
			
		||||
 | 
			
		||||
      await this.chunkManager.cancelUpload(fileId);
 | 
			
		||||
 | 
			
		||||
      reply.status(200).send({ message: "Upload cancelled successfully" });
 | 
			
		||||
    } catch (error) {
 | 
			
		||||
      return reply.status(500).send({ error: "Internal server error" });
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  async download(request: FastifyRequest, reply: FastifyReply) {
 | 
			
		||||
    const downloadId = `${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;
 | 
			
		||||
 | 
			
		||||
    try {
 | 
			
		||||
      const { token } = request.params as { token: string };
 | 
			
		||||
      const provider = FilesystemStorageProvider.getInstance();
 | 
			
		||||
 | 
			
		||||
      const tokenData = provider.validateDownloadToken(token);
 | 
			
		||||
 | 
			
		||||
      if (!tokenData) {
 | 
			
		||||
        return reply.status(400).send({ error: "Invalid or expired download token" });
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      const filePath = provider.getFilePath(tokenData.objectName);
 | 
			
		||||
 | 
			
		||||
      const fileExists = await provider.fileExists(tokenData.objectName);
 | 
			
		||||
      if (!fileExists) {
 | 
			
		||||
        console.error(`[DOWNLOAD] File not found: ${tokenData.objectName}`);
 | 
			
		||||
        return reply.status(404).send({
 | 
			
		||||
          error: "File not found",
 | 
			
		||||
          message:
 | 
			
            "The requested file does not exist on the server. It may have been deleted or the upload was incomplete.",
        });
      }

      const stats = await fs.promises.stat(filePath);
      const fileSize = stats.size;
      const fileName = tokenData.fileName || "download";

      const fileSizeMB = fileSize / (1024 * 1024);
      console.log(`[DOWNLOAD] Requesting slot for ${downloadId}: ${tokenData.objectName} (${fileSizeMB.toFixed(1)}MB)`);

      try {
        await this.memoryManager.requestDownloadSlot(downloadId, {
          fileName,
          fileSize,
          objectName: tokenData.objectName,
        });
      } catch (error: any) {
        console.warn(`[DOWNLOAD] Queue full for ${downloadId}: ${error.message}`);
        return reply.status(503).send({
          error: "Download queue is full",
          message: error.message,
          retryAfter: 60,
        });
      }

      console.log(`[DOWNLOAD] Starting ${downloadId}: ${tokenData.objectName} (${fileSizeMB.toFixed(1)}MB)`);
      this.memoryManager.startDownload(downloadId);

      const range = request.headers.range;

      reply.header("Content-Disposition", this.encodeFilenameForHeader(fileName));
      reply.header("Content-Type", getContentType(fileName));
      reply.header("Accept-Ranges", "bytes");
      reply.header("X-Download-ID", downloadId);

      reply.raw.on("close", () => {
        this.memoryManager.endDownload(downloadId);
        console.log(`[DOWNLOAD] Client disconnected: ${downloadId}`);
      });

      reply.raw.on("error", () => {
        this.memoryManager.endDownload(downloadId);
        console.log(`[DOWNLOAD] Client error: ${downloadId}`);
      });

      try {
        if (range) {
          const parts = range.replace(/bytes=/, "").split("-");
          const start = parseInt(parts[0], 10);
          const end = parts[1] ? parseInt(parts[1], 10) : fileSize - 1;

          reply.status(206);
          reply.header("Content-Range", `bytes ${start}-${end}/${fileSize}`);
          reply.header("Content-Length", end - start + 1);

          await this.downloadFileRange(reply, provider, tokenData.objectName, start, end, downloadId);
        } else {
          reply.header("Content-Length", fileSize);
          await this.downloadFileStream(reply, provider, tokenData.objectName, downloadId);
        }
      } finally {
        this.memoryManager.endDownload(downloadId);
      }
    } catch (error) {
      this.memoryManager.endDownload(downloadId);
      console.error(`[DOWNLOAD] Error in ${downloadId}:`, error);
      return reply.status(500).send({ error: "Internal server error" });
    }
  }
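
The Range handling above follows standard HTTP semantics: a client asking for a byte range gets a 206 Partial Content with a matching Content-Range header. A minimal client sketch against the /filesystem/download/:token route defined further down (the 1 MiB window is illustrative):

// Hypothetical client: fetch the first 1 MiB of a file via a download token.
async function fetchFirstMiB(token: string): Promise<ArrayBuffer> {
  const res = await fetch(`/api/filesystem/download/${token}`, {
    headers: { Range: "bytes=0-1048575" },
  });
  if (res.status !== 206) {
    throw new Error(`Expected 206 Partial Content, got ${res.status}`);
  }
  console.log(res.headers.get("Content-Range")); // e.g. "bytes 0-1048575/52428800"
  return res.arrayBuffer();
}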

  private async downloadFileStream(
    reply: FastifyReply,
    provider: FilesystemStorageProvider,
    objectName: string,
    downloadId?: string
  ) {
    try {
      FilesystemStorageProvider.logMemoryUsage(`Download start: ${objectName} (${downloadId})`);

      const downloadStream = provider.createDownloadStream(objectName);

      downloadStream.on("error", (error) => {
        console.error("Download stream error:", error);
        FilesystemStorageProvider.logMemoryUsage(`Download error: ${objectName} (${downloadId})`);
        if (!reply.sent) {
          reply.status(500).send({ error: "Download failed" });
        }
      });

      reply.raw.on("close", () => {
        if (downloadStream.readable && typeof (downloadStream as any).destroy === "function") {
          (downloadStream as any).destroy();
        }
        FilesystemStorageProvider.logMemoryUsage(`Download client disconnect: ${objectName} (${downloadId})`);
      });

      if (this.memoryManager.shouldThrottleStream()) {
        console.log(
          `[MEMORY THROTTLE] ${objectName} - Pausing stream due to high memory usage: ${this.memoryManager.getCurrentMemoryUsageMB().toFixed(0)}MB`
        );

        const { Transform } = require("stream");
        const memoryManager = this.memoryManager;
        const throttleStream = new Transform({
          highWaterMark: 256 * 1024,
          transform(chunk: Buffer, _encoding: BufferEncoding, callback: (error?: Error | null, data?: any) => void) {
            if (memoryManager.shouldThrottleStream()) {
              setImmediate(() => {
                this.push(chunk);
                callback();
              });
            } else {
              this.push(chunk);
              callback();
            }
          },
        });

        await pipeline(downloadStream, throttleStream, reply.raw);
      } else {
        await pipeline(downloadStream, reply.raw);
      }

      FilesystemStorageProvider.logMemoryUsage(`Download complete: ${objectName} (${downloadId})`);
    } catch (error) {
      console.error("Download error:", error);
      FilesystemStorageProvider.logMemoryUsage(`Download failed: ${objectName} (${downloadId})`);
      if (!reply.sent) {
        reply.status(500).send({ error: "Download failed" });
      }
    }
  }
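
The throttling branch above defers each chunk through setImmediate while memory is high, yielding the event loop between chunks instead of pausing the stream outright. A minimal sketch of the kind of check shouldThrottleStream implies (the 512 MB threshold is an assumption, not taken from this diff):

import { Transform } from "stream";

// Hypothetical threshold: throttle once the heap passes 512 MB.
const THROTTLE_THRESHOLD_MB = 512;

function shouldThrottle(): boolean {
  return process.memoryUsage().heapUsed / (1024 * 1024) > THROTTLE_THRESHOLD_MB;
}

// Transform that yields the event loop between chunks while memory is high.
const throttle = new Transform({
  transform(chunk, _enc, callback) {
    if (shouldThrottle()) {
      setImmediate(() => callback(null, chunk)); // defer, giving GC a chance to run
    } else {
      callback(null, chunk); // pass through immediately
    }
  },
});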

  private async downloadFileRange(
    reply: FastifyReply,
    provider: FilesystemStorageProvider,
    objectName: string,
    start: number,
    end: number,
    downloadId?: string
  ) {
    try {
      FilesystemStorageProvider.logMemoryUsage(`Range download start: ${objectName} (${start}-${end}) (${downloadId})`);

      const rangeStream = await provider.createDownloadRangeStream(objectName, start, end);

      rangeStream.on("error", (error) => {
        console.error("Range download stream error:", error);
        FilesystemStorageProvider.logMemoryUsage(
          `Range download error: ${objectName} (${start}-${end}) (${downloadId})`
        );
        if (!reply.sent) {
          reply.status(500).send({ error: "Download failed" });
        }
      });

      reply.raw.on("close", () => {
        if (rangeStream.readable && typeof (rangeStream as any).destroy === "function") {
          (rangeStream as any).destroy();
        }
        FilesystemStorageProvider.logMemoryUsage(
          `Range download client disconnect: ${objectName} (${start}-${end}) (${downloadId})`
        );
      });

      await pipeline(rangeStream, reply.raw);

      FilesystemStorageProvider.logMemoryUsage(
        `Range download complete: ${objectName} (${start}-${end}) (${downloadId})`
      );
    } catch (error) {
      console.error("Range download error:", error);
      FilesystemStorageProvider.logMemoryUsage(
        `Range download failed: ${objectName} (${start}-${end}) (${downloadId})`
      );
      if (!reply.sent) {
        reply.status(500).send({ error: "Download failed" });
      }
    }
  }

  async getQueueStatus(_request: FastifyRequest, reply: FastifyReply) {
    try {
      const queueStatus = this.memoryManager.getQueueStatus();
      const response: QueueStatusResponse = {
        status: "success",
        data: queueStatus,
      };
      reply.status(200).send(response);
    } catch (error) {
      console.error("Error getting queue status:", error);
      return reply.status(500).send({ error: "Internal server error" });
    }
  }

  async cancelQueuedDownload(request: FastifyRequest, reply: FastifyReply) {
    try {
      const { downloadId } = request.params as { downloadId: string };

      const cancelled = this.memoryManager.cancelQueuedDownload(downloadId);

      if (cancelled) {
        const response: DownloadCancelResponse = {
          message: "Download cancelled successfully",
          downloadId,
        };
        reply.status(200).send(response);
      } else {
        reply.status(404).send({
          error: "Download not found in queue",
          downloadId,
        });
      }
    } catch (error) {
      console.error("Error cancelling queued download:", error);
      return reply.status(500).send({ error: "Internal server error" });
    }
  }

  async clearDownloadQueue(_request: FastifyRequest, reply: FastifyReply) {
    try {
      const clearedCount = this.memoryManager.clearQueue();
      const response: QueueClearResponse = {
        message: "Download queue cleared successfully",
        clearedCount,
      };
      reply.status(200).send(response);
    } catch (error) {
      console.error("Error clearing download queue:", error);
      return reply.status(500).send({ error: "Internal server error" });
    }
  }
}
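
Taken together, these handlers let a client poll the queue and cancel its own entry. A hedged usage sketch against the queue routes in the file below (the /api prefix and 5 s poll interval are assumptions):

// Poll the download queue until our download leaves it (started or cancelled).
async function waitForSlot(downloadId: string): Promise<void> {
  for (;;) {
    const res = await fetch("/api/filesystem/download-queue/status");
    const { data } = await res.json();
    const entry = data.queuedDownloads.find((d: any) => d.downloadId === downloadId);
    if (!entry) return; // no longer queued: either started or finished
    console.log(`Position ${entry.position}, waited ${entry.waitTime}ms`);
    await new Promise((r) => setTimeout(r, 5_000));
  }
}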

@@ -1,95 +0,0 @@
import { FastifyInstance } from "fastify";
import { z } from "zod";

import { FilesystemController } from "./controller";

export async function downloadQueueRoutes(app: FastifyInstance) {
  const filesystemController = new FilesystemController();

  app.get(
    "/filesystem/download-queue/status",
    {
      schema: {
        tags: ["Download Queue"],
        operationId: "getDownloadQueueStatus",
        summary: "Get download queue status",
        description: "Get current status of the download queue including active downloads and queue length",
        response: {
          200: z.object({
            status: z.string(),
            data: z.object({
              queueLength: z.number(),
              maxQueueSize: z.number(),
              activeDownloads: z.number(),
              maxConcurrent: z.number(),
              queuedDownloads: z.array(
                z.object({
                  downloadId: z.string(),
                  position: z.number(),
                  waitTime: z.number(),
                  fileName: z.string().optional(),
                  fileSize: z.number().optional(),
                })
              ),
            }),
          }),
          500: z.object({
            error: z.string(),
          }),
        },
      },
    },
    filesystemController.getQueueStatus.bind(filesystemController)
  );

  app.delete(
    "/filesystem/download-queue/:downloadId",
    {
      schema: {
        tags: ["Download Queue"],
        operationId: "cancelQueuedDownload",
        summary: "Cancel a queued download",
        description: "Cancel a specific download that is waiting in the queue",
        params: z.object({
          downloadId: z.string().describe("Download ID"),
        }),
        response: {
          200: z.object({
            message: z.string(),
            downloadId: z.string(),
          }),
          404: z.object({
            error: z.string(),
            downloadId: z.string(),
          }),
          500: z.object({
            error: z.string(),
          }),
        },
      },
    },
    filesystemController.cancelQueuedDownload.bind(filesystemController)
  );

  app.delete(
    "/filesystem/download-queue",
    {
      schema: {
        tags: ["Download Queue"],
        operationId: "clearDownloadQueue",
        summary: "Clear entire download queue",
        description: "Cancel all downloads waiting in the queue (admin operation)",
        response: {
          200: z.object({
            message: z.string(),
            clearedCount: z.number(),
          }),
          500: z.object({
            error: z.string(),
          }),
        },
      },
    },
    filesystemController.clearDownloadQueue.bind(filesystemController)
  );
}

@@ -1,123 +0,0 @@
import { FastifyInstance, FastifyRequest } from "fastify";
import { z } from "zod";

import { FilesystemController } from "./controller";

export async function filesystemRoutes(app: FastifyInstance) {
  const filesystemController = new FilesystemController();

  app.addContentTypeParser("*", async (request: FastifyRequest, payload: any) => {
    return payload;
  });

  app.addContentTypeParser("application/json", async (request: FastifyRequest, payload: any) => {
    return payload;
  });

  app.put(
    "/filesystem/upload/:token",
    {
      bodyLimit: 1024 * 1024 * 1024 * 1024 * 1024, // 1PB limit
      schema: {
        tags: ["Filesystem"],
        operationId: "uploadToFilesystem",
        summary: "Upload file to filesystem storage",
        description: "Upload a file directly to the encrypted filesystem storage",
        params: z.object({
          token: z.string().describe("Upload token"),
        }),
        response: {
          200: z.object({
            message: z.string(),
          }),
          400: z.object({
            error: z.string(),
          }),
          500: z.object({
            error: z.string(),
          }),
        },
      },
    },
    filesystemController.upload.bind(filesystemController)
  );

  app.get(
    "/filesystem/download/:token",
    {
      bodyLimit: 1024 * 1024 * 1024 * 1024 * 1024, // 1PB limit
      schema: {
        tags: ["Filesystem"],
        operationId: "downloadFromFilesystem",
        summary: "Download file from filesystem storage",
        description: "Download a file directly from the encrypted filesystem storage",
        params: z.object({
          token: z.string().describe("Download token"),
        }),
        response: {
          200: z.string().describe("File content"),
          400: z.object({
            error: z.string(),
          }),
          500: z.object({
            error: z.string(),
          }),
        },
      },
    },
    filesystemController.download.bind(filesystemController)
  );

  app.get(
    "/filesystem/upload-progress/:fileId",
    {
      schema: {
        tags: ["Filesystem"],
        operationId: "getUploadProgress",
        summary: "Get chunked upload progress",
        description: "Get the progress of a chunked upload",
        params: z.object({
          fileId: z.string().describe("File ID"),
        }),
        response: {
          200: z.object({
            uploaded: z.number(),
            total: z.number(),
            percentage: z.number(),
          }),
          404: z.object({
            error: z.string(),
          }),
          500: z.object({
            error: z.string(),
          }),
        },
      },
    },
    filesystemController.getUploadProgress.bind(filesystemController)
  );

  app.delete(
    "/filesystem/cancel-upload/:fileId",
    {
      schema: {
        tags: ["Filesystem"],
        operationId: "cancelUpload",
        summary: "Cancel chunked upload",
        description: "Cancel an ongoing chunked upload",
        params: z.object({
          fileId: z.string().describe("File ID"),
        }),
        response: {
          200: z.object({
            message: z.string(),
          }),
          500: z.object({
            error: z.string(),
          }),
        },
      },
    },
    filesystemController.cancelUpload.bind(filesystemController)
  );
}

@@ -1,5 +1,3 @@
import { isS3Enabled } from "../../config/storage.config";
import { FilesystemStorageProvider } from "../../providers/filesystem-storage.provider";
import { S3StorageProvider } from "../../providers/s3-storage.provider";
import { prisma } from "../../shared/prisma";
import { StorageProvider } from "../../types/storage";
@@ -8,11 +6,8 @@ export class FolderService {
  private storageProvider: StorageProvider;

  constructor() {
    if (isS3Enabled) {
      this.storageProvider = new S3StorageProvider();
    } else {
      this.storageProvider = FilesystemStorageProvider.getInstance();
    }
    // Always use S3 (Garage internal or external S3)
    this.storageProvider = new S3StorageProvider();
  }

  async getPresignedPutUrl(objectName: string, expires: number): Promise<string> {
@@ -42,10 +37,6 @@ export class FolderService {
    }
  }

  isFilesystemMode(): boolean {
    return !isS3Enabled;
  }

  async getAllFilesInFolder(folderId: string, userId: string, basePath: string = ""): Promise<any[]> {
    const files = await prisma.file.findMany({
      where: { folderId, userId },

@@ -319,59 +319,12 @@ export class ReverseShareController {

      const { fileId } = request.params as { fileId: string };

      const fileInfo = await this.reverseShareService.getFileInfo(fileId, userId);
      const downloadId = `reverse-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;
      // Pass request context for internal storage proxy URLs
      const requestContext = { protocol: "https", host: "localhost" }; // Simplified - frontend will handle the real URL

      const { DownloadMemoryManager } = await import("../../utils/download-memory-manager.js");
      const memoryManager = DownloadMemoryManager.getInstance();
      const result = await this.reverseShareService.downloadReverseShareFile(fileId, userId, requestContext);

      const fileSizeMB = Number(fileInfo.size) / (1024 * 1024);
      console.log(
        `[REVERSE-DOWNLOAD] Requesting slot for ${downloadId}: ${fileInfo.name} (${fileSizeMB.toFixed(1)}MB)`
      );

      try {
        await memoryManager.requestDownloadSlot(downloadId, {
          fileName: fileInfo.name,
          fileSize: Number(fileInfo.size),
          objectName: fileInfo.objectName,
        });
      } catch (error: any) {
        console.warn(`[REVERSE-DOWNLOAD] Queued ${downloadId}: ${error.message}`);
        return reply.status(202).send({
          queued: true,
          downloadId: downloadId,
          message: "Download queued due to memory constraints",
          estimatedWaitTime: error.estimatedWaitTime || 60,
        });
      }

      console.log(`[REVERSE-DOWNLOAD] Starting ${downloadId}: ${fileInfo.name} (${fileSizeMB.toFixed(1)}MB)`);
      memoryManager.startDownload(downloadId);

      try {
        const result = await this.reverseShareService.downloadReverseShareFile(fileId, userId);

        const originalUrl = result.url;
        reply.header("X-Download-ID", downloadId);

        reply.raw.on("finish", () => {
          memoryManager.endDownload(downloadId);
        });

        reply.raw.on("close", () => {
          memoryManager.endDownload(downloadId);
        });

        reply.raw.on("error", () => {
          memoryManager.endDownload(downloadId);
        });

        return reply.send(result);
      } catch (downloadError) {
        memoryManager.endDownload(downloadId);
        throw downloadError;
      }
      return reply.send(result);
    } catch (error: any) {
      if (error.message === "File not found") {
        return reply.status(404).send({ error: error.message });
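
Note the asymmetry with the regular download controller: when the slot request fails here, the handler answers 202 with `queued: true` rather than 503. A hedged client sketch for that contract (the route path is an assumption, not taken from this diff):

// Hypothetical client handling for the 202 "queued" response above.
async function downloadReverseShareFile(fileId: string): Promise<unknown> {
  for (;;) {
    const res = await fetch(`/api/reverse-shares/files/${fileId}/download`); // path is an assumption
    if (res.status !== 202) return res.json(); // payload with the presigned/proxy URL
    const body = await res.json();
    await new Promise((r) => setTimeout(r, (body.estimatedWaitTime ?? 60) * 1000));
  }
}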

@@ -228,9 +228,21 @@ export class ReverseShareService {
    }

    const expires = parseInt(env.PRESIGNED_URL_EXPIRATION);
    const url = await this.fileService.getPresignedPutUrl(objectName, expires);

    return { url, expiresIn: expires };
    // Import storage config to check if using internal or external S3
    const { isInternalStorage } = await import("../../config/storage.config.js");

    if (isInternalStorage) {
      // Internal storage: Use backend proxy for uploads (127.0.0.1 not accessible from client)
      // Note: This would need request context, but reverse-shares are typically used by external users
      // For now, we'll use presigned URLs and handle the error on the client side
      const url = await this.fileService.getPresignedPutUrl(objectName, expires);
      return { url, expiresIn: expires };
    } else {
      // External S3: Use presigned URLs directly (more efficient)
      const url = await this.fileService.getPresignedPutUrl(objectName, expires);
      return { url, expiresIn: expires };
    }
  }
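
Either branch ultimately hands the caller a presigned PUT URL, which an uploader consumes with a plain HTTP PUT; no Palmr-specific client is required. A minimal sketch:

// Upload a Blob/File to a presigned PUT URL returned by the service above.
async function uploadToPresignedUrl(url: string, file: Blob): Promise<void> {
  const res = await fetch(url, {
    method: "PUT",
    body: file,
    headers: { "Content-Type": "application/octet-stream" },
  });
  if (!res.ok) {
    throw new Error(`Upload failed: ${res.status} ${res.statusText}`);
  }
}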

  async getPresignedUrlByAlias(alias: string, objectName: string, password?: string) {
@@ -258,9 +270,21 @@ export class ReverseShareService {
    }

    const expires = parseInt(env.PRESIGNED_URL_EXPIRATION);
    const url = await this.fileService.getPresignedPutUrl(objectName, expires);

    return { url, expiresIn: expires };
    // Import storage config to check if using internal or external S3
    const { isInternalStorage } = await import("../../config/storage.config.js");

    if (isInternalStorage) {
      // Internal storage: Use backend proxy for uploads (127.0.0.1 not accessible from client)
      // Note: This would need request context, but reverse-shares are typically used by external users
      // For now, we'll use presigned URLs and handle the error on the client side
      const url = await this.fileService.getPresignedPutUrl(objectName, expires);
      return { url, expiresIn: expires };
    } else {
      // External S3: Use presigned URLs directly (more efficient)
      const url = await this.fileService.getPresignedPutUrl(objectName, expires);
      return { url, expiresIn: expires };
    }
  }

  async registerFileUpload(reverseShareId: string, fileData: UploadToReverseShareInput, password?: string) {
@@ -386,7 +410,11 @@ export class ReverseShareService {
    };
  }

  async downloadReverseShareFile(fileId: string, creatorId: string) {
  async downloadReverseShareFile(
    fileId: string,
    creatorId: string,
    requestContext?: { protocol: string; host: string }
  ) {
    const file = await this.reverseShareRepository.findFileById(fileId);
    if (!file) {
      throw new Error("File not found");
@@ -398,8 +426,19 @@ export class ReverseShareService {

    const fileName = file.name;
    const expires = parseInt(env.PRESIGNED_URL_EXPIRATION);
    const url = await this.fileService.getPresignedGetUrl(file.objectName, expires, fileName);
    return { url, expiresIn: expires };

    // Import storage config to check if using internal or external S3
    const { isInternalStorage } = await import("../../config/storage.config.js");

    if (isInternalStorage) {
      // Internal storage: Use frontend proxy (much simpler!)
      const url = `/api/files/download?objectName=${encodeURIComponent(file.objectName)}`;
      return { url, expiresIn: expires };
    } else {
      // External S3: Use presigned URLs directly (more efficient, no backend proxy)
      const url = await this.fileService.getPresignedGetUrl(file.objectName, expires, fileName);
      return { url, expiresIn: expires };
    }
  }

  async deleteReverseShareFile(fileId: string, creatorId: string) {
@@ -568,76 +607,58 @@ export class ReverseShareService {

    const newObjectName = `${creatorId}/${Date.now()}-${file.name}`;

    if (this.fileService.isFilesystemMode()) {
      const { FilesystemStorageProvider } = await import("../../providers/filesystem-storage.provider.js");
      const provider = FilesystemStorageProvider.getInstance();
    // Copy file using S3 presigned URLs
    const fileSizeMB = Number(file.size) / (1024 * 1024);
    const needsStreaming = fileSizeMB > 100;

      const sourcePath = provider.getFilePath(file.objectName);
      const fs = await import("fs");
    const downloadUrl = await this.fileService.getPresignedGetUrl(file.objectName, 300);
    const uploadUrl = await this.fileService.getPresignedPutUrl(newObjectName, 300);

      const targetPath = provider.getFilePath(newObjectName);
    let retries = 0;
    const maxRetries = 3;
    let success = false;

      const path = await import("path");
      const targetDir = path.dirname(targetPath);
      if (!fs.existsSync(targetDir)) {
        fs.mkdirSync(targetDir, { recursive: true });
      }
    while (retries < maxRetries && !success) {
      try {
        const response = await fetch(downloadUrl, {
          signal: AbortSignal.timeout(600000), // 10 minutes timeout
        });

      const { copyFile } = await import("fs/promises");
      await copyFile(sourcePath, targetPath);
    } else {
      const fileSizeMB = Number(file.size) / (1024 * 1024);
      const needsStreaming = fileSizeMB > 100;

      const downloadUrl = await this.fileService.getPresignedGetUrl(file.objectName, 300);
      const uploadUrl = await this.fileService.getPresignedPutUrl(newObjectName, 300);

      let retries = 0;
      const maxRetries = 3;
      let success = false;

      while (retries < maxRetries && !success) {
        try {
          const response = await fetch(downloadUrl, {
            signal: AbortSignal.timeout(600000), // 10 minutes timeout
          });

          if (!response.ok) {
            throw new Error(`Failed to download file: ${response.statusText}`);
          }

          if (!response.body) {
            throw new Error("No response body received");
          }

          const uploadOptions: any = {
            method: "PUT",
            body: response.body,
            headers: {
              "Content-Type": "application/octet-stream",
              "Content-Length": file.size.toString(),
            },
            signal: AbortSignal.timeout(600000), // 10 minutes timeout
          };

          const uploadResponse = await fetch(uploadUrl, uploadOptions);

          if (!uploadResponse.ok) {
            const errorText = await uploadResponse.text();
            throw new Error(`Failed to upload file: ${uploadResponse.statusText} - ${errorText}`);
          }

          success = true;
        } catch (error: any) {
          retries++;

          if (retries >= maxRetries) {
            throw new Error(`Failed to copy file after ${maxRetries} attempts: ${error.message}`);
          }

          const delay = Math.min(1000 * Math.pow(2, retries - 1), 10000);
          await new Promise((resolve) => setTimeout(resolve, delay));
        if (!response.ok) {
          throw new Error(`Failed to download file: ${response.statusText}`);
        }

        if (!response.body) {
          throw new Error("No response body received");
        }

        const uploadOptions: any = {
          method: "PUT",
          body: response.body,
          headers: {
            "Content-Type": "application/octet-stream",
            "Content-Length": file.size.toString(),
          },
          signal: AbortSignal.timeout(600000), // 10 minutes timeout
        };

        const uploadResponse = await fetch(uploadUrl, uploadOptions);

        if (!uploadResponse.ok) {
          const errorText = await uploadResponse.text();
          throw new Error(`Failed to upload file: ${uploadResponse.statusText} - ${errorText}`);
        }

        success = true;
      } catch (error: any) {
        retries++;

        if (retries >= maxRetries) {
          throw new Error(`Failed to copy file after ${maxRetries} attempts: ${error.message}`);
        }

        const delay = Math.min(1000 * Math.pow(2, retries - 1), 10000);
        await new Promise((resolve) => setTimeout(resolve, delay));
      }
    }
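
The backoff formula in the copy loop, Math.min(1000 * 2^(retries - 1), 10000), waits 1 s after the first failure and 2 s after the second; the 10 s cap only matters for higher retry counts. Extracted as a reusable helper (a sketch, not code from this diff):

// Generic retry with the same capped exponential backoff as the copy loop above.
async function withRetry<T>(fn: () => Promise<T>, maxRetries = 3): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await fn();
    } catch (error: any) {
      if (attempt >= maxRetries) {
        throw new Error(`Failed after ${maxRetries} attempts: ${error.message}`);
      }
      const delay = Math.min(1000 * Math.pow(2, attempt - 1), 10000); // 1s, 2s, 4s... capped at 10s
      await new Promise((resolve) => setTimeout(resolve, delay));
    }
  }
}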

apps/server/src/modules/s3-storage/controller.ts (new file, 174 lines)
@@ -0,0 +1,174 @@
/**
 * S3 Storage Controller (Simplified)
 *
 * This controller handles uploads/downloads using S3-compatible storage (Garage).
 * It's much simpler than the filesystem controller because:
 * - Uses S3 multipart uploads (no chunk management needed)
 * - Uses presigned URLs (no streaming through Node.js)
 * - No memory management needed (Garage handles it)
 * - No encryption needed (Garage handles it)
 *
 * Replaces ~800 lines of complex code with ~100 lines of simple code.
 */

import { FastifyReply, FastifyRequest } from "fastify";

import { S3StorageProvider } from "../../providers/s3-storage.provider";

export class S3StorageController {
  private storageProvider = new S3StorageProvider();

  /**
   * Generate presigned upload URL
   * Client uploads directly to S3 (Garage)
   */
  async getUploadUrl(request: FastifyRequest, reply: FastifyReply) {
    try {
      const { objectName, expires } = request.body as { objectName: string; expires?: number };

      if (!objectName) {
        return reply.status(400).send({ error: "objectName is required" });
      }

      const expiresIn = expires || 3600; // 1 hour default

      // Import storage config to check if using internal or external S3
      const { isInternalStorage } = await import("../../config/storage.config.js");

      let uploadUrl: string;

      if (isInternalStorage) {
        // Internal storage: Use frontend proxy (much simpler!)
        uploadUrl = `/api/files/upload?objectName=${encodeURIComponent(objectName)}`;
      } else {
        // External S3: Use presigned URLs directly (more efficient)
        uploadUrl = await this.storageProvider.getPresignedPutUrl(objectName, expiresIn);
      }

      return reply.status(200).send({
        uploadUrl,
        objectName,
        expiresIn,
        message: isInternalStorage ? "Upload via backend proxy" : "Upload directly to this URL using PUT request",
      });
    } catch (error) {
      console.error("[S3] Error generating upload URL:", error);
      return reply.status(500).send({ error: "Failed to generate upload URL" });
    }
  }

  /**
   * Generate presigned download URL
   * For internal storage: Uses backend proxy
   * For external S3: Uses presigned URLs directly
   */
  async getDownloadUrl(request: FastifyRequest, reply: FastifyReply) {
    try {
      const { objectName, expires, fileName } = request.query as {
        objectName: string;
        expires?: string;
        fileName?: string;
      };

      if (!objectName) {
        return reply.status(400).send({ error: "objectName is required" });
      }

      // Check if file exists
      const exists = await this.storageProvider.fileExists(objectName);
      if (!exists) {
        return reply.status(404).send({ error: "File not found" });
      }

      const expiresIn = expires ? parseInt(expires, 10) : 3600;

      // Import storage config to check if using internal or external S3
      const { isInternalStorage } = await import("../../config/storage.config.js");

      let downloadUrl: string;

      if (isInternalStorage) {
        // Internal storage: Use frontend proxy (much simpler!)
        downloadUrl = `/api/files/download?objectName=${encodeURIComponent(objectName)}`;
      } else {
        // External S3: Use presigned URLs directly (more efficient)
        downloadUrl = await this.storageProvider.getPresignedGetUrl(objectName, expiresIn, fileName);
      }

      return reply.status(200).send({
        downloadUrl,
        objectName,
        expiresIn,
        message: isInternalStorage ? "Download via backend proxy" : "Download directly from this URL",
      });
    } catch (error) {
      console.error("[S3] Error generating download URL:", error);
      return reply.status(500).send({ error: "Failed to generate download URL" });
    }
  }

  /**
   * Upload directly (for small files)
   * Receives file and uploads to S3
   */
  async upload(request: FastifyRequest, reply: FastifyReply) {
    try {
      // For large files, clients should use presigned URLs
      // This is just for backward compatibility or small files

      return reply.status(501).send({
        error: "Not implemented",
        message: "Use getUploadUrl endpoint for efficient uploads",
      });
    } catch (error) {
      console.error("[S3] Error in upload:", error);
      return reply.status(500).send({ error: "Upload failed" });
    }
  }

  /**
   * Delete object from S3
   */
  async deleteObject(request: FastifyRequest, reply: FastifyReply) {
    try {
      const { objectName } = request.params as { objectName: string };

      if (!objectName) {
        return reply.status(400).send({ error: "objectName is required" });
      }

      await this.storageProvider.deleteObject(objectName);

      return reply.status(200).send({
        message: "Object deleted successfully",
        objectName,
      });
    } catch (error) {
      console.error("[S3] Error deleting object:", error);
      return reply.status(500).send({ error: "Failed to delete object" });
    }
  }

  /**
   * Check if object exists
   */
  async checkExists(request: FastifyRequest, reply: FastifyReply) {
    try {
      const { objectName } = request.query as { objectName: string };

      if (!objectName) {
        return reply.status(400).send({ error: "objectName is required" });
      }

      const exists = await this.storageProvider.fileExists(objectName);

      return reply.status(200).send({
        exists,
        objectName,
      });
    } catch (error) {
      console.error("[S3] Error checking existence:", error);
      return reply.status(500).send({ error: "Failed to check existence" });
    }
  }
}
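
The intended flow is two round-trips: ask the API where to upload, then PUT the bytes to whatever URL comes back (backend proxy for internal storage, presigned S3 URL for external). A hedged end-to-end sketch against the /s3/upload-url route defined in the file that follows (the /api prefix is an assumption):

// Two-step upload against the s3-storage routes below.
async function uploadFile(objectName: string, data: Blob): Promise<void> {
  // Step 1: ask the backend where to upload ("/api" prefix is an assumption).
  const res = await fetch("/api/s3/upload-url", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ objectName, expires: 3600 }),
  });
  const { uploadUrl } = await res.json();

  // Step 2: PUT directly to the returned URL (proxy or presigned S3).
  const put = await fetch(uploadUrl, { method: "PUT", body: data });
  if (!put.ok) throw new Error(`Upload failed: ${put.status}`);
}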

apps/server/src/modules/s3-storage/routes.ts (new file, 112 lines)
@@ -0,0 +1,112 @@
/**
 * S3 Storage Routes
 *
 * Simple routes for S3-based storage using presigned URLs.
 * Much simpler than filesystem routes - no chunk management, no streaming.
 */

import { FastifyInstance } from "fastify";
import { z } from "zod";

import { S3StorageController } from "./controller";

export async function s3StorageRoutes(app: FastifyInstance) {
  const controller = new S3StorageController();

  // Get presigned upload URL
  app.post(
    "/s3/upload-url",
    {
      schema: {
        tags: ["S3 Storage"],
        operationId: "getS3UploadUrl",
        summary: "Get presigned URL for upload",
        description: "Returns a presigned URL that clients can use to upload directly to S3",
        body: z.object({
          objectName: z.string().describe("Object name/path in S3"),
          expires: z.number().optional().describe("URL expiration in seconds (default: 3600)"),
        }),
        response: {
          200: z.object({
            uploadUrl: z.string(),
            objectName: z.string(),
            expiresIn: z.number(),
            message: z.string(),
          }),
        },
      },
    },
    controller.getUploadUrl.bind(controller)
  );

  // Get presigned download URL
  app.get(
    "/s3/download-url",
    {
      schema: {
        tags: ["S3 Storage"],
        operationId: "getS3DownloadUrl",
        summary: "Get presigned URL for download",
        description: "Returns a presigned URL that clients can use to download directly from S3",
        querystring: z.object({
          objectName: z.string().describe("Object name/path in S3"),
          expires: z.string().optional().describe("URL expiration in seconds (default: 3600)"),
          fileName: z.string().optional().describe("Optional filename for download"),
        }),
        response: {
          200: z.object({
            downloadUrl: z.string(),
            objectName: z.string(),
            expiresIn: z.number(),
            message: z.string(),
          }),
        },
      },
    },
    controller.getDownloadUrl.bind(controller)
  );

  // Delete object
  app.delete(
    "/s3/object/:objectName",
    {
      schema: {
        tags: ["S3 Storage"],
        operationId: "deleteS3Object",
        summary: "Delete object from S3",
        params: z.object({
          objectName: z.string().describe("Object name/path in S3"),
        }),
        response: {
          200: z.object({
            message: z.string(),
            objectName: z.string(),
          }),
        },
      },
    },
    controller.deleteObject.bind(controller)
  );

  // Check if object exists
  app.get(
    "/s3/exists",
    {
      schema: {
        tags: ["S3 Storage"],
        operationId: "checkS3ObjectExists",
        summary: "Check if object exists in S3",
        querystring: z.object({
          objectName: z.string().describe("Object name/path in S3"),
        }),
        response: {
          200: z.object({
            exists: z.boolean(),
            objectName: z.string(),
          }),
        },
      },
    },
    controller.checkExists.bind(controller)
  );
}
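
Downloads mirror the upload flow: one GET to resolve the URL, one GET for the bytes. A short sketch (again assuming an /api prefix):

// Resolve and follow a download URL from the /s3/download-url endpoint above.
async function downloadFile(objectName: string): Promise<Blob> {
  const qs = new URLSearchParams({ objectName, fileName: objectName.split("/").pop() ?? "download" });
  const res = await fetch(`/api/s3/download-url?${qs}`);
  if (!res.ok) throw new Error(`Could not resolve URL: ${res.status}`);
  const { downloadUrl } = await res.json();
  const file = await fetch(downloadUrl);
  return file.blob();
}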

@@ -1,715 +0,0 @@
import * as crypto from "crypto";
import * as fsSync from "fs";
import * as fs from "fs/promises";
import * as path from "path";
import { Transform } from "stream";
import { pipeline } from "stream/promises";

import { directoriesConfig, getTempFilePath } from "../config/directories.config";
import { env } from "../env";
import { StorageProvider } from "../types/storage";

export class FilesystemStorageProvider implements StorageProvider {
  private static instance: FilesystemStorageProvider;
  private uploadsDir: string;
  private encryptionKey = env.ENCRYPTION_KEY;
  private isEncryptionDisabled = env.DISABLE_FILESYSTEM_ENCRYPTION === "true";
  private uploadTokens = new Map<string, { objectName: string; expiresAt: number }>();
  private downloadTokens = new Map<string, { objectName: string; expiresAt: number; fileName?: string }>();

  private constructor() {
    this.uploadsDir = directoriesConfig.uploads;

    if (!this.isEncryptionDisabled && !this.encryptionKey) {
      throw new Error(
        "Encryption is enabled but ENCRYPTION_KEY is not provided. " +
          "Please set ENCRYPTION_KEY environment variable or set DISABLE_FILESYSTEM_ENCRYPTION=true to disable encryption."
      );
    }

    this.ensureUploadsDir();
    setInterval(() => this.cleanExpiredTokens(), 5 * 60 * 1000);
    setInterval(() => this.cleanupEmptyTempDirs(), 10 * 60 * 1000);
  }

  public static getInstance(): FilesystemStorageProvider {
    if (!FilesystemStorageProvider.instance) {
      FilesystemStorageProvider.instance = new FilesystemStorageProvider();
    }
    return FilesystemStorageProvider.instance;
  }

  private async ensureUploadsDir(): Promise<void> {
    try {
      await fs.access(this.uploadsDir);
    } catch {
      await fs.mkdir(this.uploadsDir, { recursive: true });
    }
  }

  private cleanExpiredTokens(): void {
    const now = Date.now();

    for (const [token, data] of this.uploadTokens.entries()) {
      if (now > data.expiresAt) {
        this.uploadTokens.delete(token);
      }
    }

    for (const [token, data] of this.downloadTokens.entries()) {
      if (now > data.expiresAt) {
        this.downloadTokens.delete(token);
      }
    }
  }

  public getFilePath(objectName: string): string {
    const sanitizedName = objectName.replace(/[^a-zA-Z0-9\-_./]/g, "_");
    return path.join(this.uploadsDir, sanitizedName);
  }

  private createEncryptionKey(): Buffer {
    if (!this.encryptionKey) {
      throw new Error(
        "Encryption key is required when encryption is enabled. Please set ENCRYPTION_KEY environment variable."
      );
    }
    return crypto.scryptSync(this.encryptionKey, "salt", 32);
  }

  public createEncryptStream(): Transform {
    if (this.isEncryptionDisabled) {
      return new Transform({
        highWaterMark: 64 * 1024,
        transform(chunk, _encoding, callback) {
          this.push(chunk);
          callback();
        },
      });
    }

    const key = this.createEncryptionKey();
    const iv = crypto.randomBytes(16);
    const cipher = crypto.createCipheriv("aes-256-cbc", key, iv);

    let isFirstChunk = true;

    return new Transform({
      highWaterMark: 64 * 1024,
      transform(chunk, _encoding, callback) {
        try {
          if (isFirstChunk) {
            this.push(iv);
            isFirstChunk = false;
          }

          const encrypted = cipher.update(chunk);
          this.push(encrypted);
          callback();
        } catch (error) {
          callback(error as Error);
        }
      },

      flush(callback) {
        try {
          const final = cipher.final();
          this.push(final);
          callback();
        } catch (error) {
          callback(error as Error);
        }
      },
    });
  }

  public createDecryptStream(): Transform {
    if (this.isEncryptionDisabled) {
      return new Transform({
        highWaterMark: 64 * 1024,
        transform(chunk, _encoding, callback) {
          this.push(chunk);
          callback();
        },
      });
    }

    const key = this.createEncryptionKey();
    let iv: Buffer | null = null;
    let decipher: crypto.Decipher | null = null;
    let ivBuffer = Buffer.alloc(0);

    return new Transform({
      highWaterMark: 64 * 1024,
      transform(chunk, _encoding, callback) {
        try {
          if (!iv) {
            ivBuffer = Buffer.concat([ivBuffer, chunk]);

            if (ivBuffer.length >= 16) {
              iv = ivBuffer.subarray(0, 16);
              decipher = crypto.createDecipheriv("aes-256-cbc", key, iv);
              const remainingData = ivBuffer.subarray(16);
              if (remainingData.length > 0) {
                const decrypted = decipher.update(remainingData);
                this.push(decrypted);
              }
            }
            callback();
            return;
          }

          if (decipher) {
            const decrypted = decipher.update(chunk);
            this.push(decrypted);
          }
          callback();
        } catch (error) {
          callback(error as Error);
        }
      },

      flush(callback) {
        try {
          if (decipher) {
            const final = decipher.final();
            this.push(final);
          }
          callback();
        } catch (error) {
          callback(error as Error);
        }
      },
    });
  }
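
The two streams above fix the on-disk format of this now-removed provider: a random 16-byte IV comes first, followed by the AES-256-CBC ciphertext, with the key derived via scrypt from ENCRYPTION_KEY. A standalone round-trip of that same format, buffer-based for brevity:

import * as crypto from "crypto";

// Round-trip the IV-prefixed AES-256-CBC format used by the removed provider.
function encrypt(plaintext: Buffer, passphrase: string): Buffer {
  const key = crypto.scryptSync(passphrase, "salt", 32); // same derivation as above
  const iv = crypto.randomBytes(16);
  const cipher = crypto.createCipheriv("aes-256-cbc", key, iv);
  return Buffer.concat([iv, cipher.update(plaintext), cipher.final()]); // IV || ciphertext
}

function decrypt(stored: Buffer, passphrase: string): Buffer {
  const key = crypto.scryptSync(passphrase, "salt", 32);
  const iv = stored.subarray(0, 16); // first 16 bytes are the IV
  const decipher = crypto.createDecipheriv("aes-256-cbc", key, iv);
  return Buffer.concat([decipher.update(stored.subarray(16)), decipher.final()]);
}

// Example: decrypt(encrypt(Buffer.from("hello"), "secret"), "secret") yields "hello" again.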
 | 
			
		||||
 | 
			
		||||
  async getPresignedPutUrl(objectName: string, expires: number): Promise<string> {
 | 
			
		||||
    const token = crypto.randomBytes(32).toString("hex");
 | 
			
		||||
    const expiresAt = Date.now() + expires * 1000;
 | 
			
		||||
 | 
			
		||||
    this.uploadTokens.set(token, { objectName, expiresAt });
 | 
			
		||||
 | 
			
		||||
    return `/api/filesystem/upload/${token}`;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  async getPresignedGetUrl(objectName: string): Promise<string> {
 | 
			
		||||
    const encodedObjectName = encodeURIComponent(objectName);
 | 
			
		||||
    return `/api/files/download?objectName=${encodedObjectName}`;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  async deleteObject(objectName: string): Promise<void> {
    const filePath = this.getFilePath(objectName);
    try {
      await fs.unlink(filePath);
    } catch (error: any) {
      if (error.code !== "ENOENT") {
        throw error;
      }
    }
  }

  async uploadFile(objectName: string, buffer: Buffer): Promise<void> {
    const filePath = this.getFilePath(objectName);
    const dir = path.dirname(filePath);

    await fs.mkdir(dir, { recursive: true });

    const { Readable } = await import("stream");
    const readable = Readable.from(buffer);

    await this.uploadFileFromStream(objectName, readable);
  }

  async uploadFileFromStream(objectName: string, inputStream: NodeJS.ReadableStream): Promise<void> {
    const filePath = this.getFilePath(objectName);
    const dir = path.dirname(filePath);

    await fs.mkdir(dir, { recursive: true });

    const tempPath = getTempFilePath(objectName);
    const tempDir = path.dirname(tempPath);

    await fs.mkdir(tempDir, { recursive: true });

    const writeStream = fsSync.createWriteStream(tempPath);
    const encryptStream = this.createEncryptStream();

    try {
      await pipeline(inputStream, encryptStream, writeStream);
      await this.moveFile(tempPath, filePath);
    } catch (error) {
      await this.cleanupTempFile(tempPath);
      throw error;
    }
  }

  async downloadFile(objectName: string): Promise<Buffer> {
    const filePath = this.getFilePath(objectName);
    const fileBuffer = await fs.readFile(filePath);

    if (this.isEncryptionDisabled) {
      return fileBuffer;
    }

    if (fileBuffer.length > 16) {
      try {
        return this.decryptFileBuffer(fileBuffer);
      } catch (error: unknown) {
        if (error instanceof Error) {
          console.warn("Failed to decrypt with new method, trying legacy format", error.message);
        }
        return this.decryptFileLegacy(fileBuffer);
      }
    }

    return this.decryptFileLegacy(fileBuffer);
  }

  createDownloadStream(objectName: string): NodeJS.ReadableStream {
    const filePath = this.getFilePath(objectName);

    const streamOptions = {
      highWaterMark: 64 * 1024,
      autoDestroy: true,
      emitClose: true,
    };

    const fileStream = fsSync.createReadStream(filePath, streamOptions);

    if (this.isEncryptionDisabled) {
      this.setupStreamMemoryManagement(fileStream, objectName);
      return fileStream;
    }

    const decryptStream = this.createDecryptStream();
    const { PassThrough } = require("stream");
    const outputStream = new PassThrough(streamOptions);

    let isDestroyed = false;
    let memoryCheckInterval: NodeJS.Timeout;

    const cleanup = () => {
      if (isDestroyed) return;
      isDestroyed = true;

      if (memoryCheckInterval) {
        clearInterval(memoryCheckInterval);
      }

      try {
        if (fileStream && !fileStream.destroyed) {
          fileStream.destroy();
        }
        if (decryptStream && !decryptStream.destroyed) {
          decryptStream.destroy();
        }
        if (outputStream && !outputStream.destroyed) {
          outputStream.destroy();
        }
      } catch (error) {
        console.warn("Error during download stream cleanup:", error);
      }

      setImmediate(() => {
        if (global.gc) {
          global.gc();
        }
      });
    };

    memoryCheckInterval = setInterval(() => {
      const memUsage = process.memoryUsage();
      const memoryUsageMB = memUsage.heapUsed / 1024 / 1024;

      if (memoryUsageMB > 1024) {
        if (!fileStream.readableFlowing) return;

        console.warn(
          `[MEMORY THROTTLE] ${objectName} - Pausing stream due to high memory usage: ${memoryUsageMB.toFixed(2)}MB`
        );
        fileStream.pause();

        if (global.gc) {
          global.gc();
        }

        setTimeout(() => {
          if (!isDestroyed && fileStream && !fileStream.destroyed) {
            fileStream.resume();
            console.log(`[MEMORY THROTTLE] ${objectName} - Stream resumed`);
          }
        }, 100);
      }
    }, 1000);

    fileStream.on("error", (error: any) => {
      console.error("File stream error:", error);
      cleanup();
    });

    decryptStream.on("error", (error: any) => {
      console.error("Decrypt stream error:", error);
      cleanup();
    });

    outputStream.on("error", (error: any) => {
      console.error("Output stream error:", error);
      cleanup();
    });

    outputStream.on("close", cleanup);
    outputStream.on("finish", cleanup);

    outputStream.on("pipe", (src: any) => {
      if (src && src.on) {
        src.on("close", cleanup);
        src.on("error", cleanup);
      }
    });

    pipeline(fileStream, decryptStream, outputStream)
      .then(() => {})
      .catch((error: any) => {
        console.error("Pipeline error during download:", error);
        cleanup();
      });

    this.setupStreamMemoryManagement(outputStream, objectName);
    return outputStream;
  }

  private setupStreamMemoryManagement(stream: NodeJS.ReadableStream, objectName: string): void {
    let lastMemoryLog = 0;

    stream.on("data", () => {
      const now = Date.now();
      if (now - lastMemoryLog > 30000) {
        FilesystemStorageProvider.logMemoryUsage(`Active download: ${objectName}`);
        lastMemoryLog = now;
      }
    });

    stream.on("end", () => {
      FilesystemStorageProvider.logMemoryUsage(`Download completed: ${objectName}`);
      setImmediate(() => {
        if (global.gc) {
          global.gc();
        }
      });
    });

    stream.on("close", () => {
      FilesystemStorageProvider.logMemoryUsage(`Download closed: ${objectName}`);
    });
  }

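  // Sketch: consuming createDownloadStream() from a plain Node HTTP handler.
  // pipeline() propagates backpressure and tears both ends down on client abort,
  // which is what keeps the memory throttling above effective. The server wiring
  // here is illustrative only, not the app's actual route code.
  //
  //   import http from "node:http";
  //   import { pipeline } from "stream/promises";
  //
  //   http.createServer(async (req, res) => {
  //     const provider = FilesystemStorageProvider.getInstance();
  //     const stream = provider.createDownloadStream("uploads/abc.bin");
  //     res.setHeader("content-type", "application/octet-stream");
  //     try {
  //       await pipeline(stream, res);
  //     } catch {
  //       res.destroy(); // client went away or decryption failed
  //     }
  //   }).listen(3333);
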
  async createDownloadRangeStream(objectName: string, start: number, end: number): Promise<NodeJS.ReadableStream> {
    if (!this.isEncryptionDisabled) {
      return this.createRangeStreamFromDecrypted(objectName, start, end);
    }

    const filePath = this.getFilePath(objectName);
    return fsSync.createReadStream(filePath, { start, end });
  }

  private createRangeStreamFromDecrypted(objectName: string, start: number, end: number): NodeJS.ReadableStream {
    const { Transform, PassThrough } = require("stream");
    const filePath = this.getFilePath(objectName);
    const fileStream = fsSync.createReadStream(filePath);
    const decryptStream = this.createDecryptStream();
    const rangeStream = new PassThrough();

    let bytesRead = 0;
    let rangeEnded = false;
    let isDestroyed = false;

    const rangeTransform = new Transform({
      transform(chunk: Buffer, encoding: any, callback: any) {
        if (rangeEnded || isDestroyed) {
          callback();
          return;
        }

        const chunkStart = bytesRead;
        const chunkEnd = bytesRead + chunk.length - 1;
        bytesRead += chunk.length;

        if (chunkEnd < start) {
          callback();
          return;
        }

        if (chunkStart > end) {
          rangeEnded = true;
          this.end();
          callback();
          return;
        }

        let sliceStart = 0;
        let sliceEnd = chunk.length;

        if (chunkStart < start) {
          sliceStart = start - chunkStart;
        }

        if (chunkEnd > end) {
          sliceEnd = end - chunkStart + 1;
          rangeEnded = true;
        }

        const slicedChunk = chunk.slice(sliceStart, sliceEnd);
        this.push(slicedChunk);

        if (rangeEnded) {
          this.end();
        }

        callback();
      },

      flush(callback: any) {
        if (global.gc) {
          global.gc();
        }
        callback();
      },
    });

    const cleanup = () => {
      if (isDestroyed) return;
      isDestroyed = true;

      try {
        if (fileStream && !fileStream.destroyed) {
          fileStream.destroy();
        }
        if (decryptStream && !decryptStream.destroyed) {
          decryptStream.destroy();
        }
        if (rangeTransform && !rangeTransform.destroyed) {
          rangeTransform.destroy();
        }
        if (rangeStream && !rangeStream.destroyed) {
          rangeStream.destroy();
        }
      } catch (error) {
        console.warn("Error during stream cleanup:", error);
      }

      if (global.gc) {
        global.gc();
      }
    };

    fileStream.on("error", cleanup);
    decryptStream.on("error", cleanup);
    rangeTransform.on("error", cleanup);
    rangeStream.on("error", cleanup);

    rangeStream.on("close", cleanup);
    rangeStream.on("end", cleanup);

    rangeStream.on("pipe", (src: any) => {
      if (src && src.on) {
        src.on("close", cleanup);
        src.on("error", cleanup);
      }
    });

    fileStream.pipe(decryptStream).pipe(rangeTransform).pipe(rangeStream);

    return rangeStream;
  }

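  // Sketch: turning an HTTP Range header into the (start, end) offsets the range
  // methods above expect. Offsets refer to the decrypted plaintext, since the
  // transform slices after decryption. This parser is a minimal assumption-laden
  // version (single range only), not the app's actual request handling.
  //
  //   function parseRange(header: string, fileSize: number): { start: number; end: number } | null {
  //     const m = /^bytes=(\d*)-(\d*)$/.exec(header);
  //     if (!m) return null;
  //     const start = m[1] ? parseInt(m[1], 10) : fileSize - parseInt(m[2], 10); // suffix form "bytes=-N"
  //     const end = m[1] && m[2] ? parseInt(m[2], 10) : fileSize - 1;            // open form "bytes=N-"
  //     if (isNaN(start) || isNaN(end) || start > end || end >= fileSize) return null;
  //     return { start, end };
  //   }
  //
  //   // "bytes=0-1023" on a 4096-byte file yields { start: 0, end: 1023 }
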
  private decryptFileBuffer(encryptedBuffer: Buffer): Buffer {
    const key = this.createEncryptionKey();
    const iv = encryptedBuffer.slice(0, 16);
    const encrypted = encryptedBuffer.slice(16);

    const decipher = crypto.createDecipheriv("aes-256-cbc", key, iv);

    return Buffer.concat([decipher.update(encrypted), decipher.final()]);
  }

  private decryptFileLegacy(encryptedBuffer: Buffer): Buffer {
    if (!this.encryptionKey) {
      throw new Error(
        "Encryption key is required when encryption is enabled. Please set ENCRYPTION_KEY environment variable."
      );
    }
    const CryptoJS = require("crypto-js");
    const decrypted = CryptoJS.AES.decrypt(encryptedBuffer.toString("utf8"), this.encryptionKey);
    return Buffer.from(decrypted.toString(CryptoJS.enc.Utf8), "base64");
  }

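  // Sketch of the legacy write path this fallback appears to reverse, inferred from
  // the read side above: the plaintext was base64-encoded, encrypted with
  // CryptoJS.AES in its passphrase mode, and the resulting string stored as UTF-8.
  // This is an inference for illustration, not code that ships anywhere.
  //
  //   const CryptoJS = require("crypto-js");
  //
  //   function encryptLegacy(plain: Buffer, key: string): Buffer {
  //     const ciphertext = CryptoJS.AES.encrypt(plain.toString("base64"), key).toString();
  //     return Buffer.from(ciphertext, "utf8");
  //   }
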
  static logMemoryUsage(context: string = "Unknown"): void {
    const memUsage = process.memoryUsage();
    const formatBytes = (bytes: number) => {
      const mb = bytes / 1024 / 1024;
      return `${mb.toFixed(2)} MB`;
    };

    const rssInMB = memUsage.rss / 1024 / 1024;
    const heapUsedInMB = memUsage.heapUsed / 1024 / 1024;

    if (rssInMB > 1024 || heapUsedInMB > 512) {
      console.warn(`[MEMORY WARNING] ${context} - High memory usage detected:`);
      console.warn(`  RSS: ${formatBytes(memUsage.rss)}`);
      console.warn(`  Heap Used: ${formatBytes(memUsage.heapUsed)}`);
      console.warn(`  Heap Total: ${formatBytes(memUsage.heapTotal)}`);
      console.warn(`  External: ${formatBytes(memUsage.external)}`);

      if (global.gc) {
        console.warn("  Forcing garbage collection...");
        global.gc();

        const afterGC = process.memoryUsage();
        console.warn(`  After GC - RSS: ${formatBytes(afterGC.rss)}, Heap: ${formatBytes(afterGC.heapUsed)}`);
      }
    } else {
      console.log(
        `[MEMORY INFO] ${context} - RSS: ${formatBytes(memUsage.rss)}, Heap: ${formatBytes(memUsage.heapUsed)}`
      );
    }
  }

  static forceGarbageCollection(context: string = "Manual"): void {
    if (global.gc) {
      const beforeGC = process.memoryUsage();
      global.gc();
      const afterGC = process.memoryUsage();

      const formatBytes = (bytes: number) => `${(bytes / 1024 / 1024).toFixed(2)} MB`;

      console.log(`[GC] ${context} - Before: RSS ${formatBytes(beforeGC.rss)}, Heap ${formatBytes(beforeGC.heapUsed)}`);
      console.log(`[GC] ${context} - After:  RSS ${formatBytes(afterGC.rss)}, Heap ${formatBytes(afterGC.heapUsed)}`);

      const rssSaved = beforeGC.rss - afterGC.rss;
      const heapSaved = beforeGC.heapUsed - afterGC.heapUsed;

      if (rssSaved > 0 || heapSaved > 0) {
        console.log(`[GC] ${context} - Freed: RSS ${formatBytes(rssSaved)}, Heap ${formatBytes(heapSaved)}`);
      }
    } else {
      console.warn(`[GC] ${context} - Garbage collection not available. Start Node.js with --expose-gc flag.`);
    }
  }

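  // Usage note: both helpers above are effectively no-ops unless V8 exposes gc().
  // Either invocation below enables it (standard Node/V8 flags; the NODE_OPTIONS
  // route is equivalent):
  //
  //   node --expose-gc dist/server.js
  //   NODE_OPTIONS="--expose-gc" node dist/server.js
  //
  //   FilesystemStorageProvider.logMemoryUsage("startup");
  //   FilesystemStorageProvider.forceGarbageCollection("after large download");
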
  async fileExists(objectName: string): Promise<boolean> {
    const filePath = this.getFilePath(objectName);
    try {
      await fs.access(filePath);
      return true;
    } catch {
      return false;
    }
  }

  validateUploadToken(token: string): { objectName: string } | null {
    const data = this.uploadTokens.get(token);
    if (!data || Date.now() > data.expiresAt) {
      this.uploadTokens.delete(token);
      return null;
    }
    return { objectName: data.objectName };
  }

  validateDownloadToken(token: string): { objectName: string; fileName?: string } | null {
    const data = this.downloadTokens.get(token);

    if (!data) {
      return null;
    }

    const now = Date.now();

    if (now > data.expiresAt) {
      this.downloadTokens.delete(token);
      return null;
    }

    return { objectName: data.objectName, fileName: data.fileName };
  }

  // Tokens are automatically cleaned up by cleanExpiredTokens() every 5 minutes
  // No need to manually consume tokens - allows reuse for previews, range requests, etc.

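  // Sketch of the periodic sweep the comment above refers to; this is a hypothetical
  // reconstruction for illustration (the real cleanExpiredTokens() lives elsewhere in
  // this class and may differ):
  //
  //   private cleanExpiredTokens(): void {
  //     const now = Date.now();
  //     for (const [token, data] of this.uploadTokens) {
  //       if (now > data.expiresAt) this.uploadTokens.delete(token);
  //     }
  //     for (const [token, data] of this.downloadTokens) {
  //       if (now > data.expiresAt) this.downloadTokens.delete(token);
  //     }
  //   }
  //   // setInterval(() => this.cleanExpiredTokens(), 5 * 60 * 1000);
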
  private async cleanupTempFile(tempPath: string): Promise<void> {
    try {
      await fs.unlink(tempPath);

      const tempDir = path.dirname(tempPath);
      try {
        const files = await fs.readdir(tempDir);
        if (files.length === 0) {
          await fs.rmdir(tempDir);
        }
      } catch (dirError: any) {
        if (dirError.code !== "ENOTEMPTY" && dirError.code !== "ENOENT") {
          console.warn("Warning: Could not remove temp directory:", dirError.message);
        }
      }
    } catch (cleanupError: any) {
      if (cleanupError.code !== "ENOENT") {
        console.error("Error deleting temp file:", cleanupError);
      }
    }
  }

  private async cleanupEmptyTempDirs(): Promise<void> {
    try {
      const tempUploadsDir = directoriesConfig.tempUploads;

      try {
        await fs.access(tempUploadsDir);
      } catch {
        return;
      }

      const items = await fs.readdir(tempUploadsDir);

      for (const item of items) {
        const itemPath = path.join(tempUploadsDir, item);

        try {
          const stat = await fs.stat(itemPath);

          if (stat.isDirectory()) {
            const dirContents = await fs.readdir(itemPath);
            if (dirContents.length === 0) {
              await fs.rmdir(itemPath);
              console.log(`🧹 Cleaned up empty temp directory: ${itemPath}`);
            }
          } else if (stat.isFile()) {
            const oneHourAgo = Date.now() - 60 * 60 * 1000;
            if (stat.mtime.getTime() < oneHourAgo) {
              await fs.unlink(itemPath);
              console.log(`🧹 Cleaned up stale temp file: ${itemPath}`);
            }
          }
        } catch (error: any) {
          if (error.code !== "ENOENT") {
            console.warn(`Warning: Could not process temp item ${itemPath}:`, error.message);
          }
        }
      }
    } catch (error) {
      console.error("Error during temp directory cleanup:", error);
    }
  }

  private async moveFile(src: string, dest: string): Promise<void> {
    try {
      await fs.rename(src, dest);
    } catch (err: any) {
      if (err.code === "EXDEV") {
        // cross-device: fall back to copy + delete
        await fs.copyFile(src, dest);
        await fs.unlink(src);
      } else {
        throw err;
      }
    }
  }
}

@@ -1,17 +1,16 @@
import { DeleteObjectCommand, GetObjectCommand, HeadObjectCommand, PutObjectCommand } from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";

import { bucketName, s3Client } from "../config/storage.config";
import { bucketName, createPublicS3Client, s3Client } from "../config/storage.config";
import { StorageProvider } from "../types/storage";
import { getContentType } from "../utils/mime-types";

export class S3StorageProvider implements StorageProvider {
  constructor() {
  private ensureClient() {
    if (!s3Client) {
      throw new Error(
        "S3 client is not configured. Make sure ENABLE_S3=true and all S3 environment variables are set."
      );
      throw new Error("S3 client is not configured. Storage is initializing, please wait...");
    }
    return s3Client;
  }

  /**
@@ -71,8 +70,10 @@ export class S3StorageProvider implements StorageProvider {
  }

  async getPresignedPutUrl(objectName: string, expires: number): Promise<string> {
    if (!s3Client) {
      throw new Error("S3 client is not available");
    // Always use public S3 client for presigned URLs (uses SERVER_IP)
    const client = createPublicS3Client();
    if (!client) {
      throw new Error("S3 client could not be created");
    }

    const command = new PutObjectCommand({
@@ -80,12 +81,14 @@ export class S3StorageProvider implements StorageProvider {
      Key: objectName,
    });

    return await getSignedUrl(s3Client, command, { expiresIn: expires });
    return await getSignedUrl(client, command, { expiresIn: expires });
  }

  async getPresignedGetUrl(objectName: string, expires: number, fileName?: string): Promise<string> {
    if (!s3Client) {
      throw new Error("S3 client is not available");
    // Always use public S3 client for presigned URLs (uses SERVER_IP)
    const client = createPublicS3Client();
    if (!client) {
      throw new Error("S3 client could not be created");
    }

    let rcdFileName: string;
@@ -107,26 +110,22 @@ export class S3StorageProvider implements StorageProvider {
      ResponseContentType: getContentType(rcdFileName),
    });

    return await getSignedUrl(s3Client, command, { expiresIn: expires });
    return await getSignedUrl(client, command, { expiresIn: expires });
  }

  async deleteObject(objectName: string): Promise<void> {
    if (!s3Client) {
      throw new Error("S3 client is not available");
    }
    const client = this.ensureClient();

    const command = new DeleteObjectCommand({
      Bucket: bucketName,
      Key: objectName,
    });

    await s3Client.send(command);
    await client.send(command);
  }

  async fileExists(objectName: string): Promise<boolean> {
    if (!s3Client) {
      throw new Error("S3 client is not available");
    }
    const client = this.ensureClient();

    try {
      const command = new HeadObjectCommand({
@@ -134,7 +133,7 @@ export class S3StorageProvider implements StorageProvider {
        Key: objectName,
      });

      await s3Client.send(command);
      await client.send(command);
      return true;
    } catch (error: any) {
      if (error.name === "NotFound" || error.$metadata?.httpStatusCode === 404) {
@@ -143,4 +142,26 @@ export class S3StorageProvider implements StorageProvider {
      throw error;
    }
  }

  /**
   * Get a readable stream for downloading an object
   * Used for proxying downloads through the backend
   */
  async getObjectStream(objectName: string): Promise<NodeJS.ReadableStream> {
    const client = this.ensureClient();

    const command = new GetObjectCommand({
      Bucket: bucketName,
      Key: objectName,
    });

    const response = await client.send(command);

    if (!response.Body) {
      throw new Error("No body in S3 response");
    }

    // AWS SDK v3 returns a readable stream
    return response.Body as NodeJS.ReadableStream;
  }
}

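// Sketch: proxying a download through the backend with getObjectStream(), the use
// case named in its doc comment. Fastify accepts a Node readable as a reply payload;
// the route path, query shape, and content-type handling here are illustrative
// assumptions, not the app's actual route code.
//
//   app.get("/api/files/proxy", async (request, reply) => {
//     const { objectName } = request.query as { objectName: string };
//     const provider = new S3StorageProvider();
//     const stream = await provider.getObjectStream(objectName);
//     return reply.header("content-type", "application/octet-stream").send(stream);
//   });
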
@@ -1,5 +1,3 @@
import { isS3Enabled } from "../config/storage.config";
import { FilesystemStorageProvider } from "../providers/filesystem-storage.provider";
import { S3StorageProvider } from "../providers/s3-storage.provider";
import { prisma } from "../shared/prisma";
import { StorageProvider } from "../types/storage";
@@ -10,14 +8,10 @@ import { StorageProvider } from "../types/storage";
 */
async function cleanupOrphanFiles() {
  console.log("Starting orphan file cleanup...");
  console.log(`Storage mode: ${isS3Enabled ? "S3" : "Filesystem"}`);
  console.log(`Storage mode: S3 (Garage or External)`);

  let storageProvider: StorageProvider;
  if (isS3Enabled) {
    storageProvider = new S3StorageProvider();
  } else {
    storageProvider = FilesystemStorageProvider.getInstance();
  }
  // Always use S3 storage provider
  const storageProvider: StorageProvider = new S3StorageProvider();

  // Get all files from database
  const allFiles = await prisma.file.findMany({

apps/server/src/scripts/migrate-filesystem-to-s3.ts (new file, 305 lines)
@@ -0,0 +1,305 @@
/**
 * Automatic Migration Script: Filesystem → S3 (Garage)
 *
 * This script runs automatically on server start and:
 * 1. Detects existing filesystem files
 * 2. Migrates them to S3 in the background
 * 3. Updates database references
 * 4. Keeps the filesystem as a fallback during migration
 * 5. Requires zero downtime and zero user intervention
 */

import { createReadStream } from "fs";
import * as fs from "fs/promises";
import * as path from "path";
import { PutObjectCommand } from "@aws-sdk/client-s3";

import { directoriesConfig } from "../config/directories.config";
import { bucketName, s3Client } from "../config/storage.config";
import { prisma } from "../shared/prisma";

interface MigrationStats {
  totalFiles: number;
  migratedFiles: number;
  failedFiles: number;
  skippedFiles: number;
  totalSizeBytes: number;
  startTime: number;
  endTime?: number;
}

const MIGRATION_STATE_FILE = path.join(directoriesConfig.uploads, ".migration-state.json");
const MIGRATION_BATCH_SIZE = 10; // Migrate 10 files at a time
const MIGRATION_DELAY_MS = 100; // Small delay between batches to avoid overwhelming the storage backend

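// Sketch of what .migration-state.json looks like on disk, given MigrationStats
// plus the `completed` flag that saveState()/markMigrationComplete() add below;
// the numbers here are made up for illustration:
//
//   {
//     "totalFiles": 1240,
//     "migratedFiles": 1238,
//     "failedFiles": 0,
//     "skippedFiles": 2,
//     "totalSizeBytes": 52428800,
//     "startTime": 1736500000000,
//     "endTime": 1736500312000,
//     "completed": true
//   }
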
export class FilesystemToS3Migrator {
  private stats: MigrationStats = {
    totalFiles: 0,
    migratedFiles: 0,
    failedFiles: 0,
    skippedFiles: 0,
    totalSizeBytes: 0,
    startTime: Date.now(),
  };

  /**
   * Check if migration is needed and should run
   */
  async shouldMigrate(): Promise<boolean> {
    // Only migrate if S3 client is available
    if (!s3Client) {
      console.log("[MIGRATION] S3 not configured, skipping migration");
      return false;
    }

    // Check if migration already completed
    try {
      const stateExists = await fs
        .access(MIGRATION_STATE_FILE)
        .then(() => true)
        .catch(() => false);

      if (stateExists) {
        const state = JSON.parse(await fs.readFile(MIGRATION_STATE_FILE, "utf-8"));

        if (state.completed) {
          console.log("[MIGRATION] Migration already completed");
          return false;
        }

        console.log("[MIGRATION] Previous migration incomplete, resuming...");
        this.stats = { ...state, startTime: Date.now() };
        return true;
      }
    } catch (error) {
      console.warn("[MIGRATION] Could not read migration state:", error);
    }

    // Check if there are files to migrate
    try {
      const uploadsDir = directoriesConfig.uploads;
      const files = await this.scanDirectory(uploadsDir);

      if (files.length === 0) {
        console.log("[MIGRATION] No filesystem files found, nothing to migrate");
        await this.markMigrationComplete();
        return false;
      }

      console.log(`[MIGRATION] Found ${files.length} files to migrate`);
      this.stats.totalFiles = files.length;
      return true;
    } catch (error) {
      console.error("[MIGRATION] Error scanning files:", error);
      return false;
    }
  }

  /**
   * Run the migration process
   */
  async migrate(): Promise<void> {
    console.log("[MIGRATION] Starting automatic filesystem → S3 migration");
    console.log("[MIGRATION] This runs in the background, with zero downtime");

    try {
      const uploadsDir = directoriesConfig.uploads;
      const files = await this.scanDirectory(uploadsDir);

      // Process in batches
      for (let i = 0; i < files.length; i += MIGRATION_BATCH_SIZE) {
        const batch = files.slice(i, i + MIGRATION_BATCH_SIZE);

        await Promise.all(
          batch.map((file) =>
            this.migrateFile(file).catch((error) => {
              // migrateFile already logs the failure and increments failedFiles
              // before rethrowing; incrementing again here would double-count.
              console.error(`[MIGRATION] Failed to migrate ${file}:`, error);
            })
          )
        );

        // Save progress
        await this.saveState();

        // Small delay between batches
        if (i + MIGRATION_BATCH_SIZE < files.length) {
          await new Promise((resolve) => setTimeout(resolve, MIGRATION_DELAY_MS));
        }

        // Log progress
        const progress = Math.round(((i + batch.length) / files.length) * 100);
        console.log(`[MIGRATION] Progress: ${progress}% (${this.stats.migratedFiles}/${files.length})`);
      }

      this.stats.endTime = Date.now();
      await this.markMigrationComplete();

      const durationSeconds = Math.round((this.stats.endTime - this.stats.startTime) / 1000);
      const sizeMB = Math.round(this.stats.totalSizeBytes / 1024 / 1024);

      console.log("[MIGRATION] ✓✓✓ Migration completed successfully!");
      console.log(`[MIGRATION] Stats:`);
      console.log(`  - Total files: ${this.stats.totalFiles}`);
      console.log(`  - Migrated: ${this.stats.migratedFiles}`);
      console.log(`  - Failed: ${this.stats.failedFiles}`);
      console.log(`  - Skipped: ${this.stats.skippedFiles}`);
      console.log(`  - Total size: ${sizeMB}MB`);
      console.log(`  - Duration: ${durationSeconds}s`);
    } catch (error) {
      console.error("[MIGRATION] Migration failed:", error);
      await this.saveState();
      throw error;
    }
  }

  /**
   * Scan directory recursively for files
   */
  private async scanDirectory(dir: string, baseDir: string = dir): Promise<string[]> {
    const files: string[] = [];

    try {
      const entries = await fs.readdir(dir, { withFileTypes: true });

      for (const entry of entries) {
        const fullPath = path.join(dir, entry.name);

        // Skip special files and directories
        if (entry.name.startsWith(".") || entry.name === "temp-uploads") {
          continue;
        }

        if (entry.isDirectory()) {
          const subFiles = await this.scanDirectory(fullPath, baseDir);
          files.push(...subFiles);
        } else if (entry.isFile()) {
          // Get relative path for S3 key
          const relativePath = path.relative(baseDir, fullPath);
          files.push(relativePath);
        }
      }
    } catch (error) {
      console.warn(`[MIGRATION] Could not scan directory ${dir}:`, error);
    }

    return files;
  }

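  // Sketch: scanDirectory returns uploads-relative paths, which migrateFile below
  // turns into S3 keys by normalizing separators. The directory layout here is a
  // hypothetical example:
  //
  //   uploads/ab/cd/report.pdf       -> scanDirectory yields "ab/cd/report.pdf"
  //   uploads/.migration-state.json  -> skipped (dotfile)
  //   uploads/temp-uploads/...       -> skipped (temp directory)
  //
  //   const objectName = "ab\\cd\\report.pdf".replace(/\\/g, "/"); // "ab/cd/report.pdf" on Windows
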
  /**
   * Migrate a single file to S3
   */
  private async migrateFile(relativeFilePath: string): Promise<void> {
    const fullPath = path.join(directoriesConfig.uploads, relativeFilePath);

    try {
      // Check if file still exists
      const stats = await fs.stat(fullPath);

      if (!stats.isFile()) {
        this.stats.skippedFiles++;
        return;
      }

      // S3 object name (preserve directory structure)
      const objectName = relativeFilePath.replace(/\\/g, "/");

      // Check if already exists in S3
      if (s3Client) {
        try {
          const { HeadObjectCommand } = await import("@aws-sdk/client-s3");
          await s3Client.send(
            new HeadObjectCommand({
              Bucket: bucketName,
              Key: objectName,
            })
          );

          // Already exists in S3, skip
          console.log(`[MIGRATION] Already in S3: ${objectName}`);
          this.stats.skippedFiles++;
          return;
        } catch (error: any) {
          // Not found, proceed with migration
          if (error.$metadata?.httpStatusCode !== 404) {
            throw error;
          }
        }
      }

      // Upload to S3
      if (s3Client) {
        const fileStream = createReadStream(fullPath);

        await s3Client.send(
          new PutObjectCommand({
            Bucket: bucketName,
            Key: objectName,
            Body: fileStream,
          })
        );

        this.stats.migratedFiles++;
        this.stats.totalSizeBytes += stats.size;

        console.log(`[MIGRATION] ✓ Migrated: ${objectName} (${Math.round(stats.size / 1024)}KB)`);

        // Delete filesystem file after successful migration to free up space
        try {
          await fs.unlink(fullPath);
          console.log(`[MIGRATION] 🗑️  Deleted from filesystem: ${relativeFilePath}`);
        } catch (unlinkError) {
          console.warn(`[MIGRATION] Warning: Could not delete ${relativeFilePath}:`, unlinkError);
        }
      }
    } catch (error) {
      console.error(`[MIGRATION] Failed to migrate ${relativeFilePath}:`, error);
      this.stats.failedFiles++;
      throw error;
    }
  }

  /**
   * Save migration state
   */
  private async saveState(): Promise<void> {
    try {
      await fs.writeFile(MIGRATION_STATE_FILE, JSON.stringify({ ...this.stats, completed: false }, null, 2));
    } catch (error) {
      console.warn("[MIGRATION] Could not save state:", error);
    }
  }

  /**
   * Mark migration as complete
   */
  private async markMigrationComplete(): Promise<void> {
    try {
      await fs.writeFile(MIGRATION_STATE_FILE, JSON.stringify({ ...this.stats, completed: true }, null, 2));
      console.log("[MIGRATION] Migration marked as complete");
    } catch (error) {
      console.warn("[MIGRATION] Could not mark migration complete:", error);
    }
  }
}

/**
 * Auto-run migration on import (called by server.ts)
 */
export async function runAutoMigration(): Promise<void> {
  const migrator = new FilesystemToS3Migrator();

  if (await migrator.shouldMigrate()) {
    // Run in background, don't block server start
    setTimeout(async () => {
      try {
        await migrator.migrate();
      } catch (error) {
        console.error("[MIGRATION] Auto-migration failed:", error);
        console.log("[MIGRATION] Will retry on next server restart");
      }
    }, 5000); // Start after 5 seconds

    console.log("[MIGRATION] Background migration scheduled");
  }
}

@@ -2,7 +2,6 @@ import * as fs from "fs/promises";
import crypto from "node:crypto";
import path from "path";
import fastifyMultipart from "@fastify/multipart";
import fastifyStatic from "@fastify/static";

import { buildApp } from "./app";
import { directoriesConfig } from "./config/directories.config";
@@ -11,12 +10,10 @@ import { appRoutes } from "./modules/app/routes";
import { authProvidersRoutes } from "./modules/auth-providers/routes";
import { authRoutes } from "./modules/auth/routes";
import { fileRoutes } from "./modules/file/routes";
import { ChunkManager } from "./modules/filesystem/chunk-manager";
import { downloadQueueRoutes } from "./modules/filesystem/download-queue-routes";
import { filesystemRoutes } from "./modules/filesystem/routes";
import { folderRoutes } from "./modules/folder/routes";
import { healthRoutes } from "./modules/health/routes";
import { reverseShareRoutes } from "./modules/reverse-share/routes";
import { s3StorageRoutes } from "./modules/s3-storage/routes";
import { shareRoutes } from "./modules/share/routes";
import { storageRoutes } from "./modules/storage/routes";
import { twoFactorRoutes } from "./modules/two-factor/routes";
@@ -52,6 +49,14 @@ async function startServer() {

  await ensureDirectories();

  // Import storage config once at the beginning
  const { isInternalStorage, isExternalS3 } = await import("./config/storage.config.js");

  // Run automatic migration from legacy storage to S3-compatible storage
  // Transparently migrates any existing files
  const { runAutoMigration } = await import("./scripts/migrate-filesystem-to-s3.js");
  await runAutoMigration();

  await app.register(fastifyMultipart, {
    limits: {
      fieldNameSize: 100,
@@ -63,29 +68,29 @@ async function startServer() {
    },
  });

  if (env.ENABLE_S3 !== "true") {
    await app.register(fastifyStatic, {
      root: directoriesConfig.uploads,
      prefix: "/uploads/",
      decorateReply: false,
    });
  }
  // No static files needed - S3 serves files directly via presigned URLs

  app.register(authRoutes);
  app.register(authProvidersRoutes, { prefix: "/auth" });
  app.register(twoFactorRoutes, { prefix: "/auth" });
  app.register(userRoutes);
  app.register(fileRoutes);
  app.register(folderRoutes);
  app.register(downloadQueueRoutes);
  app.register(fileRoutes);
  app.register(shareRoutes);
  app.register(reverseShareRoutes);
  app.register(storageRoutes);
  app.register(appRoutes);
  app.register(healthRoutes);

  if (env.ENABLE_S3 !== "true") {
    app.register(filesystemRoutes);
  // Always use S3-compatible storage routes
  app.register(s3StorageRoutes);

  if (isInternalStorage) {
    console.log("📦 Using internal storage (auto-configured)");
  } else if (isExternalS3) {
    console.log("📦 Using external S3 storage (AWS/S3-compatible/etc)");
  } else {
    console.log("⚠️  WARNING: Storage not configured! Storage may not work.");
  }

  await app.listen({
@@ -93,36 +98,14 @@ async function startServer() {
    host: "0.0.0.0",
  });

  let authProviders = "Disabled";
  try {
    const { AuthProvidersService } = await import("./modules/auth-providers/service.js");
    const authService = new AuthProvidersService();
    const enabledProviders = await authService.getEnabledProviders();
    authProviders = enabledProviders.length > 0 ? `Enabled (${enabledProviders.length} providers)` : "Disabled";
  } catch (error) {
    console.error("Error getting auth providers status:", error);
  }
  const storageMode = isInternalStorage ? "Internal Storage" : isExternalS3 ? "External S3" : "Not Configured";

  console.log(`🌴 Palmr server running on port 3333 🌴`);
  console.log(
    `📦 Storage mode: ${env.ENABLE_S3 === "true" ? "S3" : `Local Filesystem ${env.DISABLE_FILESYSTEM_ENCRYPTION === "true" ? "(Unencrypted)" : "(Encrypted)"}`}`
  );
  console.log(`🔐 Auth Providers: ${authProviders}`);
  console.log(`📦 Storage: ${storageMode}`);

  console.log("\n📚 API Documentation:");
  console.log(`   - API Reference: http://localhost:3333/docs\n`);

  process.on("SIGINT", async () => {
    const chunkManager = ChunkManager.getInstance();
    chunkManager.destroy();
    process.exit(0);
  });

  process.on("SIGTERM", async () => {
    const chunkManager = ChunkManager.getInstance();
    chunkManager.destroy();
    process.exit(0);
  });
  // Cleanup on shutdown
  process.on("SIGINT", () => process.exit(0));
  process.on("SIGTERM", () => process.exit(0));
}

startServer().catch((err) => {

@@ -1,52 +0,0 @@
/**
 * TypeScript interfaces for download queue management
 */

export interface QueuedDownloadInfo {
  downloadId: string;
  position: number;
  waitTime: number;
  fileName?: string;
  fileSize?: number;
}

export interface QueueStatus {
  queueLength: number;
  maxQueueSize: number;
  activeDownloads: number;
  maxConcurrent: number;
  queuedDownloads: QueuedDownloadInfo[];
}

export interface DownloadCancelResponse {
  message: string;
  downloadId: string;
}

export interface QueueClearResponse {
  message: string;
  clearedCount: number;
}

export interface ApiResponse<T = any> {
  status: "success" | "error";
  data?: T;
  error?: string;
  message?: string;
}

export interface QueueStatusResponse extends ApiResponse<QueueStatus> {
  status: "success";
  data: QueueStatus;
}

export interface DownloadSlotRequest {
  fileName?: string;
  fileSize?: number;
  objectName: string;
}

export interface ActiveDownloadInfo {
  startTime: number;
  memoryAtStart: number;
}

@@ -3,6 +3,7 @@ export interface StorageProvider {
  getPresignedGetUrl(objectName: string, expires: number, fileName?: string): Promise<string>;
  deleteObject(objectName: string): Promise<void>;
  fileExists(objectName: string): Promise<boolean>;
  getObjectStream(objectName: string): Promise<NodeJS.ReadableStream>;
}

export interface StorageConfig {

@@ -1,423 +0,0 @@
import { ActiveDownloadInfo, DownloadSlotRequest, QueuedDownloadInfo, QueueStatus } from "../types/download-queue";

interface QueuedDownload {
  downloadId: string;
  queuedAt: number;
  resolve: () => void;
  reject: (error: Error) => void;
  metadata?: DownloadSlotRequest;
}

export class DownloadMemoryManager {
  private static instance: DownloadMemoryManager;
  private activeDownloads = new Map<string, ActiveDownloadInfo>();
  private downloadQueue: QueuedDownload[] = [];
  private maxConcurrentDownloads: number;
  private memoryThresholdMB: number;
  private maxQueueSize: number;
  private cleanupInterval: NodeJS.Timeout;
  private isAutoScalingEnabled: boolean;
  private minFileSizeGB: number;

  private constructor() {
    const { env } = require("../env");

    const totalMemoryGB = require("os").totalmem() / 1024 ** 3;
    this.isAutoScalingEnabled = env.DOWNLOAD_AUTO_SCALE === "true";

    if (env.DOWNLOAD_MAX_CONCURRENT !== undefined) {
      this.maxConcurrentDownloads = env.DOWNLOAD_MAX_CONCURRENT;
    } else if (this.isAutoScalingEnabled) {
      this.maxConcurrentDownloads = this.calculateDefaultConcurrentDownloads(totalMemoryGB);
    } else {
      this.maxConcurrentDownloads = 3;
    }

    if (env.DOWNLOAD_MEMORY_THRESHOLD_MB !== undefined) {
      this.memoryThresholdMB = env.DOWNLOAD_MEMORY_THRESHOLD_MB;
    } else if (this.isAutoScalingEnabled) {
      this.memoryThresholdMB = this.calculateDefaultMemoryThreshold(totalMemoryGB);
    } else {
      this.memoryThresholdMB = 1024;
    }

    if (env.DOWNLOAD_QUEUE_SIZE !== undefined) {
      this.maxQueueSize = env.DOWNLOAD_QUEUE_SIZE;
    } else if (this.isAutoScalingEnabled) {
      this.maxQueueSize = this.calculateDefaultQueueSize(totalMemoryGB);
    } else {
      this.maxQueueSize = 15;
    }

    if (env.DOWNLOAD_MIN_FILE_SIZE_GB !== undefined) {
      this.minFileSizeGB = env.DOWNLOAD_MIN_FILE_SIZE_GB;
    } else {
      this.minFileSizeGB = 3.0;
    }

    this.validateConfiguration();

    console.log(`[DOWNLOAD MANAGER] Configuration loaded:`);
    console.log(`  System Memory: ${totalMemoryGB.toFixed(1)}GB`);
    console.log(
      `  Max Concurrent: ${this.maxConcurrentDownloads} ${env.DOWNLOAD_MAX_CONCURRENT !== undefined ? "(ENV)" : "(AUTO)"}`
    );
    console.log(
      `  Memory Threshold: ${this.memoryThresholdMB}MB ${env.DOWNLOAD_MEMORY_THRESHOLD_MB !== undefined ? "(ENV)" : "(AUTO)"}`
    );
    console.log(`  Queue Size: ${this.maxQueueSize} ${env.DOWNLOAD_QUEUE_SIZE !== undefined ? "(ENV)" : "(AUTO)"}`);
    console.log(
      `  Min File Size: ${this.minFileSizeGB}GB ${env.DOWNLOAD_MIN_FILE_SIZE_GB !== undefined ? "(ENV)" : "(DEFAULT)"}`
    );
    console.log(`  Auto-scaling: ${this.isAutoScalingEnabled ? "enabled" : "disabled"}`);

    this.cleanupInterval = setInterval(() => {
      this.cleanupStaleDownloads();
    }, 30000);
  }

  public static getInstance(): DownloadMemoryManager {
    if (!DownloadMemoryManager.instance) {
      DownloadMemoryManager.instance = new DownloadMemoryManager();
    }
    return DownloadMemoryManager.instance;
  }

  private calculateDefaultConcurrentDownloads(totalMemoryGB: number): number {
    if (totalMemoryGB > 16) return 10;
    if (totalMemoryGB > 8) return 5;
    if (totalMemoryGB > 4) return 3;
    if (totalMemoryGB > 2) return 2;
    return 1;
  }

  private calculateDefaultMemoryThreshold(totalMemoryGB: number): number {
    if (totalMemoryGB > 16) return 4096; // 4GB
    if (totalMemoryGB > 8) return 2048; // 2GB
    if (totalMemoryGB > 4) return 1024; // 1GB
    if (totalMemoryGB > 2) return 512; // 512MB
    return 256; // 256MB
  }

  private calculateDefaultQueueSize(totalMemoryGB: number): number {
    if (totalMemoryGB > 16) return 50; // Large queue for powerful servers
    if (totalMemoryGB > 8) return 25; // Medium queue
    if (totalMemoryGB > 4) return 15; // Small queue
    if (totalMemoryGB > 2) return 10; // Very small queue
    return 5; // Minimal queue
  }

  private validateConfiguration(): void {
 | 
			
		||||
    const warnings: string[] = [];
 | 
			
		||||
    const errors: string[] = [];
 | 
			
		||||
 | 
			
		||||
    if (this.maxConcurrentDownloads < 1) {
 | 
			
		||||
      errors.push(`DOWNLOAD_MAX_CONCURRENT must be >= 1, got: ${this.maxConcurrentDownloads}`);
 | 
			
		||||
    }
 | 
			
		||||
    if (this.maxConcurrentDownloads > 50) {
 | 
			
		||||
      warnings.push(
 | 
			
		||||
        `DOWNLOAD_MAX_CONCURRENT is very high (${this.maxConcurrentDownloads}), this may cause performance issues`
 | 
			
		||||
      );
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if (this.memoryThresholdMB < 128) {
 | 
			
		||||
      warnings.push(
 | 
			
		||||
        `DOWNLOAD_MEMORY_THRESHOLD_MB is very low (${this.memoryThresholdMB}MB), downloads may be throttled frequently`
 | 
			
		||||
      );
 | 
			
		||||
    }
 | 
			
		||||
    if (this.memoryThresholdMB > 16384) {
 | 
			
		||||
      warnings.push(
 | 
			
		||||
        `DOWNLOAD_MEMORY_THRESHOLD_MB is very high (${this.memoryThresholdMB}MB), system may run out of memory`
 | 
			
		||||
      );
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if (this.maxQueueSize < 1) {
 | 
			
		||||
      errors.push(`DOWNLOAD_QUEUE_SIZE must be >= 1, got: ${this.maxQueueSize}`);
 | 
			
		||||
    }
 | 
			
		||||
    if (this.maxQueueSize > 1000) {
 | 
			
		||||
      warnings.push(`DOWNLOAD_QUEUE_SIZE is very high (${this.maxQueueSize}), this may consume significant memory`);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if (this.minFileSizeGB < 0.1) {
 | 
			
		||||
      warnings.push(
 | 
			
		||||
        `DOWNLOAD_MIN_FILE_SIZE_GB is very low (${this.minFileSizeGB}GB), most downloads will use memory management`
 | 
			
		||||
      );
 | 
			
		||||
    }
 | 
			
		||||
    if (this.minFileSizeGB > 50) {
 | 
			
		||||
      warnings.push(
 | 
			
		||||
        `DOWNLOAD_MIN_FILE_SIZE_GB is very high (${this.minFileSizeGB}GB), memory management may rarely activate`
 | 
			
		||||
      );
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    const recommendedQueueSize = this.maxConcurrentDownloads * 5;
 | 
			
		||||
    if (this.maxQueueSize < this.maxConcurrentDownloads) {
 | 
			
		||||
      warnings.push(
 | 
			
		||||
        `DOWNLOAD_QUEUE_SIZE (${this.maxQueueSize}) is smaller than DOWNLOAD_MAX_CONCURRENT (${this.maxConcurrentDownloads})`
 | 
			
		||||
      );
 | 
			
		||||
    } else if (this.maxQueueSize < recommendedQueueSize) {
 | 
			
		||||
      warnings.push(
 | 
			
		||||
        `DOWNLOAD_QUEUE_SIZE (${this.maxQueueSize}) might be too small. Recommended: ${recommendedQueueSize} (5x concurrent downloads)`
 | 
			
		||||
      );
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if (warnings.length > 0) {
 | 
			
		||||
      console.warn(`[DOWNLOAD MANAGER] Configuration warnings:`);
 | 
			
		||||
      warnings.forEach((warning) => console.warn(`  - ${warning}`));
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if (errors.length > 0) {
 | 
			
		||||
      console.error(`[DOWNLOAD MANAGER] Configuration errors:`);
 | 
			
		||||
      errors.forEach((error) => console.error(`  - ${error}`));
 | 
			
		||||
      throw new Error(`Invalid download manager configuration: ${errors.join(", ")}`);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  public async requestDownloadSlot(downloadId: string, metadata?: DownloadSlotRequest): Promise<void> {
    if (metadata?.fileSize) {
      const fileSizeGB = metadata.fileSize / 1024 ** 3;
      if (fileSizeGB < this.minFileSizeGB) {
        console.log(
          `[DOWNLOAD MANAGER] File ${metadata.fileName || "unknown"} (${fileSizeGB.toFixed(2)}GB) below threshold (${this.minFileSizeGB}GB), bypassing queue`
        );
        return Promise.resolve();
      }
    }

    if (this.canStartImmediately()) {
      console.log(`[DOWNLOAD MANAGER] Immediate start: ${downloadId}`);
      return Promise.resolve();
    }

    if (this.downloadQueue.length >= this.maxQueueSize) {
      const error = new Error(`Download queue is full: ${this.downloadQueue.length}/${this.maxQueueSize}`);
      throw error;
    }

    return new Promise<void>((resolve, reject) => {
      const queuedDownload: QueuedDownload = {
        downloadId,
        queuedAt: Date.now(),
        resolve,
        reject,
        metadata,
      };

      this.downloadQueue.push(queuedDownload);

      const position = this.downloadQueue.length;
      console.log(`[DOWNLOAD MANAGER] Queued: ${downloadId} (Position: ${position}/${this.maxQueueSize})`);

      if (metadata?.fileName && metadata?.fileSize) {
        const sizeMB = (metadata.fileSize / (1024 * 1024)).toFixed(1);
        console.log(`[DOWNLOAD MANAGER] Queued file: ${metadata.fileName} (${sizeMB}MB)`);
      }
    });
  }

  private canStartImmediately(): boolean {
    const currentMemoryMB = this.getCurrentMemoryUsage();

    if (currentMemoryMB > this.memoryThresholdMB) {
      return false;
    }

    if (this.activeDownloads.size >= this.maxConcurrentDownloads) {
      return false;
    }

    return true;
  }

  public canStartDownload(): { allowed: boolean; reason?: string } {
    if (this.canStartImmediately()) {
      return { allowed: true };
    }

    const currentMemoryMB = this.getCurrentMemoryUsage();

    if (currentMemoryMB > this.memoryThresholdMB) {
      return {
        allowed: false,
        reason: `Memory usage too high: ${currentMemoryMB.toFixed(0)}MB > ${this.memoryThresholdMB}MB`,
      };
    }

    return {
      allowed: false,
      reason: `Too many concurrent downloads: ${this.activeDownloads.size}/${this.maxConcurrentDownloads}`,
    };
  }

  public startDownload(downloadId: string): void {
    const memUsage = process.memoryUsage();
    this.activeDownloads.set(downloadId, {
      startTime: Date.now(),
      memoryAtStart: memUsage.rss + memUsage.external,
    });

    console.log(
      `[DOWNLOAD MANAGER] Started: ${downloadId} (${this.activeDownloads.size}/${this.maxConcurrentDownloads} active)`
    );
  }

  public endDownload(downloadId: string): void {
    const downloadInfo = this.activeDownloads.get(downloadId);
    this.activeDownloads.delete(downloadId);

    if (downloadInfo) {
      const duration = Date.now() - downloadInfo.startTime;
      const memUsage = process.memoryUsage();
      const currentMemory = memUsage.rss + memUsage.external;
      const memoryDiff = currentMemory - downloadInfo.memoryAtStart;

      console.log(
        `[DOWNLOAD MANAGER] Ended: ${downloadId} (Duration: ${(duration / 1000).toFixed(1)}s, Memory delta: ${(memoryDiff / 1024 / 1024).toFixed(1)}MB)`
      );

      if (memoryDiff > 100 * 1024 * 1024 && global.gc) {
        setImmediate(() => {
          global.gc!();
          console.log(`[DOWNLOAD MANAGER] Forced GC after download ${downloadId}`);
        });
      }
    }

    this.processQueue();
  }

  private processQueue(): void {
    if (this.downloadQueue.length === 0 || !this.canStartImmediately()) {
      return;
    }

    const nextDownload = this.downloadQueue.shift();
    if (!nextDownload) {
      return;
    }

    console.log(
      `[DOWNLOAD MANAGER] Processing queue: ${nextDownload.downloadId} (${this.downloadQueue.length} remaining)`
    );

    if (nextDownload.metadata?.fileName && nextDownload.metadata?.fileSize) {
      const sizeMB = (nextDownload.metadata.fileSize / (1024 * 1024)).toFixed(1);
      console.log(`[DOWNLOAD MANAGER] Starting queued file: ${nextDownload.metadata.fileName} (${sizeMB}MB)`);
    }

    nextDownload.resolve();
  }

  public getActiveDownloadsCount(): number {
    return this.activeDownloads.size;
  }

  private getCurrentMemoryUsage(): number {
    const usage = process.memoryUsage();
    return (usage.rss + usage.external) / (1024 * 1024);
  }

  public getCurrentMemoryUsageMB(): number {
    return this.getCurrentMemoryUsage();
  }

  public getQueueStatus(): QueueStatus {
    return {
      queueLength: this.downloadQueue.length,
      maxQueueSize: this.maxQueueSize,
      activeDownloads: this.activeDownloads.size,
      maxConcurrent: this.maxConcurrentDownloads,
      queuedDownloads: this.downloadQueue.map((download, index) => ({
        downloadId: download.downloadId,
        position: index + 1,
        waitTime: Date.now() - download.queuedAt,
        fileName: download.metadata?.fileName,
        fileSize: download.metadata?.fileSize,
      })),
    };
  }

  public cancelQueuedDownload(downloadId: string): boolean {
    const index = this.downloadQueue.findIndex((item) => item.downloadId === downloadId);

    if (index === -1) {
      return false;
    }

    const canceledDownload = this.downloadQueue.splice(index, 1)[0];
    canceledDownload.reject(new Error(`Download ${downloadId} was cancelled`));

    console.log(`[DOWNLOAD MANAGER] Cancelled queued download: ${downloadId} (was at position ${index + 1})`);
    return true;
  }

  private cleanupStaleDownloads(): void {
    const now = Date.now();
    const staleThreshold = 10 * 60 * 1000; // 10 minutes
    const queueStaleThreshold = 30 * 60 * 1000;

    for (const [downloadId, info] of this.activeDownloads.entries()) {
      if (now - info.startTime > staleThreshold) {
        console.warn(`[DOWNLOAD MANAGER] Cleaning up stale active download: ${downloadId}`);
        this.activeDownloads.delete(downloadId);
      }
    }

    const initialQueueLength = this.downloadQueue.length;
    this.downloadQueue = this.downloadQueue.filter((download) => {
      if (now - download.queuedAt > queueStaleThreshold) {
        console.warn(`[DOWNLOAD MANAGER] Cleaning up stale queued download: ${download.downloadId}`);
        download.reject(new Error(`Download ${download.downloadId} timed out in queue`));
        return false;
      }
      return true;
    });

    if (this.downloadQueue.length < initialQueueLength) {
      console.log(
        `[DOWNLOAD MANAGER] Cleaned up ${initialQueueLength - this.downloadQueue.length} stale queued downloads`
      );
    }

    this.processQueue();
  }

  public shouldThrottleStream(): boolean {
    const currentMemoryMB = this.getCurrentMemoryUsageMB();
    return currentMemoryMB > this.memoryThresholdMB * 0.8;
  }

  public getThrottleDelay(): number {
    const currentMemoryMB = this.getCurrentMemoryUsageMB();
    const thresholdRatio = currentMemoryMB / this.memoryThresholdMB;

    if (thresholdRatio > 0.9) return 200;
    if (thresholdRatio > 0.8) return 100;
    return 50;
  }

  public destroy(): void {
    if (this.cleanupInterval) {
      clearInterval(this.cleanupInterval);
    }

    this.downloadQueue.forEach((download) => {
      download.reject(new Error("Download manager is shutting down"));
    });

    this.activeDownloads.clear();
    this.downloadQueue = [];
    console.log("[DOWNLOAD MANAGER] Shutdown completed");
  }

  public clearQueue(): number {
    const clearedCount = this.downloadQueue.length;

    this.downloadQueue.forEach((download) => {
      download.reject(new Error("Queue was cleared by administrator"));
    });

    this.downloadQueue = [];
    console.log(`[DOWNLOAD MANAGER] Cleared queue: ${clearedCount} downloads cancelled`);
    return clearedCount;
  }
}
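A minimal usage sketch of the slot lifecycle above, as a download route might drive it (the downloadId scheme and file metadata are illustrative, not taken from this commit):

// Hypothetical caller: acquire a slot, stream with backpressure, always release.
const manager = DownloadMemoryManager.getInstance();
const downloadId = `dl-${Date.now()}`;

await manager.requestDownloadSlot(downloadId, { fileName: "backup.tar", fileSize: 5 * 1024 ** 3 }); // resolves now or waits in queue
manager.startDownload(downloadId);
try {
  // ...stream chunks; between chunks, back off while memory pressure is high:
  if (manager.shouldThrottleStream()) {
    await new Promise((resolve) => setTimeout(resolve, manager.getThrottleDelay()));
  }
} finally {
  manager.endDownload(downloadId); // frees the slot and wakes the next queued download
}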
@@ -2,7 +2,6 @@

import { useCallback, useEffect, useState } from "react";
import { IconCheck, IconFile, IconMail, IconUpload, IconUser, IconX } from "@tabler/icons-react";
import { useTranslations } from "next-intl";
import { useDropzone } from "react-dropzone";
import { toast } from "sonner";
@@ -14,9 +13,8 @@ import { Label } from "@/components/ui/label";
import { Progress } from "@/components/ui/progress";
import { Textarea } from "@/components/ui/textarea";
import { getPresignedUrlForUploadByAlias, registerFileUploadByAlias } from "@/http/endpoints";
import { formatFileSize } from "@/utils/format-file-size";
import { S3Uploader } from "@/utils/s3-upload";
import { FILE_STATUS, UPLOAD_CONFIG, UPLOAD_PROGRESS } from "../constants";
import { FileUploadSectionProps, FileWithProgress } from "../types";

@@ -26,24 +24,9 @@ export function FileUploadSection({ reverseShare, password, alias, onUploadSucce
  const [uploaderEmail, setUploaderEmail] = useState("");
  const [description, setDescription] = useState("");
  const [isUploading, setIsUploading] = useState(false);

  const t = useTranslations();

  const validateFileSize = useCallback(
    (file: File): string | null => {
      if (!reverseShare.maxFileSize) return null;
@@ -150,55 +133,20 @@ export function FileUploadSection({ reverseShare, password, alias, onUploadSucce
    return fileName.split(".").pop() || "";
  };

  const uploadFileToStorage = async (
    file: File,
    presignedUrl: string,
    onProgress?: (progress: number) => void
  ): Promise<void> => {
    // Always use S3 direct upload
    const result = await S3Uploader.uploadFile({
      file,
      presignedUrl,
      onProgress,
    });

    if (!result.success) {
      throw new Error(result.error || "Upload failed");
    }
  };

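The component wires uploadFileToStorage into the usual presigned-URL round trip. Roughly, and only as a sketch: the function names come from the code above, while the payload shapes and the updateFileProgress helper are assumptions:

// Hypothetical glue: request a presigned URL, PUT the bytes, then register the upload.
const presigned = await getPresignedUrlForUploadByAlias(alias, { fileName: file.name, fileSize: file.size }); // payload shape assumed
await uploadFileToStorage(file, presigned.data.url, (progress) => updateFileProgress(file.name, progress)); // updateFileProgress is hypothetical
await registerFileUploadByAlias(alias, { objectName: presigned.data.objectName, name: file.name, size: file.size }); // payload shape assumed
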
@@ -41,10 +41,10 @@ import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@
import {
  copyReverseShareFileToUserFiles,
  deleteReverseShareFile,
  downloadReverseShareFile,
  updateReverseShareFile,
} from "@/http/endpoints/reverse-shares";
import type { ReverseShareFile } from "@/http/endpoints/reverse-shares/types";
import { getFileIcon } from "@/utils/file-icons";
import { truncateFileName } from "@/utils/file-utils";
import { ReverseShare } from "../hooks/use-reverse-shares";
@@ -471,13 +471,21 @@ export function ReceivedFilesModal({

  const handleDownload = async (file: ReverseShareFile) => {
    try {
      const loadingToast = toast.loading(t("reverseShares.modals.receivedFiles.downloading") || "Downloading...");
      const response = await downloadReverseShareFile(file.id);

      const link = document.createElement("a");
      link.href = response.data.url;
      link.download = file.name;
      document.body.appendChild(link);
      link.click();
      document.body.removeChild(link);

      toast.dismiss(loadingToast);
      toast.success(t("reverseShares.modals.receivedFiles.downloadSuccess"));
    } catch (error) {
      console.error("Download error:", error);
      toast.error(t("reverseShares.modals.receivedFiles.downloadError"));
    }
  };

@@ -600,25 +608,28 @@ export function ReceivedFilesModal({
    if (selectedFileObjects.length === 0) return;

    try {
      const loadingToast = toast.loading(t("shareManager.creatingZip"));

      try {
        // Download files individually
        for (const file of selectedFileObjects) {
          const response = await downloadReverseShareFile(file.id);
          const link = document.createElement("a");
          link.href = response.data.url;
          link.download = file.name;
          document.body.appendChild(link);
          link.click();
          document.body.removeChild(link);
        }

        toast.dismiss(loadingToast);
        toast.success(t("shareManager.zipDownloadSuccess"));
        setSelectedFiles(new Set());
      } catch (error) {
        toast.dismiss(loadingToast);
        toast.error(t("shareManager.zipDownloadError"));
        throw error;
      }
    } catch (error) {
      console.error("Error creating ZIP:", error);
    }

@@ -6,9 +6,8 @@ import { useTranslations } from "next-intl";
import { toast } from "sonner";

import { Button } from "@/components/ui/button";
import { deleteReverseShareFile, downloadReverseShareFile } from "@/http/endpoints/reverse-shares";
import type { ReverseShareFile } from "@/http/endpoints/reverse-shares/types";
import { getFileIcon } from "@/utils/file-icons";
import { ReverseShareFilePreviewModal } from "./reverse-share-file-preview-modal";

@@ -56,13 +55,21 @@ export function ReceivedFilesSection({ files, onFileDeleted }: ReceivedFilesSect

  const handleDownload = async (file: ReverseShareFile) => {
    try {
      const loadingToast = toast.loading(t("reverseShares.modals.details.downloading") || "Downloading...");
      const response = await downloadReverseShareFile(file.id);

      const link = document.createElement("a");
      link.href = response.data.url;
      link.download = file.name;
      document.body.appendChild(link);
      link.click();
      document.body.removeChild(link);

      toast.dismiss(loadingToast);
      toast.success(t("reverseShares.modals.details.downloadSuccess"));
    } catch (error) {
      console.error("Download error:", error);
      toast.error(t("reverseShares.modals.details.downloadError"));
    }
  };

@@ -5,13 +5,8 @@ import { useParams, useRouter, useSearchParams } from "next/navigation";
import { useTranslations } from "next-intl";
import { toast } from "sonner";

import { getDownloadUrl, getShareByAlias } from "@/http/endpoints/index";
import type { Share } from "@/http/endpoints/shares/types";

const createSlug = (name: string): string => {
  return name
@@ -223,17 +218,14 @@ export function usePublicShare() {
    await loadShare(password);
  };

  const handleFolderDownload = async () => {
    try {
      if (!share) {
        throw new Error("Share data not available");
      }

      // Folder download not yet implemented
      toast.info("Folder download: use bulk download modal");
    } catch (error) {
      console.error("Error downloading folder:", error);
      throw error;
@@ -243,25 +235,28 @@
  const handleDownload = async (objectName: string, fileName: string) => {
    try {
      if (objectName.startsWith("folder:")) {
        await toast.promise(handleFolderDownload(), {
          loading: t("shareManager.creatingZip"),
          success: t("shareManager.zipDownloadSuccess"),
          error: t("share.errors.downloadFailed"),
        });
      } else {
        const loadingToast = toast.loading(t("share.messages.downloadStarted"));

        const response = await getDownloadUrl(
          objectName,
          password ? { headers: { "x-share-password": password } } : undefined
        );

        const link = document.createElement("a");
        link.href = response.data.url;
        link.download = fileName;
        document.body.appendChild(link);
        link.click();
        document.body.removeChild(link);

        toast.dismiss(loadingToast);
        toast.success(t("shareManager.downloadSuccess"));
      }
    } catch {}
  };
@@ -281,8 +276,6 @@
    }

    try {
      // Prepare all items for the share-specific bulk download
      const allItems: Array<{
        objectName?: string;
@@ -321,22 +314,43 @@
        return;
      }

      const loadingToast = toast.loading(t("shareManager.preparingDownload"));

      try {
        // Get presigned URLs for all files
        const downloadItems = await Promise.all(
          allItems
            .filter((item) => item.type === "file" && item.objectName)
            .map(async (item) => {
              const response = await getDownloadUrl(
                item.objectName!,
                password ? { headers: { "x-share-password": password } } : undefined
              );
              return {
                url: response.data.url,
                name: item.name,
              };
            })
        );

        if (downloadItems.length === 0) {
          toast.dismiss(loadingToast);
          toast.error(t("shareManager.noFilesToDownload"));
          return;
        }

        // Create ZIP with all files
        const { downloadFilesAsZip } = await import("@/utils/zip-download");
        const zipName = `${share.name || t("shareManager.defaultShareName")}.zip`;
        await downloadFilesAsZip(downloadItems, zipName);

        toast.dismiss(loadingToast);
        toast.success(t("shareManager.zipDownloadSuccess"));
      } catch (error) {
        toast.dismiss(loadingToast);
        toast.error(t("shareManager.zipDownloadError"));
        throw error;
      }
    } catch (error) {
      console.error("Error creating ZIP:", error);
    }
@@ -392,24 +406,46 @@
          })),
      ];

      const loadingToast = toast.loading(t("shareManager.preparingDownload"));

      try {
        // Get presigned URLs for all files
        const fileItems = allItems.filter(
          (item): item is { objectName: string; name: string; type: "file" } =>
            item.type === "file" && "objectName" in item && !!item.objectName
        );

        const downloadItems = await Promise.all(
          fileItems.map(async (item) => {
            const response = await getDownloadUrl(
              item.objectName,
              password ? { headers: { "x-share-password": password } } : undefined
            );
            return {
              url: response.data.url,
              name: item.name,
            };
          })
        );

        if (downloadItems.length === 0) {
          toast.dismiss(loadingToast);
          toast.error(t("shareManager.noFilesToDownload"));
          return;
        }

        // Create ZIP with all files
        const { downloadFilesAsZip } = await import("@/utils/zip-download");
        const finalZipName = `${share.name || t("shareManager.defaultShareName")}-selected.zip`;
        await downloadFilesAsZip(downloadItems, finalZipName);

        toast.dismiss(loadingToast);
        toast.success(t("shareManager.zipDownloadSuccess"));
      } catch (error) {
        toast.dismiss(loadingToast);
        toast.error(t("shareManager.zipDownloadError"));
        throw error;
      }
    } catch (error) {
      console.error("Error creating ZIP:", error);
      toast.error(t("shareManager.zipDownloadError"));

@@ -1,66 +1,71 @@
import { NextRequest, NextResponse } from "next/server";

const API_BASE_URL = process.env.API_BASE_URL || "http://localhost:3333";

export async function GET(req: NextRequest) {
  try {
    const { searchParams } = new URL(req.url);
    const queryString = searchParams.toString();
    const url = `${API_BASE_URL}/files/download${queryString ? `?${queryString}` : ""}`;

    const apiRes = await fetch(url, {
      method: "GET",
      headers: {
        "Content-Type": "application/json",
        cookie: req.headers.get("cookie") || "",
        ...Object.fromEntries(
          Array.from(req.headers.entries()).filter(
            ([key]) =>
              key.startsWith("authorization") ||
              key.startsWith("x-forwarded") ||
              key === "user-agent" ||
              key === "accept"
          )
        ),
      },
      redirect: "manual",
    });

    if (!apiRes.ok) {
      const errorText = await apiRes.text();
      return new NextResponse(errorText, {
        status: apiRes.status,
        headers: {
          "Content-Type": "application/json",
        },
      });
    }

    // Stream the file content
    const contentType = apiRes.headers.get("content-type") || "application/octet-stream";
    const contentDisposition = apiRes.headers.get("content-disposition");
    const contentLength = apiRes.headers.get("content-length");
    const cacheControl = apiRes.headers.get("cache-control");

    const res = new NextResponse(apiRes.body, {
      status: apiRes.status,
      headers: {
        "Content-Type": contentType,
      },
    });

    if (contentDisposition) {
      res.headers.set("Content-Disposition", contentDisposition);
    }
    if (contentLength) {
      res.headers.set("Content-Length", contentLength);
    }
    if (cacheControl) {
      res.headers.set("Cache-Control", cacheControl);
    }

    return res;
  } catch (error) {
    console.error("Error proxying download request:", error);
    return new NextResponse(JSON.stringify({ error: "Failed to download file" }), {
      status: 500,
      headers: {
        "Content-Type": "application/json",
      },
    });
  }
}
 
81  apps/web/src/app/api/(proxy)/files/upload/route.ts  Normal file
@@ -0,0 +1,81 @@
import { NextRequest, NextResponse } from "next/server";

const API_BASE_URL = process.env.API_BASE_URL || "http://localhost:3333";

/**
 * Upload proxy for internal storage system
 *
 * This proxy is ONLY used when ENABLE_S3=false (internal storage mode).
 * External S3 uploads use presigned URLs directly from the client.
 *
 * Why we need this proxy:
 * 1. Security: Internal storage is not exposed to the internet
 * 2. Simplicity: No need to configure CORS on storage system
 * 3. Compatibility: Works in any network setup
 *
 * Performance note: Node.js streams the upload efficiently with minimal memory overhead
 */

async function handleUpload(req: NextRequest, method: "POST" | "PUT") {
  try {
    const { searchParams } = new URL(req.url);
    const queryString = searchParams.toString();
    const url = `${API_BASE_URL}/files/upload${queryString ? `?${queryString}` : ""}`;

    const body = req.body;

    const apiRes = await fetch(url, {
      method: method,
      headers: {
        "Content-Type": req.headers.get("content-type") || "application/octet-stream",
        cookie: req.headers.get("cookie") || "",
        ...Object.fromEntries(
          Array.from(req.headers.entries()).filter(
            ([key]) =>
              key.startsWith("authorization") ||
              key.startsWith("x-forwarded") ||
              key === "user-agent" ||
              key === "accept"
          )
        ),
      },
      body: body,
      // Required for streaming request bodies in Node.js 18+ / Next.js 15
      // See: https://nodejs.org/docs/latest-v18.x/api/fetch.html#request-duplex
      // @ts-expect-error - duplex not yet in TypeScript types but required at runtime
      duplex: "half",
      redirect: "manual",
    });

    const resBody = await apiRes.text();
    const res = new NextResponse(resBody, {
      status: apiRes.status,
      headers: {
        "Content-Type": "application/json",
      },
    });

    const setCookie = apiRes.headers.getSetCookie?.() || [];
    if (setCookie.length > 0) {
      res.headers.set("Set-Cookie", setCookie.join(","));
    }

    return res;
  } catch (error) {
    console.error("Error proxying upload request:", error);
    return new NextResponse(JSON.stringify({ error: "Failed to upload file" }), {
      status: 500,
      headers: {
        "Content-Type": "application/json",
      },
    });
  }
}

export async function POST(req: NextRequest) {
  return handleUpload(req, "POST");
}

export async function PUT(req: NextRequest) {
  return handleUpload(req, "PUT");
}
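In internal storage mode a browser client PUTs the raw bytes straight to this proxy. A minimal client-side sketch (the objectName query parameter is an assumption for illustration, not taken from this commit):

// Hypothetical client call against the proxy above.
async function uploadViaProxy(file: File): Promise<void> {
  const res = await fetch(`/api/files/upload?objectName=${encodeURIComponent(file.name)}`, {
    method: "PUT",
    headers: { "Content-Type": file.type || "application/octet-stream" },
    body: file, // the proxy streams this through to the API using duplex: "half"
  });
  if (!res.ok) throw new Error(`Upload failed: ${res.status}`);
}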
@@ -1,33 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

const API_BASE_URL = process.env.API_BASE_URL || "http://localhost:3333";

export async function DELETE(req: NextRequest, { params }: { params: Promise<{ fileId: string }> }) {
  const { fileId } = await params;
  const cookieHeader = req.headers.get("cookie");
  const url = `${API_BASE_URL}/filesystem/cancel-upload/${fileId}`;

  const apiRes = await fetch(url, {
    method: "DELETE",
    headers: {
      cookie: cookieHeader || "",
    },
  });

  const contentType = apiRes.headers.get("Content-Type") || "application/json";
  const resBody = await apiRes.text();

  const res = new NextResponse(resBody, {
    status: apiRes.status,
    headers: {
      "Content-Type": contentType,
    },
  });

  const setCookie = apiRes.headers.getSetCookie?.() || [];
  if (setCookie.length > 0) {
    res.headers.set("Set-Cookie", setCookie.join(","));
  }

  return res;
}
@@ -1,38 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

const API_BASE_URL = process.env.API_BASE_URL || "http://localhost:3333";

export async function DELETE(req: NextRequest, { params }: { params: Promise<{ downloadId: string }> }) {
  const { downloadId } = await params;
  const cookieHeader = req.headers.get("cookie");
  const url = `${API_BASE_URL}/filesystem/download-queue/${downloadId}`;

  try {
    const apiRes = await fetch(url, {
      method: "DELETE",
      headers: {
        "Content-Type": "application/json",
        cookie: cookieHeader || "",
      },
      redirect: "manual",
    });

    const resBody = await apiRes.text();
    const res = new NextResponse(resBody, {
      status: apiRes.status,
      headers: {
        "Content-Type": "application/json",
      },
    });

    const setCookie = apiRes.headers.getSetCookie?.() || [];
    if (setCookie.length > 0) {
      res.headers.set("Set-Cookie", setCookie.join(","));
    }

    return res;
  } catch (error) {
    console.error("Error proxying cancel download request:", error);
    return NextResponse.json({ error: "Internal server error" }, { status: 500 });
  }
}
@@ -1,37 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

const API_BASE_URL = process.env.API_BASE_URL || "http://localhost:3333";

export async function DELETE(req: NextRequest) {
  const cookieHeader = req.headers.get("cookie");
  const url = `${API_BASE_URL}/filesystem/download-queue`;

  try {
    const apiRes = await fetch(url, {
      method: "DELETE",
      headers: {
        "Content-Type": "application/json",
        cookie: cookieHeader || "",
      },
      redirect: "manual",
    });

    const resBody = await apiRes.text();
    const res = new NextResponse(resBody, {
      status: apiRes.status,
      headers: {
        "Content-Type": "application/json",
      },
    });

    const setCookie = apiRes.headers.getSetCookie?.() || [];
    if (setCookie.length > 0) {
      res.headers.set("Set-Cookie", setCookie.join(","));
    }

    return res;
  } catch (error) {
    console.error("Error proxying clear download queue request:", error);
    return NextResponse.json({ error: "Internal server error" }, { status: 500 });
  }
}
@@ -1,37 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

const API_BASE_URL = process.env.API_BASE_URL || "http://localhost:3333";

export async function GET(req: NextRequest) {
  const cookieHeader = req.headers.get("cookie");
  const url = `${API_BASE_URL}/filesystem/download-queue/status`;

  try {
    const apiRes = await fetch(url, {
      method: "GET",
      headers: {
        "Content-Type": "application/json",
        cookie: cookieHeader || "",
      },
      redirect: "manual",
    });

    const resBody = await apiRes.text();
    const res = new NextResponse(resBody, {
      status: apiRes.status,
      headers: {
        "Content-Type": "application/json",
      },
    });

    const setCookie = apiRes.headers.getSetCookie?.() || [];
    if (setCookie.length > 0) {
      res.headers.set("Set-Cookie", setCookie.join(","));
    }

    return res;
  } catch (error) {
    console.error("Error proxying download queue status request:", error);
    return NextResponse.json({ error: "Internal server error" }, { status: 500 });
  }
}
@@ -1,39 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

import { detectMimeTypeWithFallback } from "@/utils/mime-types";

const API_BASE_URL = process.env.API_BASE_URL || "http://localhost:3333";

export async function GET(req: NextRequest, { params }: { params: Promise<{ token: string }> }) {
  const { token } = await params;
  const cookieHeader = req.headers.get("cookie");
  const url = `${API_BASE_URL}/filesystem/download/${token}`;

  const apiRes = await fetch(url, {
    method: "GET",
    headers: {
      cookie: cookieHeader || "",
    },
  });

  const serverContentType = apiRes.headers.get("Content-Type");
  const contentDisposition = apiRes.headers.get("Content-Disposition");
  const contentLength = apiRes.headers.get("Content-Length");
  const contentType = detectMimeTypeWithFallback(serverContentType, contentDisposition);

  const res = new NextResponse(apiRes.body, {
    status: apiRes.status,
    headers: {
      "Content-Type": contentType,
      ...(contentDisposition && { "Content-Disposition": contentDisposition }),
      ...(contentLength && { "Content-Length": contentLength }),
    },
  });

  const setCookie = apiRes.headers.getSetCookie?.() || [];
  if (setCookie.length > 0) {
    res.headers.set("Set-Cookie", setCookie.join(","));
  }

  return res;
}
@@ -1,33 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

const API_BASE_URL = process.env.API_BASE_URL || "http://localhost:3333";

export async function GET(req: NextRequest, { params }: { params: Promise<{ fileId: string }> }) {
  const { fileId } = await params;
  const cookieHeader = req.headers.get("cookie");
  const url = `${API_BASE_URL}/filesystem/upload-progress/${fileId}`;

  const apiRes = await fetch(url, {
    method: "GET",
    headers: {
      cookie: cookieHeader || "",
    },
  });

  const contentType = apiRes.headers.get("Content-Type") || "application/json";
  const resBody = await apiRes.text();

  const res = new NextResponse(resBody, {
    status: apiRes.status,
    headers: {
      "Content-Type": contentType,
    },
  });

  const setCookie = apiRes.headers.getSetCookie?.() || [];
  if (setCookie.length > 0) {
    res.headers.set("Set-Cookie", setCookie.join(","));
  }

  return res;
}
@@ -1,69 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

export const maxDuration = 120000; // 2 minutes to handle large files
export const dynamic = "force-dynamic";

const API_BASE_URL = process.env.API_BASE_URL || "http://localhost:3333";

export async function PUT(req: NextRequest, { params }: { params: Promise<{ token: string }> }) {
  const { token } = await params;
  const cookieHeader = req.headers.get("cookie");
  const url = `${API_BASE_URL}/filesystem/upload/${token}`;

  const headers: Record<string, string> = {
    cookie: cookieHeader || "",
    "Content-Type": req.headers.get("Content-Type") || "application/octet-stream",
    "Content-Length": req.headers.get("Content-Length") || "0",
  };

  req.headers.forEach((value, key) => {
    if (key.startsWith("x-") || key.startsWith("X-")) {
      headers[key] = value;
    }
  });

  try {
    const apiRes = await fetch(url, {
      method: "PUT",
      headers,
      body: req.body,
      duplex: "half",
    } as RequestInit);

    const contentType = apiRes.headers.get("Content-Type") || "application/json";

    let resBody;
    if (contentType.includes("application/json")) {
      resBody = await apiRes.text();
    } else {
      resBody = await apiRes.arrayBuffer();
    }

    const res = new NextResponse(resBody, {
      status: apiRes.status,
      headers: {
        "Content-Type": contentType,
      },
    });

    const setCookie = apiRes.headers.getSetCookie?.() || [];
    if (setCookie.length > 0) {
      res.headers.set("Set-Cookie", setCookie.join(","));
    }

    return res;
  } catch (error) {
    return new NextResponse(
      JSON.stringify({
        error: "Proxy request failed",
        details: error instanceof Error ? error.message : "Unknown error",
      }),
      {
        status: 500,
        headers: {
          "Content-Type": "application/json",
        },
      }
    );
  }
}
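The four filesystem proxy routes deleted above are superseded by presigned URLs issued by the API. As a rough sketch, an S3 direct-upload helper matching the S3Uploader.uploadFile({ file, presignedUrl, signal, onProgress }) call shape used later in this commit could look like the following; the XHR-based body is an assumption, not the repository's actual implementation.

// Hypothetical sketch of the S3 direct-upload helper; only the call shape
// ({ file, presignedUrl, signal, onProgress }) and the { success, error }
// result are taken from this commit, the XHR body is an assumption.
interface S3UploadOptions {
  file: File;
  presignedUrl: string;
  signal?: AbortSignal;
  onProgress?: (progress: number) => void;
}

interface S3UploadResult {
  success: boolean;
  error?: string;
}

export class S3Uploader {
  static uploadFile({ file, presignedUrl, signal, onProgress }: S3UploadOptions): Promise<S3UploadResult> {
    return new Promise((resolve) => {
      // XMLHttpRequest instead of fetch so upload progress events are available.
      const xhr = new XMLHttpRequest();
      xhr.open("PUT", presignedUrl);
      xhr.setRequestHeader("Content-Type", file.type || "application/octet-stream");

      xhr.upload.onprogress = (event) => {
        if (event.lengthComputable) onProgress?.(Math.round((event.loaded / event.total) * 100));
      };

      xhr.onload = () =>
        resolve(
          xhr.status >= 200 && xhr.status < 300 ? { success: true } : { success: false, error: `HTTP ${xhr.status}` }
        );
      xhr.onerror = () => resolve({ success: false, error: "Network error" });

      // Honor the AbortController the upload components thread through.
      signal?.addEventListener("abort", () => {
        xhr.abort();
        resolve({ success: false, error: "Upload cancelled" });
      });

      xhr.send(file);
    });
  }
}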
@@ -46,8 +46,6 @@ export default function DashboardPage() {
          icon={<IconLayoutDashboardFilled className="text-xl" />}
          showBreadcrumb={false}
          title={t("dashboard.pageTitle")}
          pendingDownloads={fileManager.pendingDownloads}
          onCancelDownload={fileManager.cancelPendingDownload}
        >
          <StorageUsage diskSpace={diskSpace} diskSpaceError={diskSpaceError} onRetry={handleRetryDiskSpace} />
          <QuickAccessCards />

@@ -122,8 +122,6 @@ export default function FilesPage() {
          breadcrumbLabel={t("files.breadcrumb")}
          icon={<IconFolderOpen size={20} />}
          title={t("files.pageTitle")}
          pendingDownloads={fileManager.pendingDownloads}
          onCancelDownload={fileManager.cancelPendingDownload}
        >
          <Card>
            <CardContent>

@@ -1,268 +0,0 @@
"use client";

import { useEffect, useState } from "react";
import {
  IconAlertCircle,
  IconBell,
  IconBellOff,
  IconClock,
  IconDownload,
  IconLoader2,
  IconX,
} from "@tabler/icons-react";
import { useTranslations } from "next-intl";

import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import { Progress } from "@/components/ui/progress";
import { useDownloadQueue } from "@/hooks/use-download-queue";
import { usePushNotifications } from "@/hooks/use-push-notifications";
import { formatFileSize } from "@/utils/format-file-size";

interface PendingDownload {
  downloadId: string;
  fileName: string;
  objectName: string;
  startTime: number;
  status: "pending" | "queued" | "downloading" | "completed" | "failed";
}

interface DownloadQueueIndicatorProps {
  pendingDownloads?: PendingDownload[];
  onCancelDownload?: (downloadId: string) => void;
  className?: string;
}

export function DownloadQueueIndicator({
  pendingDownloads = [],
  onCancelDownload,
  className = "",
}: DownloadQueueIndicatorProps) {
  const t = useTranslations();

  const shouldAutoRefresh = pendingDownloads.length > 0;
  const { queueStatus, refreshQueue, cancelDownload, getEstimatedWaitTime } = useDownloadQueue(shouldAutoRefresh);
  const notifications = usePushNotifications();
  const [isOpen, setIsOpen] = useState(false);

  useEffect(() => {
    if (pendingDownloads.length > 0 || (queueStatus && queueStatus.queueLength > 0)) {
      setIsOpen(true);
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [pendingDownloads.length, queueStatus?.queueLength]);

  const totalDownloads = pendingDownloads.length + (queueStatus?.queueLength || 0);
  const activeDownloads = queueStatus?.activeDownloads || 0;

  if (totalDownloads === 0 && activeDownloads === 0) {
    return null;
  }

  const getStatusIcon = (status: string) => {
    switch (status) {
      case "pending":
        return <IconLoader2 className="h-4 w-4 animate-spin text-blue-500" />;
      case "queued":
        return <IconClock className="h-4 w-4 text-yellow-500" />;
      case "downloading":
        return <IconDownload className="h-4 w-4 text-green-500" />;
      case "completed":
        return <IconDownload className="h-4 w-4 text-green-600" />;
      case "failed":
        return <IconAlertCircle className="h-4 w-4 text-red-500" />;
      default:
        return <IconLoader2 className="h-4 w-4 animate-spin" />;
    }
  };

  const getStatusText = (status: string) => {
    switch (status) {
      case "pending":
        return t("downloadQueue.status.pending");
      case "queued":
        return t("downloadQueue.status.queued");
      case "downloading":
        return t("downloadQueue.status.downloading");
      case "completed":
        return t("downloadQueue.status.completed");
      case "failed":
        return t("downloadQueue.status.failed");
      default:
        return status;
    }
  };

  return (
    <div className={`fixed bottom-6 right-6 z-50 max-w-sm ${className}`} data-download-indicator>
      <div className="flex flex-col gap-3">
        <Button
          variant="outline"
          size="sm"
          onClick={() => setIsOpen(!isOpen)}
          className="min-w-fit bg-background/80 backdrop-blur-md border-border/50 shadow-lg hover:shadow-xl transition-all duration-200 text-sm font-medium"
        >
          <IconDownload className="h-4 w-4 mr-2 text-primary" />
          Downloads
          {totalDownloads > 0 && (
            <Badge variant="secondary" className="ml-2 text-xs font-semibold bg-primary/10 text-primary border-0">
              {totalDownloads}
            </Badge>
          )}
        </Button>

        {isOpen && (
          <div className="border border-border/50 rounded-xl bg-background/95 backdrop-blur-md shadow-xl animate-in slide-in-from-bottom-2 duration-200">
            <div className="p-4 border-b border-border/50">
              <div className="flex items-center justify-between">
                <h3 className="font-semibold text-sm text-foreground">Download Manager</h3>
                <div className="flex items-center gap-2">
                  {notifications.isSupported && (
                    <Button
                      variant="ghost"
                      size="sm"
                      onClick={notifications.requestPermission}
                      className="h-7 w-7 p-0 rounded-md hover:bg-muted/80"
                      title={
                        notifications.hasPermission
                          ? t("notifications.permissionGranted")
                          : "Enable download notifications"
                      }
                    >
                      {notifications.hasPermission ? (
                        <IconBell className="h-3.5 w-3.5 text-green-600" />
                      ) : (
                        <IconBellOff className="h-3.5 w-3.5 text-muted-foreground" />
                      )}
                    </Button>
                  )}
                  <Button
                    variant="ghost"
                    size="sm"
                    onClick={() => setIsOpen(false)}
                    className="h-7 w-7 p-0 rounded-md hover:bg-muted/80"
                  >
                    <IconX className="h-3.5 w-3.5 text-muted-foreground" />
                  </Button>
                </div>
              </div>

              {queueStatus && (
                <div className="mt-3 space-y-2">
                  <div className="flex items-center justify-between text-xs">
                    <span className="text-muted-foreground">Active:</span>
                    <span className="font-medium text-foreground">
                      {activeDownloads}/{queueStatus.maxConcurrent}
                    </span>
                  </div>
                  <div className="flex items-center justify-between text-xs">
                    <span className="text-muted-foreground">Queued:</span>
                    <span className="font-medium text-foreground">
                      {queueStatus.queueLength}/{queueStatus.maxQueueSize}
                    </span>
                  </div>
                  {queueStatus.maxConcurrent > 0 && (
                    <div className="space-y-1">
                      <Progress value={(activeDownloads / queueStatus.maxConcurrent) * 100} className="h-1.5" />
                      <p className="text-xs text-muted-foreground">
                        {Math.round((activeDownloads / queueStatus.maxConcurrent) * 100)}% capacity
                      </p>
                    </div>
                  )}
                </div>
              )}
            </div>

            <div className="p-3 space-y-2">
              {pendingDownloads.map((download) => (
                <div
                  key={download.downloadId}
                  className="group flex items-center justify-between p-2.5 rounded-lg bg-muted/30 hover:bg-muted/50 transition-colors border border-transparent hover:border-border/50"
                >
                  <div className="flex items-center gap-3 flex-1 min-w-0">
                    <div className="shrink-0">{getStatusIcon(download.status)}</div>
                    <div className="flex-1 min-w-0">
                      <p className="text-sm font-medium text-foreground truncate leading-tight">{download.fileName}</p>
                      <p className="text-xs text-muted-foreground mt-0.5">{getStatusText(download.status)}</p>
                    </div>
                  </div>

                  {(download.status === "pending" || download.status === "queued") && onCancelDownload && (
                    <Button
                      variant="ghost"
                      size="sm"
                      onClick={() => onCancelDownload(download.downloadId)}
                      className="h-7 w-7 p-0 opacity-0 group-hover:opacity-100 transition-opacity shrink-0 hover:bg-destructive/10 hover:text-destructive"
                    >
                      <IconX className="h-3.5 w-3.5" />
                    </Button>
                  )}
                </div>
              ))}

              {(queueStatus?.queuedDownloads || []).map((download) => {
                const waitTime = getEstimatedWaitTime(download.downloadId);

                return (
                  <div
                    key={download.downloadId}
                    className="group flex items-center justify-between p-2.5 rounded-lg bg-muted/30 hover:bg-muted/50 transition-colors border border-transparent hover:border-border/50"
                  >
                    <div className="flex items-center gap-3 flex-1 min-w-0">
                      <div className="shrink-0">
                        <IconClock className="h-4 w-4 text-amber-500" />
                      </div>
                      <div className="flex-1 min-w-0">
                        <p className="text-sm font-medium text-foreground truncate leading-tight">
                          {download.fileName || t("downloadQueue.indicator.unknownFile")}
                        </p>
                        <div className="text-xs text-muted-foreground space-y-0.5">
                          <div className="flex items-center gap-2">
                            <span>#{download.position} in queue</span>
                            {download.fileSize && (
                              <span className="text-muted-foreground/70">• {formatFileSize(download.fileSize)}</span>
                            )}
                          </div>
                          {waitTime && <p className="text-xs text-muted-foreground/80">~{waitTime} remaining</p>}
                        </div>
                      </div>
                    </div>

                    <Button
                      variant="ghost"
                      size="sm"
                      onClick={() => cancelDownload(download.downloadId)}
                      className="h-7 w-7 p-0 opacity-0 group-hover:opacity-100 transition-opacity shrink-0 hover:bg-destructive/10 hover:text-destructive"
                    >
                      <IconX className="h-3.5 w-3.5" />
                    </Button>
                  </div>
                );
              })}

              {totalDownloads === 0 && (
                <div className="text-center py-8">
                  <IconDownload className="h-8 w-8 mx-auto text-muted-foreground/50 mb-2" />
                  <p className="text-sm text-muted-foreground">No active downloads</p>
                </div>
              )}
            </div>

            {queueStatus && queueStatus.queueLength > 0 && (
              <div className="p-3 border-t border-border/50">
                <Button
                  variant="outline"
                  size="sm"
                  onClick={refreshQueue}
                  className="w-full text-xs font-medium hover:bg-muted/80"
                >
                  Refresh Queue
                </Button>
              </div>
            )}
          </div>
        )}
      </div>
    </div>
  );
}
@@ -2,19 +2,17 @@

import { useCallback, useEffect, useState } from "react";
import { IconCloudUpload, IconLoader, IconX } from "@tabler/icons-react";
import axios from "axios";
import { useTranslations } from "next-intl";
import { toast } from "sonner";

import { Button } from "@/components/ui/button";
import { Progress } from "@/components/ui/progress";
import { checkFile, getFilePresignedUrl, registerFile } from "@/http/endpoints";
import { getSystemInfo } from "@/http/endpoints/app";
import { ChunkedUploader } from "@/utils/chunked-upload";
import { getFileIcon } from "@/utils/file-icons";
import { generateSafeFileName } from "@/utils/file-utils";
import { formatFileSize } from "@/utils/format-file-size";
import getErrorData from "@/utils/getErrorData";
import { S3Uploader } from "@/utils/s3-upload";

interface GlobalDropZoneProps {
  onSuccess?: () => void;
@@ -45,7 +43,6 @@ export function GlobalDropZone({ onSuccess, children, currentFolderId }: GlobalD
  const [isDragOver, setIsDragOver] = useState(false);
  const [fileUploads, setFileUploads] = useState<FileUpload[]>([]);
  const [hasShownSuccessToast, setHasShownSuccessToast] = useState(false);
  const [isS3Enabled, setIsS3Enabled] = useState<boolean | null>(null);

  const generateFileId = useCallback(() => {
    return Date.now().toString() + Math.random().toString(36).substr(2, 9);
@@ -64,18 +61,6 @@ export function GlobalDropZone({ onSuccess, children, currentFolderId }: GlobalD
    [generateFileId]
  );

  const calculateUploadTimeout = useCallback((fileSize: number): number => {
    const baseTimeout = 300000;
    const fileSizeMB = fileSize / (1024 * 1024);
    if (fileSizeMB > 500) {
      const extraMB = fileSizeMB - 500;
      const extraMinutes = Math.ceil(extraMB / 100);
      return baseTimeout + extraMinutes * 60000;
    }

    return baseTimeout;
  }, []);

  const handleDragOver = useCallback((event: DragEvent) => {
    event.preventDefault();
    event.stopPropagation();
@@ -140,61 +125,28 @@ export function GlobalDropZone({ onSuccess, children, currentFolderId }: GlobalD
        const abortController = new AbortController();
        setFileUploads((prev) => prev.map((u) => (u.id === id ? { ...u, abortController } : u)));

        const shouldUseChunked = ChunkedUploader.shouldUseChunkedUpload(file.size, isS3Enabled ?? undefined);
        // Always use S3 direct upload
        const result = await S3Uploader.uploadFile({
          file,
          presignedUrl: url,
          signal: abortController.signal,
          onProgress: (progress: number) => {
            setFileUploads((prev) => prev.map((u) => (u.id === id ? { ...u, progress } : u)));
          },
        });

        if (shouldUseChunked) {
          const chunkSize = ChunkedUploader.calculateOptimalChunkSize(file.size);

          const result = await ChunkedUploader.uploadFile({
            file,
            url,
            chunkSize,
            signal: abortController.signal,
            isS3Enabled: isS3Enabled ?? undefined,
            onProgress: (progress) => {
              setFileUploads((prev) => prev.map((u) => (u.id === id ? { ...u, progress } : u)));
            },
          });

          if (!result.success) {
            throw new Error(result.error || "Chunked upload failed");
          }

          const finalObjectName = result.finalObjectName || objectName;

          await registerFile({
            name: fileName,
            objectName: finalObjectName,
            size: file.size,
            extension: extension,
            folderId: currentFolderId,
          });
        } else {
          const uploadTimeout = calculateUploadTimeout(file.size);

          await axios.put(url, file, {
            headers: {
              "Content-Type": file.type,
            },
            signal: abortController.signal,
            timeout: uploadTimeout, // Dynamic timeout based on file size
            maxContentLength: Infinity,
            maxBodyLength: Infinity,
            onUploadProgress: (progressEvent: any) => {
              const progress = (progressEvent.loaded / (progressEvent.total || file.size)) * 100;
              setFileUploads((prev) => prev.map((u) => (u.id === id ? { ...u, progress: Math.round(progress) } : u)));
            },
          });

          await registerFile({
            name: fileName,
            objectName: objectName,
            size: file.size,
            extension: extension,
            folderId: currentFolderId,
          });
        if (!result.success) {
          throw new Error(result.error || "Upload failed");
        }

        await registerFile({
          name: fileName,
          objectName: objectName,
          size: file.size,
          extension: extension,
          folderId: currentFolderId,
        });

        setFileUploads((prev) =>
          prev.map((u) =>
            u.id === id ? { ...u, status: UploadStatus.SUCCESS, progress: 100, abortController: undefined } : u
@@ -220,7 +172,7 @@ export function GlobalDropZone({ onSuccess, children, currentFolderId }: GlobalD
        );
      }
    },
    [t, isS3Enabled, currentFolderId, calculateUploadTimeout]
    [t, currentFolderId]
  );

  const handleDrop = useCallback(
@@ -288,20 +240,6 @@ export function GlobalDropZone({ onSuccess, children, currentFolderId }: GlobalD
    [uploadFile, t, createFileUpload]
  );

  useEffect(() => {
    const fetchSystemInfo = async () => {
      try {
        const response = await getSystemInfo();
        setIsS3Enabled(response.data.s3Enabled);
      } catch (error) {
        console.warn("Failed to fetch system info, defaulting to filesystem mode:", error);
        setIsS3Enabled(false);
      }
    };

    fetchSystemInfo();
  }, []);

  useEffect(() => {
    document.addEventListener("dragover", handleDragOver);
    document.addEventListener("dragleave", handleDragLeave);

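Both GlobalDropZone and UploadFileModal now reduce the upload path to the same three steps: obtain a presigned URL, PUT the bytes directly to the bucket, then register the file's metadata with the API. Below is a minimal sketch of that shared flow; the registerFile payload and the S3Uploader call shape appear in this commit, but how the presigned URL and objectName are obtained does not, so both are taken as parameters here.

// Minimal sketch of the shared direct-to-S3 flow; the presigned URL and
// objectName are assumed inputs (getFilePresignedUrl's exact signature is
// not shown in this commit).
import { registerFile } from "@/http/endpoints";
import { S3Uploader } from "@/utils/s3-upload";

async function uploadDirectToS3(file: File, presignedUrl: string, objectName: string, currentFolderId?: string) {
  const abortController = new AbortController();

  // 1. PUT the bytes straight to the bucket: no proxy route, no chunking,
  //    no dynamic timeout arithmetic.
  const result = await S3Uploader.uploadFile({
    file,
    presignedUrl,
    signal: abortController.signal,
    onProgress: (progress: number) => console.log(`upload ${progress}%`),
  });

  if (!result.success) {
    throw new Error(result.error || "Upload failed");
  }

  // 2. Register the object's metadata only after it exists in storage.
  await registerFile({
    name: file.name,
    objectName,
    size: file.size,
    extension: file.name.split(".").pop() || "",
    folderId: currentFolderId,
  });
}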
@@ -3,7 +3,6 @@ import Link from "next/link";
import { IconLayoutDashboard } from "@tabler/icons-react";
import { useTranslations } from "next-intl";

import { DownloadQueueIndicator } from "@/components/download-queue-indicator";
import { Navbar } from "@/components/layout/navbar";
import {
  Breadcrumb,
@@ -21,14 +20,6 @@ interface FileManagerLayoutProps {
  icon: ReactNode;
  breadcrumbLabel?: string;
  showBreadcrumb?: boolean;
  pendingDownloads?: Array<{
    downloadId: string;
    fileName: string;
    objectName: string;
    startTime: number;
    status: "pending" | "queued" | "downloading" | "completed" | "failed";
  }>;
  onCancelDownload?: (downloadId: string) => void;
}

export function FileManagerLayout({
@@ -37,8 +28,6 @@ export function FileManagerLayout({
  icon,
  breadcrumbLabel,
  showBreadcrumb = true,
  pendingDownloads = [],
  onCancelDownload,
}: FileManagerLayoutProps) {
  const t = useTranslations();

@@ -79,8 +68,6 @@ export function FileManagerLayout({
        </div>
      </div>
      <DefaultFooter />

      <DownloadQueueIndicator pendingDownloads={pendingDownloads} onCancelDownload={onCancelDownload} />
    </div>
  );
}

@@ -2,7 +2,6 @@

import { useEffect, useRef, useState } from "react";
import { IconAlertTriangle, IconCheck, IconCloudUpload, IconLoader, IconTrash, IconX } from "@tabler/icons-react";
import axios from "axios";
import { useTranslations } from "next-intl";
import { toast } from "sonner";

@@ -10,12 +9,11 @@ import { Button } from "@/components/ui/button";
import { Dialog, DialogContent, DialogFooter, DialogHeader, DialogTitle } from "@/components/ui/dialog";
import { Progress } from "@/components/ui/progress";
import { checkFile, getFilePresignedUrl, registerFile } from "@/http/endpoints";
import { getSystemInfo } from "@/http/endpoints/app";
import { ChunkedUploader } from "@/utils/chunked-upload";
import { getFileIcon } from "@/utils/file-icons";
import { generateSafeFileName } from "@/utils/file-utils";
import { formatFileSize } from "@/utils/format-file-size";
import getErrorData from "@/utils/getErrorData";
import { S3Uploader } from "@/utils/s3-upload";

interface UploadFileModalProps {
  isOpen: boolean;
@@ -89,23 +87,8 @@ export function UploadFileModal({ isOpen, onClose, onSuccess, currentFolderId }:
  const [isDragOver, setIsDragOver] = useState(false);
  const [showConfirmation, setShowConfirmation] = useState(false);
  const [hasShownSuccessToast, setHasShownSuccessToast] = useState(false);
  const [isS3Enabled, setIsS3Enabled] = useState<boolean | null>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);

  useEffect(() => {
    const fetchSystemInfo = async () => {
      try {
        const response = await getSystemInfo();
        setIsS3Enabled(response.data.s3Enabled);
      } catch (error) {
        console.warn("Failed to fetch system info, defaulting to filesystem mode:", error);
        setIsS3Enabled(false);
      }
    };

    fetchSystemInfo();
  }, []);

  useEffect(() => {
    return () => {
      fileUploads.forEach((upload) => {
@@ -231,18 +214,6 @@ export function UploadFileModal({ isOpen, onClose, onSuccess, currentFolderId }:
    );
  };

  const calculateUploadTimeout = (fileSize: number): number => {
    const baseTimeout = 300000;
    const fileSizeMB = fileSize / (1024 * 1024);
    if (fileSizeMB > 500) {
      const extraMB = fileSizeMB - 500;
      const extraMinutes = Math.ceil(extraMB / 100);
      return baseTimeout + extraMinutes * 60000;
    }

    return baseTimeout;
  };

  const uploadFile = async (fileUpload: FileUpload) => {
    const { file, id } = fileUpload;

@@ -294,60 +265,28 @@ export function UploadFileModal({ isOpen, onClose, onSuccess, currentFolderId }:
      const abortController = new AbortController();
      setFileUploads((prev) => prev.map((u) => (u.id === id ? { ...u, abortController } : u)));

      const shouldUseChunked = ChunkedUploader.shouldUseChunkedUpload(file.size, isS3Enabled ?? undefined);
      // Always use S3 direct upload (no chunking needed)
      const result = await S3Uploader.uploadFile({
        file,
        presignedUrl: url,
        signal: abortController.signal,
        onProgress: (progress: number) => {
          setFileUploads((prev) => prev.map((u) => (u.id === id ? { ...u, progress } : u)));
        },
      });

      if (shouldUseChunked) {
        const chunkSize = ChunkedUploader.calculateOptimalChunkSize(file.size);

        const result = await ChunkedUploader.uploadFile({
          file,
          url,
          chunkSize,
          signal: abortController.signal,
          isS3Enabled: isS3Enabled ?? undefined,
          onProgress: (progress) => {
            setFileUploads((prev) => prev.map((u) => (u.id === id ? { ...u, progress } : u)));
          },
        });

        if (!result.success) {
          throw new Error(result.error || "Chunked upload failed");
        }

        const finalObjectName = result.finalObjectName || objectName;

        await registerFile({
          name: fileName,
          objectName: finalObjectName,
          size: file.size,
          extension: extension,
          folderId: currentFolderId,
        });
      } else {
        const uploadTimeout = calculateUploadTimeout(file.size);
        await axios.put(url, file, {
          headers: {
            "Content-Type": file.type,
          },
          signal: abortController.signal,
          timeout: uploadTimeout,
          maxContentLength: Infinity,
          maxBodyLength: Infinity,
          onUploadProgress: (progressEvent) => {
            const progress = (progressEvent.loaded / (progressEvent.total || file.size)) * 100;
            setFileUploads((prev) => prev.map((u) => (u.id === id ? { ...u, progress: Math.round(progress) } : u)));
          },
        });

        await registerFile({
          name: fileName,
          objectName: objectName,
          size: file.size,
          extension: extension,
          folderId: currentFolderId,
        });
      if (!result.success) {
        throw new Error(result.error || "Upload failed");
      }

      await registerFile({
        name: fileName,
        objectName: objectName,
        size: file.size,
        extension: extension,
        folderId: currentFolderId,
      });

      setFileUploads((prev) =>
        prev.map((u) =>
          u.id === id ? { ...u, status: UploadStatus.SUCCESS, progress: 100, abortController: undefined } : u

@@ -1,144 +0,0 @@
import { useCallback, useEffect, useState } from "react";
import { useTranslations } from "next-intl";
import { toast } from "sonner";

import {
  cancelQueuedDownload,
  getDownloadQueueStatus,
  type DownloadQueueStatus,
} from "@/http/endpoints/download-queue";

export interface DownloadQueueHook {
  queueStatus: DownloadQueueStatus | null;
  isLoading: boolean;
  error: string | null;
  refreshQueue: () => Promise<void>;
  cancelDownload: (downloadId: string) => Promise<void>;
  getQueuePosition: (downloadId: string) => number | null;
  isDownloadQueued: (downloadId: string) => boolean;
  getEstimatedWaitTime: (downloadId: string) => string | null;
}

export function useDownloadQueue(autoRefresh = true, initialIntervalMs = 3000) {
  const t = useTranslations();
  const [queueStatus, setQueueStatus] = useState<DownloadQueueStatus | null>(null);
  const [isLoading, setIsLoading] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const [currentInterval, setCurrentInterval] = useState(initialIntervalMs);
  const [noActivityCount, setNoActivityCount] = useState(0);

  const refreshQueue = useCallback(async () => {
    try {
      setIsLoading(true);
      setError(null);
      const response = await getDownloadQueueStatus();
      const newStatus = response.data;

      const hasActivity = newStatus.activeDownloads > 0 || newStatus.queueLength > 0;
      const previousActivity = (queueStatus?.activeDownloads || 0) > 0 || (queueStatus?.queueLength || 0) > 0;
      const statusChanged = JSON.stringify(queueStatus) !== JSON.stringify(newStatus);

      if (!hasActivity && !previousActivity && !statusChanged) {
        setNoActivityCount((prev) => prev + 1);
      } else {
        setNoActivityCount(0);
        setCurrentInterval(initialIntervalMs);
      }

      setQueueStatus(newStatus);
    } catch (err: any) {
      const errorMessage = err?.response?.data?.error || err?.message || "Failed to fetch queue status";
      setError(errorMessage);
      console.error("Error fetching download queue status:", err);
    } finally {
      setIsLoading(false);
    }
  }, [queueStatus, initialIntervalMs]);

  const cancelDownload = useCallback(
    async (downloadId: string) => {
      try {
        await cancelQueuedDownload(downloadId);
        toast.success(t("downloadQueue.cancelSuccess"));
        await refreshQueue();
      } catch (err: any) {
        const errorMessage = err?.response?.data?.error || err?.message || "Failed to cancel download";
        toast.error(t("downloadQueue.cancelError", { error: errorMessage }));
        console.error("Error cancelling download:", err);
      }
    },
    [refreshQueue, t]
  );

  const getQueuePosition = useCallback(
    (downloadId: string): number | null => {
      if (!queueStatus) return null;
      const download = queueStatus.queuedDownloads.find((d) => d.downloadId === downloadId);
      return download?.position || null;
    },
    [queueStatus]
  );

  const isDownloadQueued = useCallback(
    (downloadId: string): boolean => {
      if (!queueStatus) return false;
      return queueStatus.queuedDownloads.some((d) => d.downloadId === downloadId);
    },
    [queueStatus]
  );

  const getEstimatedWaitTime = useCallback(
    (downloadId: string): string | null => {
      if (!queueStatus) return null;

      const download = queueStatus.queuedDownloads.find((d) => d.downloadId === downloadId);
      if (!download) return null;

      const waitTimeMs = download.waitTime;
      const waitTimeSeconds = Math.floor(waitTimeMs / 1000);

      if (waitTimeSeconds < 60) {
        return t("downloadQueue.waitTime.seconds", { seconds: waitTimeSeconds });
      } else if (waitTimeSeconds < 3600) {
        const minutes = Math.floor(waitTimeSeconds / 60);
        return t("downloadQueue.waitTime.minutes", { minutes });
      } else {
        const hours = Math.floor(waitTimeSeconds / 3600);
        const minutes = Math.floor((waitTimeSeconds % 3600) / 60);
        return t("downloadQueue.waitTime.hoursMinutes", { hours, minutes });
      }
    },
    [queueStatus, t]
  );

  useEffect(() => {
    if (!autoRefresh) return;

    let actualInterval = currentInterval;

    if (noActivityCount > 5) {
      console.log("[DOWNLOAD QUEUE] No activity detected, stopping polling");
      return;
    } else if (noActivityCount > 2) {
      actualInterval = 10000;
      setCurrentInterval(10000);
    }

    refreshQueue();

    const interval = setInterval(refreshQueue, actualInterval);

    return () => clearInterval(interval);
  }, [autoRefresh, refreshQueue, currentInterval, noActivityCount]);

  return {
    queueStatus,
    isLoading,
    error,
    refreshQueue,
    cancelDownload,
    getQueuePosition,
    isDownloadQueued,
    getEstimatedWaitTime,
  };
}
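The removed hook above implemented adaptive polling: refresh every 3 seconds while downloads are active or queued, back off to 10 seconds after a few idle checks, and stop entirely after five. A standalone sketch of that backoff pattern, detached from React state for clarity; the check callback is a hypothetical stand-in for fetching the queue status.

// Standalone sketch of the adaptive polling pattern the removed hook used:
// poll fast while there is activity, back off when idle, stop when dead.
function startAdaptivePolling(
  check: () => Promise<boolean>, // resolves true while downloads are active or queued
  baseMs = 3000,
  slowMs = 10000,
  maxIdleTicks = 5
): () => void {
  let idleTicks = 0;
  let timer: ReturnType<typeof setTimeout> | undefined;

  const tick = async () => {
    const active = await check();
    idleTicks = active ? 0 : idleTicks + 1;
    if (idleTicks > maxIdleTicks) return; // mirrors noActivityCount > 5: stop polling
    timer = setTimeout(tick, idleTicks > 2 ? slowMs : baseMs); // mirrors the 3s -> 10s backoff
  };

  void tick();
  return () => clearTimeout(timer); // cancel function, like the effect cleanup
}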
@@ -1,11 +1,9 @@
 | 
			
		||||
import { useCallback, useEffect, useState } from "react";
 | 
			
		||||
import { useCallback, useState } from "react";
 | 
			
		||||
import { useTranslations } from "next-intl";
 | 
			
		||||
import { toast } from "sonner";
 | 
			
		||||
 | 
			
		||||
import { deleteFile, getDownloadUrl, updateFile } from "@/http/endpoints";
 | 
			
		||||
import { deleteFolder, registerFolder, updateFolder } from "@/http/endpoints/folders";
 | 
			
		||||
import { useDownloadQueue } from "./use-download-queue";
 | 
			
		||||
import { usePushNotifications } from "./use-push-notifications";
 | 
			
		||||
 | 
			
		||||
interface FileToRename {
 | 
			
		||||
  id: string;
 | 
			
		||||
@@ -83,14 +81,6 @@ interface BulkFolder {
 | 
			
		||||
  };
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
interface PendingDownload {
 | 
			
		||||
  downloadId: string;
 | 
			
		||||
  fileName: string;
 | 
			
		||||
  objectName: string;
 | 
			
		||||
  startTime: number;
 | 
			
		||||
  status: "pending" | "queued" | "downloading" | "completed" | "failed";
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export interface EnhancedFileManagerHook {
 | 
			
		||||
  previewFile: PreviewFile | null;
 | 
			
		||||
  fileToDelete: any;
 | 
			
		||||
@@ -101,7 +91,6 @@ export interface EnhancedFileManagerHook {
 | 
			
		||||
  filesToDownload: BulkFile[] | null;
 | 
			
		||||
  foldersToDelete: BulkFolder[] | null;
 | 
			
		||||
  isBulkDownloadModalOpen: boolean;
 | 
			
		||||
  pendingDownloads: PendingDownload[];
 | 
			
		||||
 | 
			
		||||
  folderToDelete: FolderToDelete | null;
 | 
			
		||||
  folderToRename: FolderToRename | null;
 | 
			
		||||
@@ -144,15 +133,10 @@ export interface EnhancedFileManagerHook {
 | 
			
		||||
 | 
			
		||||
  clearSelection?: () => void;
 | 
			
		||||
  setClearSelectionCallback?: (callback: () => void) => void;
 | 
			
		||||
  getDownloadStatus: (objectName: string) => PendingDownload | null;
 | 
			
		||||
  cancelPendingDownload: (downloadId: string) => Promise<void>;
 | 
			
		||||
  isDownloadPending: (objectName: string) => boolean;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
export function useEnhancedFileManager(onRefresh: () => Promise<void>, clearSelection?: () => void) {
 | 
			
		||||
  const t = useTranslations();
 | 
			
		||||
  const downloadQueue = useDownloadQueue(true, 3000);
 | 
			
		||||
  const notifications = usePushNotifications();
 | 
			
		||||
 | 
			
		||||
  const [previewFile, setPreviewFile] = useState<PreviewFile | null>(null);
 | 
			
		||||
  const [fileToRename, setFileToRename] = useState<FileToRename | null>(null);
 | 
			
		||||
@@ -168,145 +152,36 @@ export function useEnhancedFileManager(onRefresh: () => Promise<void>, clearSele
 | 
			
		||||
  const [folderToShare, setFolderToShare] = useState<FolderToShare | null>(null);
 | 
			
		||||
  const [isCreateFolderModalOpen, setCreateFolderModalOpen] = useState(false);
 | 
			
		||||
  const [isBulkDownloadModalOpen, setBulkDownloadModalOpen] = useState(false);
 | 
			
		||||
  const [pendingDownloads, setPendingDownloads] = useState<PendingDownload[]>([]);
 | 
			
		||||
  const [clearSelectionCallback, setClearSelectionCallbackState] = useState<(() => void) | null>(null);
 | 
			
		||||
 | 
			
		||||
  const [foldersToShare, setFoldersToShare] = useState<BulkFolder[] | null>(null);
 | 
			
		||||
  const [foldersToDownload, setFoldersToDownload] = useState<BulkFolder[] | null>(null);
 | 
			
		||||
 | 
			
		||||
  const startActualDownload = async (
 | 
			
		||||
    downloadId: string,
 | 
			
		||||
    objectName: string,
 | 
			
		||||
    fileName: string,
 | 
			
		||||
    downloadUrl?: string
 | 
			
		||||
  ) => {
 | 
			
		||||
    try {
 | 
			
		||||
      setPendingDownloads((prev) =>
 | 
			
		||||
        prev.map((d) => (d.downloadId === downloadId ? { ...d, status: "downloading" } : d))
 | 
			
		||||
      );
 | 
			
		||||
 | 
			
		||||
      let url = downloadUrl;
 | 
			
		||||
      if (!url) {
 | 
			
		||||
        const response = await getDownloadUrl(objectName);
 | 
			
		||||
        url = response.data.url;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      const link = document.createElement("a");
 | 
			
		||||
      link.href = url;
 | 
			
		||||
      link.download = fileName;
 | 
			
		||||
      document.body.appendChild(link);
 | 
			
		||||
      link.click();
 | 
			
		||||
      document.body.removeChild(link);
 | 
			
		||||
 | 
			
		||||
      const wasQueued = pendingDownloads.some((d) => d.downloadId === downloadId);
 | 
			
		||||
 | 
			
		||||
      if (wasQueued) {
 | 
			
		||||
        setPendingDownloads((prev) =>
 | 
			
		||||
          prev.map((d) => (d.downloadId === downloadId ? { ...d, status: "completed" } : d))
 | 
			
		||||
        );
 | 
			
		||||
 | 
			
		||||
        const completedDownload = pendingDownloads.find((d) => d.downloadId === downloadId);
 | 
			
		||||
        if (completedDownload) {
 | 
			
		||||
          const fileSize = completedDownload.startTime ? Date.now() - completedDownload.startTime : undefined;
 | 
			
		||||
          await notifications.notifyDownloadComplete(fileName, fileSize);
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        setTimeout(() => {
 | 
			
		||||
          setPendingDownloads((prev) => prev.filter((d) => d.downloadId !== downloadId));
 | 
			
		||||
        }, 5000);
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      if (!wasQueued) {
 | 
			
		||||
        toast.success(t("files.downloadStart", { fileName }));
 | 
			
		||||
      }
 | 
			
		||||
    } catch (error: any) {
 | 
			
		||||
      const wasQueued = pendingDownloads.some((d) => d.downloadId === downloadId);
 | 
			
		||||
 | 
			
		||||
      if (wasQueued) {
 | 
			
		||||
        setPendingDownloads((prev) => prev.map((d) => (d.downloadId === downloadId ? { ...d, status: "failed" } : d)));
 | 
			
		||||
 | 
			
		||||
        const errorMessage =
 | 
			
		||||
          error?.response?.data?.message || error?.message || t("notifications.downloadFailed.unknownError");
 | 
			
		||||
        await notifications.notifyDownloadFailed(fileName, errorMessage);
 | 
			
		||||
 | 
			
		||||
        setTimeout(() => {
 | 
			
		||||
          setPendingDownloads((prev) => prev.filter((d) => d.downloadId !== downloadId));
 | 
			
		||||
        }, 10000);
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      if (!pendingDownloads.some((d) => d.downloadId === downloadId)) {
 | 
			
		||||
        toast.error(t("files.downloadError"));
 | 
			
		||||
      }
 | 
			
		||||
      throw error;
 | 
			
		||||
    }
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  useEffect(() => {
 | 
			
		||||
    if (!downloadQueue.queueStatus) return;
 | 
			
		||||
 | 
			
		||||
    pendingDownloads.forEach(async (download) => {
 | 
			
		||||
      if (download.status === "queued") {
 | 
			
		||||
        const stillQueued = downloadQueue.queueStatus?.queuedDownloads.find((qd) => qd.fileName === download.fileName);
 | 
			
		||||
 | 
			
		||||
        if (!stillQueued) {
 | 
			
		||||
          console.log(`[DOWNLOAD] Processing queued download: ${download.fileName}`);
 | 
			
		||||
 | 
			
		||||
          await notifications.notifyQueueProcessing(download.fileName);
 | 
			
		||||
 | 
			
		||||
          await startActualDownload(download.downloadId, download.objectName, download.fileName);
 | 
			
		||||
        }
 | 
			
		||||
      }
 | 
			
		||||
    });
 | 
			
		||||
    // eslint-disable-next-line react-hooks/exhaustive-deps
 | 
			
		||||
  }, [downloadQueue.queueStatus, pendingDownloads, notifications]);
 | 
			
		||||
 | 
			
		||||
  const setClearSelectionCallback = useCallback((callback: () => void) => {
 | 
			
		||||
    setClearSelectionCallbackState(() => callback);
 | 
			
		||||
  }, []);
 | 
			
		||||
 | 
			
		||||
  const handleDownload = async (objectName: string, fileName: string) => {
 | 
			
		||||
    try {
 | 
			
		||||
      const { downloadFileWithQueue } = await import("@/utils/download-queue-utils");
 | 
			
		||||
      const loadingToast = toast.loading(t("share.messages.downloadStarted"));
 | 
			
		||||
 | 
			
		||||
      await toast.promise(
 | 
			
		||||
        downloadFileWithQueue(objectName, fileName, {
 | 
			
		||||
          silent: true,
 | 
			
		||||
          showToasts: false,
 | 
			
		||||
        }),
 | 
			
		||||
        {
 | 
			
		||||
          loading: t("share.messages.downloadStarted"),
 | 
			
		||||
          success: t("shareManager.downloadSuccess"),
 | 
			
		||||
          error: t("share.errors.downloadFailed"),
 | 
			
		||||
        }
 | 
			
		||||
      );
 | 
			
		||||
      const response = await getDownloadUrl(objectName);
 | 
			
		||||
 | 
			
		||||
      const link = document.createElement("a");
 | 
			
		||||
      link.href = response.data.url;
 | 
			
		||||
      link.download = fileName;
 | 
			
		||||
      document.body.appendChild(link);
 | 
			
		||||
      link.click();
 | 
			
		||||
      document.body.removeChild(link);
 | 
			
		||||
 | 
			
		||||
      toast.dismiss(loadingToast);
 | 
			
		||||
      toast.success(t("shareManager.downloadSuccess"));
 | 
			
		||||
    } catch (error) {
 | 
			
		||||
      console.error("Download error:", error);
 | 
			
		||||
      toast.error(t("share.errors.downloadFailed"));
 | 
			
		||||
    }
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  const cancelPendingDownload = async (downloadId: string) => {
 | 
			
		||||
    try {
 | 
			
		||||
      await downloadQueue.cancelDownload(downloadId);
 | 
			
		||||
      setPendingDownloads((prev) => prev.filter((d) => d.downloadId !== downloadId));
 | 
			
		||||
    } catch (error) {
 | 
			
		||||
      console.error("Error cancelling download:", error);
 | 
			
		||||
    }
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
  const getDownloadStatus = useCallback(
 | 
			
		||||
    (objectName: string): PendingDownload | null => {
 | 
			
		||||
      return pendingDownloads.find((d) => d.objectName === objectName) || null;
 | 
			
		||||
    },
 | 
			
		||||
    [pendingDownloads]
 | 
			
		||||
  );
 | 
			
		||||
 | 
			
		||||
  const isDownloadPending = useCallback(
 | 
			
		||||
    (objectName: string): boolean => {
 | 
			
		||||
      return pendingDownloads.some((d) => d.objectName === objectName && d.status !== "completed");
 | 
			
		||||
    },
 | 
			
		||||
    [pendingDownloads]
 | 
			
		||||
  );
 | 
			
		||||
 | 
			
		||||
  const handleRename = async (fileId: string, newName: string, description?: string) => {
 | 
			
		||||
    try {
 | 
			
		||||
      await updateFile(fileId, {
 | 
			
		||||
@@ -362,67 +237,58 @@ export function useEnhancedFileManager(onRefresh: () => Promise<void>, clearSele
    }
  };

  const handleSingleFolderDownload = async (folderId: string, folderName: string) => {
  const handleSingleFolderDownload = async () => {
    try {
      const { downloadFolderWithQueue } = await import("@/utils/download-queue-utils");
      const loadingToast = toast.loading(t("shareManager.creatingZip"));

      await toast.promise(
        downloadFolderWithQueue(folderId, folderName, {
          silent: true,
          showToasts: false,
        }),
        {
          loading: t("shareManager.creatingZip"),
          success: t("shareManager.zipDownloadSuccess"),
          error: t("share.errors.downloadFailed"),
        }
      );
      // A direct implementation is still needed for folder downloads;
      // for now, point users to the bulk download modal.
      toast.dismiss(loadingToast);
      toast.info("Use bulk download modal for folders");
    } catch (error) {
      console.error("Error downloading folder:", error);
      toast.error(t("share.errors.downloadFailed"));
    }
  };

  const handleBulkDownloadWithZip = async (files: BulkFile[], zipName: string) => {
    try {
      const folders = foldersToDownload || [];
      const { bulkDownloadWithQueue } = await import("@/utils/download-queue-utils");

      const allItems = [
        ...files.map((file) => ({
          objectName: file.objectName,
          name: file.relativePath || file.name,
          isReverseShare: false,
          type: "file" as const,
        })),
        ...folders.map((folder) => ({
          id: folder.id,
          name: folder.name,
          type: "folder" as const,
        })),
      ];

      if (allItems.length === 0) {
      if (files.length === 0 && folders.length === 0) {
        toast.error(t("shareManager.noFilesToDownload"));
        return;
      }

      toast.promise(
        bulkDownloadWithQueue(allItems, zipName, undefined, false).then(() => {
          setBulkDownloadModalOpen(false);
          setFilesToDownload(null);
          setFoldersToDownload(null);
          if (clearSelectionCallback) {
            clearSelectionCallback();
          }
        }),
        {
          loading: t("shareManager.creatingZip"),
          success: t("shareManager.zipDownloadSuccess"),
          error: t("shareManager.zipDownloadError"),
        }
      const loadingToast = toast.loading(t("shareManager.preparingDownload"));

      // Get presigned URLs for all files
      const downloadItems = await Promise.all(
        files.map(async (file) => {
          const response = await getDownloadUrl(file.objectName);
          return {
            url: response.data.url,
            name: file.name,
          };
        })
      );

      // Create ZIP with all files
      const { downloadFilesAsZip } = await import("@/utils/zip-download");
      await downloadFilesAsZip(downloadItems, zipName.endsWith(".zip") ? zipName : `${zipName}.zip`);

      toast.dismiss(loadingToast);
      toast.success(t("shareManager.downloadSuccess"));

      setBulkDownloadModalOpen(false);
      setFilesToDownload(null);
      setFoldersToDownload(null);
      if (clearSelectionCallback) {
        clearSelectionCallback();
      }
    } catch (error) {
      console.error("Error in bulk download:", error);
      toast.error(t("shareManager.zipDownloadError"));
      setBulkDownloadModalOpen(false);
      setFilesToDownload(null);
      setFoldersToDownload(null);
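
The two comments in the new path above capture the whole post-migration flow: resolve one presigned URL per file, then zip the blobs client-side. A minimal standalone sketch of that flow, reusing only the endpoint and zip helper already shown (the zipper is assumed to behave like @/utils/zip-download):

async function zipViaPresignedUrls(files: Array<{ objectName: string; name: string }>, zipName: string) {
  const items = await Promise.all(
    files.map(async (f) => ({
      url: (await getDownloadUrl(f.objectName)).data.url, // presigned S3 URL
      name: f.name,
    }))
  );
  const { downloadFilesAsZip } = await import("@/utils/zip-download");
  await downloadFilesAsZip(items, zipName.endsWith(".zip") ? zipName : `${zipName}.zip`);
}
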
@@ -522,7 +388,6 @@ export function useEnhancedFileManager(onRefresh: () => Promise<void>, clearSele
    setFoldersToDelete,
    isBulkDownloadModalOpen,
    setBulkDownloadModalOpen,
    pendingDownloads,
    handleDownload,
    handleRename,
    handleDelete,
@@ -552,9 +417,6 @@ export function useEnhancedFileManager(onRefresh: () => Promise<void>, clearSele

    clearSelection,
    setClearSelectionCallback,
    getDownloadStatus,
    handleSingleFolderDownload,
    cancelPendingDownload,
    isDownloadPending,
  };
}

@@ -4,7 +4,6 @@ import { toast } from "sonner";

import { getDownloadUrl } from "@/http/endpoints";
import { downloadReverseShareFile } from "@/http/endpoints/reverse-shares";
import { downloadFileWithQueue, downloadReverseShareWithQueue } from "@/utils/download-queue-utils";
import { getFileExtension, getFileType, type FileType } from "@/utils/file-types";

interface FilePreviewState {
@@ -241,18 +240,32 @@ export function useFilePreview({ file, isOpen, isReverseShare = false, sharePass
    if (!fileKey) return;

    try {
      const loadingToast = toast.loading(t("filePreview.downloading") || "Downloading...");

      let url: string;
      if (isReverseShare) {
        await downloadReverseShareWithQueue(file.id!, file.name, {
          onFail: () => toast.error(t("filePreview.downloadError")),
        });
        const response = await downloadReverseShareFile(file.id!);
        url = response.data.url;
      } else {
        await downloadFileWithQueue(file.objectName, file.name, {
          sharePassword,
          onFail: () => toast.error(t("filePreview.downloadError")),
        });
        const response = await getDownloadUrl(
          file.objectName,
          sharePassword ? { headers: { "x-share-password": sharePassword } } : undefined
        );
        url = response.data.url;
      }

      const link = document.createElement("a");
      link.href = url;
      link.download = file.name;
      document.body.appendChild(link);
      link.click();
      document.body.removeChild(link);

      toast.dismiss(loadingToast);
      toast.success(t("filePreview.downloadSuccess") || "Download started");
    } catch (error) {
      console.error("Download error:", error);
      toast.error(t("filePreview.downloadError"));
    }
  }, [isReverseShare, file.id, file.objectName, file.name, sharePassword, t]);

@@ -4,10 +4,16 @@ import { useCallback, useState } from "react";
import { useTranslations } from "next-intl";
import { toast } from "sonner";

import { addRecipients, createShareAlias, deleteShare, notifyRecipients, updateShare } from "@/http/endpoints";
import {
  addRecipients,
  createShareAlias,
  deleteShare,
  getDownloadUrl,
  notifyRecipients,
  updateShare,
} from "@/http/endpoints";
import { updateFolder } from "@/http/endpoints/folders";
import type { Share } from "@/http/endpoints/shares/types";
import { bulkDownloadShareWithQueue, downloadFileWithQueue } from "@/utils/download-queue-utils";

export interface ShareManagerHook {
  shareToDelete: Share | null;
@@ -230,20 +236,43 @@ export function useShareManager(onSuccess: () => void) {
          return;
        }

        toast.promise(
          bulkDownloadShareWithQueue(allItems, share.files || [], share.folders || [], zipName, undefined, true).then(
            () => {
              if (clearSelectionCallback) {
                clearSelectionCallback();
              }
            }
          ),
          {
            loading: t("shareManager.creatingZip"),
            success: t("shareManager.zipDownloadSuccess"),
            error: t("shareManager.zipDownloadError"),
        const loadingToast = toast.loading(t("shareManager.preparingDownload"));

        try {
          // Get presigned URLs for all files
          const downloadItems = await Promise.all(
            allItems
              .filter((item) => item.type === "file" && item.objectName)
              .map(async (item) => {
                const response = await getDownloadUrl(item.objectName!);
                return {
                  url: response.data.url,
                  name: item.name,
                };
              })
          );

          if (downloadItems.length === 0) {
            toast.dismiss(loadingToast);
            toast.error(t("shareManager.noFilesToDownload"));
            return;
          }
        );

          // Create ZIP with all files
          const { downloadFilesAsZip } = await import("@/utils/zip-download");
          await downloadFilesAsZip(downloadItems, zipName.endsWith(".zip") ? zipName : `${zipName}.zip`);

          toast.dismiss(loadingToast);
          toast.success(t("shareManager.zipDownloadSuccess"));

          if (clearSelectionCallback) {
            clearSelectionCallback();
          }
        } catch (error) {
          toast.dismiss(loadingToast);
          toast.error(t("shareManager.zipDownloadError"));
          throw error;
        }
      } else {
        toast.error("Multiple share download not yet supported - please download shares individually");
      }
@@ -255,9 +284,8 @@ export function useShareManager(onSuccess: () => void) {
  const handleBulkDownload = (shares: Share[]) => {
    const zipName =
      shares.length === 1
        ? t("shareManager.singleShareZipName", { shareName: shares[0].name || t("shareManager.defaultShareName") })
        ? `${shares[0].name || t("shareManager.defaultShareName")}.zip`
        : t("shareManager.multipleSharesZipName", { count: shares.length });

    handleBulkDownloadWithZip(shares, zipName);
  };

@@ -273,17 +301,24 @@ export function useShareManager(onSuccess: () => void) {
    if (totalFiles === 1 && totalFolders === 0) {
      const file = share.files[0];
      try {
        await downloadFileWithQueue(file.objectName, file.name, {
          onComplete: () => toast.success(t("shareManager.downloadSuccess")),
          onFail: () => toast.error(t("shareManager.downloadError")),
        });
        const loadingToast = toast.loading(t("shareManager.downloading"));
        const response = await getDownloadUrl(file.objectName);

        const link = document.createElement("a");
        link.href = response.data.url;
        link.download = file.name;
        document.body.appendChild(link);
        link.click();
        document.body.removeChild(link);

        toast.dismiss(loadingToast);
        toast.success(t("shareManager.downloadSuccess"));
      } catch (error) {
        console.error("Download error:", error);
        toast.error(t("shareManager.downloadError"));
      }
    } else {
      const zipName = t("shareManager.singleShareZipName", {
        shareName: share.name || t("shareManager.defaultShareName"),
      });
      const zipName = `${share.name || t("shareManager.defaultShareName")}.zip`;
      await handleBulkDownloadWithZip([share], zipName);
    }
  };

@@ -33,7 +33,7 @@ export interface GetAppInfo200 {
}

export interface GetSystemInfo200 {
  storageProvider: "s3" | "filesystem";
  storageProvider: "s3";
  s3Enabled: boolean;
}

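With storageProvider narrowed to the single literal "s3", consumers no longer need a filesystem branch. An illustrative consumer of the narrowed type (the import path is an assumption):

import type { GetSystemInfo200 } from "@/http/endpoints"; // assumed path

function describeStorage(info: GetSystemInfo200): string {
  // info.storageProvider can only be "s3" now; a "filesystem" case would not type-check.
  return `provider=${info.storageProvider}, s3Enabled=${info.s3Enabled}`;
}
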
@@ -1,63 +0,0 @@
import type { AxiosRequestConfig } from "axios";

import apiInstance from "@/config/api";

export interface QueuedDownload {
  downloadId: string;
  position: number;
  waitTime: number;
  fileName?: string;
  fileSize?: number;
}

export interface DownloadQueueStatus {
  queueLength: number;
  maxQueueSize: number;
  activeDownloads: number;
  maxConcurrent: number;
  queuedDownloads: QueuedDownload[];
}

export interface DownloadQueueStatusResult {
  status: string;
  data: DownloadQueueStatus;
}

export interface CancelDownloadResult {
  message: string;
  downloadId: string;
}

export interface ClearQueueResult {
  message: string;
  clearedCount: number;
}

/**
 * Get current download queue status
 * @summary Get Download Queue Status
 */
export const getDownloadQueueStatus = <TData = DownloadQueueStatusResult>(
  options?: AxiosRequestConfig
): Promise<TData> => {
  return apiInstance.get(`/api/filesystem/download-queue/status`, options);
};

/**
 * Cancel a specific queued download
 * @summary Cancel Queued Download
 */
export const cancelQueuedDownload = <TData = CancelDownloadResult>(
  downloadId: string,
  options?: AxiosRequestConfig
): Promise<TData> => {
  return apiInstance.delete(`/api/filesystem/download-queue/${downloadId}`, options);
};

/**
 * Clear the entire download queue (admin operation)
 * @summary Clear Download Queue
 */
export const clearDownloadQueue = <TData = ClearQueueResult>(options?: AxiosRequestConfig): Promise<TData> => {
  return apiInstance.delete(`/api/filesystem/download-queue`, options);
};
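
For reference, how the removed generated client was typically called (the downloadId value is hypothetical):

const status = await getDownloadQueueStatus();
console.log(status.data.queueLength, "waiting,", status.data.activeDownloads, "active");
await cancelQueuedDownload("dl-123");
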
@@ -1,311 +0,0 @@
import axios from "axios";

export interface ChunkedUploadOptions {
  file: File;
  url: string;
  chunkSize?: number;
  onProgress?: (progress: number) => void;
  onChunkComplete?: (chunkIndex: number, totalChunks: number) => void;
  signal?: AbortSignal;
  isS3Enabled?: boolean;
}

export interface ChunkedUploadResult {
  success: boolean;
  objectName?: string;
  finalObjectName?: string;
  error?: string;
}

export class ChunkedUploader {
  private static defaultChunkSizeInBytes = 100 * 1024 * 1024; // 100MB

  /**
   * Upload a file in chunks with streaming
   */
  static async uploadFile(options: ChunkedUploadOptions): Promise<ChunkedUploadResult> {
    const { file, url, chunkSize, onProgress, onChunkComplete, signal } = options;

    if (!this.shouldUseChunkedUpload(file.size, options.isS3Enabled)) {
      throw new Error(
        `File ${file.name} (${(file.size / (1024 * 1024)).toFixed(2)}MB) should not use chunked upload. Use regular upload instead.`
      );
    }

    const optimalChunkSize = chunkSize || this.calculateOptimalChunkSize(file.size);

    try {
      const fileId = this.generateFileId();

      const totalChunks = Math.ceil(file.size / optimalChunkSize);

      const uploadedChunks = new Set<number>();
      let completedChunks = 0;
      let lastChunkResponse: any = null;

      for (let chunkIndex = 0; chunkIndex < totalChunks; chunkIndex++) {
        if (signal?.aborted) {
          throw new Error("Upload cancelled");
        }

        const start = chunkIndex * optimalChunkSize;
        const end = Math.min(start + optimalChunkSize, file.size);
        const chunk = file.slice(start, end);
        const isLastChunk = chunkIndex === totalChunks - 1;

        let retries = 0;
        const maxRetries = 3;
        let chunkUploaded = false;

        while (retries < maxRetries && !chunkUploaded) {
          try {
            const response = await this.uploadChunk({
              fileId,
              chunk,
              chunkIndex,
              totalChunks,
              chunkSize: optimalChunkSize,
              totalSize: file.size,
              fileName: file.name,
              isLastChunk,
              url,
              signal,
            });

            if (isLastChunk) {
              lastChunkResponse = response;
            }

            chunkUploaded = true;
          } catch (error: any) {
            retries++;

            if (
              error.response?.status === 400 &&
              (error.response?.data?.error?.includes("already uploaded") ||
                error.response?.data?.details?.includes("already uploaded"))
            ) {
              chunkUploaded = true;
              break;
            }

            console.warn(`Chunk ${chunkIndex + 1} failed (attempt ${retries}/${maxRetries}):`, error.message);

            if (retries >= maxRetries) {
              throw error;
            }

            const backoffDelay = error.message?.includes("timeout") ? 2000 * retries : 1000 * retries;
            await new Promise((resolve) => setTimeout(resolve, backoffDelay));
          }
        }

        if (!chunkUploaded) {
          throw new Error(`Failed to upload chunk ${chunkIndex + 1} after ${maxRetries} attempts`);
        }

        uploadedChunks.add(chunkIndex);
        completedChunks++;

        const progress = Math.round((completedChunks / totalChunks) * 100);
        onProgress?.(progress);
        onChunkComplete?.(chunkIndex, totalChunks);

        if (!isLastChunk) {
          await new Promise((resolve) => setTimeout(resolve, 100));
        }
      }

      await new Promise((resolve) => setTimeout(resolve, 500));

      return {
        success: true,
        finalObjectName: lastChunkResponse?.finalObjectName || lastChunkResponse?.objectName,
      };
    } catch (error: any) {
      console.error("Chunked upload failed:", error);
      return {
        success: false,
        error: error.message || "Upload failed",
      };
    }
  }

  /**
   * Upload a single chunk
   */
  private static async uploadChunk({
    fileId,
    chunk,
    chunkIndex,
    totalChunks,
    chunkSize,
    totalSize,
    fileName,
    isLastChunk,
    url,
    signal,
  }: {
    fileId: string;
    chunk: Blob;
    chunkIndex: number;
    totalChunks: number;
    chunkSize: number;
    totalSize: number;
    fileName: string;
    isLastChunk: boolean;
    url: string;
    signal?: AbortSignal;
  }): Promise<any> {
    // Encode filename as base64 to handle UTF-8 characters in HTTP headers
    // This prevents errors when setting headers with non-ASCII characters
    const encodedFileName = btoa(unescape(encodeURIComponent(fileName)));

    const headers = {
      "Content-Type": "application/octet-stream",
      "X-File-Id": fileId,
      "X-Chunk-Index": chunkIndex.toString(),
      "X-Total-Chunks": totalChunks.toString(),
      "X-Chunk-Size": chunkSize.toString(),
      "X-Total-Size": totalSize.toString(),
      "X-File-Name": encodedFileName,
      "X-Is-Last-Chunk": isLastChunk.toString(),
    };

    try {
      const timeoutPer100MB = 120000; // 120 seconds per 100MB
      const chunkSizeMB = chunk.size / (1024 * 1024);
      const calculatedTimeout = Math.max(60000, Math.ceil(chunkSizeMB / 100) * timeoutPer100MB);

      const response = await axios.put(url, chunk, {
        headers,
        signal,
        timeout: calculatedTimeout,
        maxContentLength: Infinity,
        maxBodyLength: Infinity,
      });

      if (response.status !== 200) {
        throw new Error(`Failed to upload chunk ${chunkIndex}: ${response.statusText}`);
      }

      return response.data;
    } catch (error: any) {
      if (
        error.response?.status === 400 &&
        (error.response?.data?.error?.includes("already uploaded") ||
          error.response?.data?.details?.includes("already uploaded"))
      ) {
        return error.response.data;
      }

      if (error.code === "ECONNABORTED" || error.message?.includes("timeout")) {
        console.warn(`Chunk ${chunkIndex + 1} upload timed out, will retry`);
        throw new Error(`Upload timeout for chunk ${chunkIndex + 1}`);
      }

      throw error;
    }
  }
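
The base64 trick above keeps non-ASCII filenames valid in HTTP headers. A round-trip sketch; the server-side decode is an assumption, shown for Node:

// Browser-side encode, as used above for the X-File-Name header:
const encodeHeaderFileName = (name: string) => btoa(unescape(encodeURIComponent(name)));
encodeHeaderFileName("relatório-2024.pdf"); // plain ASCII, header-safe
// Assumed Node-side decode in the receiving handler:
// const fileName = Buffer.from(req.headers["x-file-name"] as string, "base64").toString("utf8");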

  /**
   * Get upload progress
   */
  static async getUploadProgress(fileId: string): Promise<{
    uploaded: number;
    total: number;
    percentage: number;
  } | null> {
    try {
      const response = await axios.get(`/api/filesystem/upload-progress/${fileId}`);
      return response.data;
    } catch (error) {
      console.warn("Failed to get upload progress:", error);
      return null;
    }
  }

  /**
   * Cancel upload
   */
  static async cancelUpload(fileId: string): Promise<void> {
    try {
      await axios.delete(`/api/filesystem/cancel-upload/${fileId}`);
    } catch (error) {
      console.warn("Failed to cancel upload:", error);
    }
  }

  /**
   * Generate unique file ID
   */
  private static generateFileId(): string {
    return `${Date.now()}-${Math.random().toString(36).substring(2, 15)}`;
  }

  /**
   * Check if file should use chunked upload
   * Only use chunked upload for filesystem storage, not for S3
   */
  static shouldUseChunkedUpload(fileSize: number, isS3Enabled?: boolean): boolean {
    if (isS3Enabled) {
      return false;
    }

    const threshold = this.getConfiguredChunkSize() || this.defaultChunkSizeInBytes;
    const shouldUse = fileSize > threshold;

    return shouldUse;
  }

  /**
   * Calculate optimal chunk size based on file size
   */
  static calculateOptimalChunkSize(fileSize: number): number {
    const configuredChunkSize = this.getConfiguredChunkSize();
    const chunkSize = configuredChunkSize || this.defaultChunkSizeInBytes;

    if (fileSize <= chunkSize) {
      throw new Error(
        `calculateOptimalChunkSize should not be called for files <= ${chunkSize}. File size: ${(fileSize / (1024 * 1024)).toFixed(2)}MB`
      );
    }

    if (configuredChunkSize) {
      return configuredChunkSize;
    }

    // For files > 1GB, use 150MB chunks
    if (fileSize > 1024 * 1024 * 1024) {
      return 150 * 1024 * 1024;
    }

    // For files > 500MB, use 100MB chunks
    if (fileSize > 500 * 1024 * 1024) {
      return 100 * 1024 * 1024;
    }

    // For files > 100MB, use 75MB chunks (minimum for chunked upload)
    return 75 * 1024 * 1024;
  }
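
Worked example of the tiering above: with no NEXT_PUBLIC_UPLOAD_CHUNK_SIZE_MB override, a 2 GB file falls into the >1 GB tier and gets 150 MB chunks, and each chunk's timeout (from uploadChunk above) is Math.ceil(150 / 100) * 120000 = 240000 ms.

const size = 2048 * 1024 * 1024; // 2 GB
const chunk = ChunkedUploader.calculateOptimalChunkSize(size); // 150 MB
const chunks = Math.ceil(size / chunk); // Math.ceil(2048 / 150) = 14 chunks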

  private static getConfiguredChunkSize(): number | null {
    const configuredChunkSizeMb = process.env.NEXT_PUBLIC_UPLOAD_CHUNK_SIZE_MB;

    if (!configuredChunkSizeMb) {
      return null;
    }

    const parsedValue = Number(configuredChunkSizeMb);

    if (Number.isNaN(parsedValue) || parsedValue <= 0) {
      console.warn(
        `Invalid NEXT_PUBLIC_UPLOAD_CHUNK_SIZE_MB value: ${configuredChunkSizeMb}. Falling back to optimal chunk size.`
      );

      return null;
    }

    return Math.floor(parsedValue * 1024 * 1024);
  }
}
@@ -1,623 +0,0 @@
import { toast } from "sonner";

import { getDownloadUrl } from "@/http/endpoints";
import { downloadReverseShareFile } from "@/http/endpoints/reverse-shares";

interface DownloadWithQueueOptions {
  useQueue?: boolean;
  silent?: boolean;
  showToasts?: boolean;
  sharePassword?: string;
  onStart?: (downloadId: string) => void;
  onComplete?: (downloadId: string) => void;
  onFail?: (downloadId: string, error: string) => void;
}

async function waitForDownloadReady(objectName: string, fileName: string): Promise<string> {
  let attempts = 0;
  const maxAttempts = 30;
  let currentDelay = 2000;
  const maxDelay = 10000;

  while (attempts < maxAttempts) {
    try {
      const response = await getDownloadUrl(objectName);

      if (response.status !== 202) {
        return response.data.url;
      }

      await new Promise((resolve) => setTimeout(resolve, currentDelay));

      if (attempts > 3 && currentDelay < maxDelay) {
        currentDelay = Math.min(currentDelay * 1.5, maxDelay);
      }

      attempts++;
    } catch (error) {
      console.error(`Error checking download status for ${fileName}:`, error);
      await new Promise((resolve) => setTimeout(resolve, currentDelay * 2));
      attempts++;
    }
  }

  throw new Error(`Download timeout for ${fileName} after ${attempts} attempts`);
}
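
The polling schedule above starts at a fixed 2 s, then grows by 1.5x per attempt after the fourth, capped at 10 s. A small simulation of the happy-path worst case (no errors, all 30 attempts return 202):

let delay = 2000;
let total = 0;
for (let attempt = 0; attempt < 30; attempt++) {
  total += delay;
  if (attempt > 3 && delay < 10000) delay = Math.min(delay * 1.5, 10000);
}
console.log(`worst-case wait ≈ ${(total / 1000).toFixed(0)}s`); // ≈ 244s, about four minutes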

async function waitForReverseShareDownloadReady(fileId: string, fileName: string): Promise<string> {
  let attempts = 0;
  const maxAttempts = 30;
  let currentDelay = 2000;
  const maxDelay = 10000;

  while (attempts < maxAttempts) {
    try {
      const response = await downloadReverseShareFile(fileId);

      if (response.status !== 202) {
        return response.data.url;
      }

      await new Promise((resolve) => setTimeout(resolve, currentDelay));

      if (attempts > 3 && currentDelay < maxDelay) {
        currentDelay = Math.min(currentDelay * 1.5, maxDelay);
      }

      attempts++;
    } catch (error) {
      console.error(`Error checking reverse share download status for ${fileName}:`, error);
      await new Promise((resolve) => setTimeout(resolve, currentDelay * 2));
      attempts++;
    }
  }

  throw new Error(`Reverse share download timeout for ${fileName} after ${attempts} attempts`);
}

async function performDownload(url: string, fileName: string): Promise<void> {
  const link = document.createElement("a");
  link.href = url;
  link.download = fileName;
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
}

export async function downloadFileWithQueue(
  objectName: string,
  fileName: string,
  options: DownloadWithQueueOptions = {}
): Promise<void> {
  const { useQueue = true, silent = false, showToasts = true, sharePassword } = options;
  const downloadId = `${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;

  try {
    if (!silent) {
      options.onStart?.(downloadId);
    }

    // getDownloadUrl already handles encoding
    const params: Record<string, string> = {};
    if (sharePassword) params.password = sharePassword;

    const response = await getDownloadUrl(
      objectName,
      Object.keys(params).length > 0
        ? {
            params: { ...params },
          }
        : undefined
    );

    if (response.status === 202 && useQueue) {
      if (!silent && showToasts) {
        toast.info(`${fileName} was added to download queue`, {
          description: "Download will start automatically when queue space is available",
          duration: 5000,
        });
      }

      const actualDownloadUrl = await waitForDownloadReady(objectName, fileName);
      await performDownload(actualDownloadUrl, fileName);
    } else {
      await performDownload(response.data.url, fileName);
    }

    if (!silent) {
      options.onComplete?.(downloadId);
      if (showToasts) {
        toast.success(`${fileName} downloaded successfully`);
      }
    }
  } catch (error: any) {
    if (!silent) {
      options.onFail?.(downloadId, error?.message || "Download failed");
      if (showToasts) {
        toast.error(`Failed to download ${fileName}`);
      }
    }
    throw error;
  }
}
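
Illustrative call; the object name and password values are hypothetical:

await downloadFileWithQueue("uploads/abc123/report.pdf", "report.pdf", {
  sharePassword: "s3cret", // sent as a `password` query param, per the code above
  onComplete: (id) => console.log(`download ${id} finished`),
});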

export async function downloadReverseShareWithQueue(
  fileId: string,
  fileName: string,
  options: DownloadWithQueueOptions = {}
): Promise<void> {
  const { silent = false, showToasts = true } = options;
  const downloadId = `reverse-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;

  try {
    if (!silent) {
      options.onStart?.(downloadId);
    }

    const response = await downloadReverseShareFile(fileId);

    if (response.status === 202) {
      if (!silent && showToasts) {
        toast.info(`${fileName} was added to download queue`, {
          description: "Download will start automatically when queue space is available",
          duration: 5000,
        });
      }

      const actualDownloadUrl = await waitForReverseShareDownloadReady(fileId, fileName);
      await performDownload(actualDownloadUrl, fileName);
    } else {
      await performDownload(response.data.url, fileName);
    }

    if (!silent) {
      options.onComplete?.(downloadId);
      if (showToasts) {
        toast.success(`${fileName} downloaded successfully`);
      }
    }
  } catch (error: any) {
    if (!silent) {
      options.onFail?.(downloadId, error?.message || "Download failed");
      if (showToasts) {
        toast.error(`Failed to download ${fileName}`);
      }
    }
    throw error;
  }
}

export async function downloadFileAsBlobWithQueue(
  objectName: string,
  fileName: string,
  isReverseShare: boolean = false,
  fileId?: string,
  sharePassword?: string
): Promise<Blob> {
  try {
    let downloadUrl: string;

    if (isReverseShare && fileId) {
      const response = await downloadReverseShareFile(fileId);

      if (response.status === 202) {
        downloadUrl = await waitForReverseShareDownloadReady(fileId, fileName);
      } else {
        downloadUrl = response.data.url;
      }
    } else {
      // getDownloadUrl already handles encoding
      const params: Record<string, string> = {};
      if (sharePassword) params.password = sharePassword;

      const response = await getDownloadUrl(
        objectName,
        Object.keys(params).length > 0
          ? {
              params: { ...params },
            }
          : undefined
      );

      if (response.status === 202) {
        downloadUrl = await waitForDownloadReady(objectName, fileName);
      } else {
        downloadUrl = response.data.url;
      }
    }

    const fetchResponse = await fetch(downloadUrl);
    if (!fetchResponse.ok) {
      throw new Error(`Failed to download ${fileName}: ${fetchResponse.status}`);
    }

    return await fetchResponse.blob();
  } catch (error: any) {
    console.error(`Error downloading ${fileName}:`, error);
    throw error;
  }
}

function collectFolderFiles(
  folderId: string,
  allFiles: any[],
  allFolders: any[],
  folderPath: string = ""
): Array<{ objectName: string; name: string; zipPath: string }> {
  const result: Array<{ objectName: string; name: string; zipPath: string }> = [];

  const directFiles = allFiles.filter((file: any) => file.folderId === folderId);
  for (const file of directFiles) {
    result.push({
      objectName: file.objectName,
      name: file.name,
      zipPath: folderPath + file.name,
    });
  }

  const subfolders = allFolders.filter((folder: any) => folder.parentId === folderId);
  for (const subfolder of subfolders) {
    const subfolderPath = folderPath + subfolder.name + "/";
    const subFiles = collectFolderFiles(subfolder.id, allFiles, allFolders, subfolderPath);
    result.push(...subFiles);
  }

  return result;
}

function collectEmptyFolders(folderId: string, allFiles: any[], allFolders: any[], folderPath: string = ""): string[] {
  const emptyFolders: string[] = [];

  const subfolders = allFolders.filter((folder: any) => folder.parentId === folderId);
  for (const subfolder of subfolders) {
    const subfolderPath = folderPath + subfolder.name + "/";

    const subfolderFiles = collectFolderFiles(subfolder.id, allFiles, allFolders, "");

    if (subfolderFiles.length === 0) {
      emptyFolders.push(subfolderPath.slice(0, -1));
    }

    const nestedEmptyFolders = collectEmptyFolders(subfolder.id, allFiles, allFolders, subfolderPath);
    emptyFolders.push(...nestedEmptyFolders);
  }

  return emptyFolders;
}
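
How the two recursive helpers behave on a tiny tree (the demo objects use only the fields the helpers actually read):

const demoFiles = [{ objectName: "o1", name: "a.txt", folderId: "f1" }];
const demoFolders = [
  { id: "f1", name: "docs", parentId: null },
  { id: "f2", name: "empty", parentId: "f1" },
];
collectFolderFiles("f1", demoFiles, demoFolders, "docs/");
// → [{ objectName: "o1", name: "a.txt", zipPath: "docs/a.txt" }]
collectEmptyFolders("f1", demoFiles, demoFolders, "docs/");
// → ["docs/empty"], so the empty subfolder still appears in the ZIP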

export async function downloadFolderWithQueue(
  folderId: string,
  folderName: string,
  options: DownloadWithQueueOptions = {}
): Promise<void> {
  const { silent = false, showToasts = true } = options;
  const downloadId = `folder-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;

  try {
    if (!silent) {
      options.onStart?.(downloadId);
    }

    const { listFiles } = await import("@/http/endpoints/files");
    const { listFolders } = await import("@/http/endpoints/folders");

    const [allFilesResponse, allFoldersResponse] = await Promise.all([listFiles(), listFolders()]);
    const allFiles = allFilesResponse.data.files || [];
    const allFolders = allFoldersResponse.data.folders || [];

    const folderFiles = collectFolderFiles(folderId, allFiles, allFolders, `${folderName}/`);
    const emptyFolders = collectEmptyFolders(folderId, allFiles, allFolders, `${folderName}/`);

    if (folderFiles.length === 0 && emptyFolders.length === 0) {
      const message = "Folder is empty";
      if (showToasts) {
        toast.error(message);
      }
      throw new Error(message);
    }

    const JSZip = (await import("jszip")).default;
    const zip = new JSZip();

    for (const emptyFolderPath of emptyFolders) {
      zip.folder(emptyFolderPath);
    }

    for (const file of folderFiles) {
      try {
        const blob = await downloadFileAsBlobWithQueue(file.objectName, file.name);
        zip.file(file.zipPath, blob);
      } catch (error) {
        console.error(`Error downloading file ${file.name}:`, error);
      }
    }

    const zipBlob = await zip.generateAsync({ type: "blob" });
    const url = URL.createObjectURL(zipBlob);
    const a = document.createElement("a");
    a.href = url;
    a.download = `${folderName}.zip`;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);

    if (!silent) {
      options.onComplete?.(downloadId);
      if (showToasts) {
        toast.success(`${folderName} downloaded successfully`);
      }
    }
  } catch (error: any) {
    if (!silent) {
      options.onFail?.(downloadId, error?.message || "Download failed");
      if (showToasts) {
        toast.error(`Failed to download ${folderName}`);
      }
    }
    throw error;
  }
}

export async function downloadShareFolderWithQueue(
  folderId: string,
  folderName: string,
  shareFiles: any[],
  shareFolders: any[],
  options: DownloadWithQueueOptions = {}
): Promise<void> {
  const { silent = false, showToasts = true, sharePassword } = options;
  const downloadId = `share-folder-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`;

  try {
    if (!silent) {
      options.onStart?.(downloadId);
    }

    const folderFiles = collectFolderFiles(folderId, shareFiles, shareFolders, `${folderName}/`);
    const emptyFolders = collectEmptyFolders(folderId, shareFiles, shareFolders, `${folderName}/`);

    if (folderFiles.length === 0 && emptyFolders.length === 0) {
      const message = "Folder is empty";
      if (showToasts) {
        toast.error(message);
      }
      throw new Error(message);
    }

    const JSZip = (await import("jszip")).default;
    const zip = new JSZip();

    for (const emptyFolderPath of emptyFolders) {
      zip.folder(emptyFolderPath);
    }

    for (const file of folderFiles) {
      try {
        const blob = await downloadFileAsBlobWithQueue(file.objectName, file.name, false, undefined, sharePassword);
        zip.file(file.zipPath, blob);
      } catch (error) {
        console.error(`Error downloading file ${file.name}:`, error);
      }
    }

    const zipBlob = await zip.generateAsync({ type: "blob" });
    const url = URL.createObjectURL(zipBlob);
    const a = document.createElement("a");
    a.href = url;
    a.download = `${folderName}.zip`;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);

    if (!silent) {
      options.onComplete?.(downloadId);
      if (showToasts) {
        toast.success(`${folderName} downloaded successfully`);
      }
    }
  } catch (error: any) {
    if (!silent) {
      options.onFail?.(downloadId, error?.message || "Download failed");
      if (showToasts) {
        toast.error(`Failed to download ${folderName}`);
      }
    }
    throw error;
  }
}

export async function bulkDownloadWithQueue(
  items: Array<{
    objectName?: string;
    name: string;
    id?: string;
    isReverseShare?: boolean;
    type?: "file" | "folder";
  }>,
  zipName: string,
  onProgress?: (current: number, total: number) => void,
  wrapInFolder?: boolean
): Promise<void> {
  try {
    const JSZip = (await import("jszip")).default;
    const zip = new JSZip();

    const files = items.filter((item) => item.type !== "folder");
    const folders = items.filter((item) => item.type === "folder");

    // eslint-disable-next-line prefer-const
    let allFilesToDownload: Array<{
      objectName: string;
      name: string;
      zipPath: string;
      isReverseShare?: boolean;
      fileId?: string;
    }> = [];
    // eslint-disable-next-line prefer-const
    let allEmptyFolders: string[] = [];

    if (folders.length > 0) {
      const { listFiles } = await import("@/http/endpoints/files");
      const { listFolders } = await import("@/http/endpoints/folders");

      const [allFilesResponse, allFoldersResponse] = await Promise.all([listFiles(), listFolders()]);
      const allFiles = allFilesResponse.data.files || [];
      const allFolders = allFoldersResponse.data.folders || [];

      const wrapperPath = wrapInFolder ? `${zipName.replace(".zip", "")}/` : "";
      for (const folder of folders) {
        const folderPath = wrapperPath + `${folder.name}/`;
        const folderFiles = collectFolderFiles(folder.id!, allFiles, allFolders, folderPath);
        const emptyFolders = collectEmptyFolders(folder.id!, allFiles, allFolders, folderPath);

        allFilesToDownload.push(...folderFiles);
        allEmptyFolders.push(...emptyFolders);

        if (folderFiles.length === 0 && emptyFolders.length === 0) {
          allEmptyFolders.push(folderPath.slice(0, -1));
        }
      }

      const filesInFolders = new Set(allFilesToDownload.map((f) => f.objectName));
      for (const file of files) {
        if (!file.objectName || !filesInFolders.has(file.objectName)) {
          allFilesToDownload.push({
            objectName: file.objectName || file.name,
            name: file.name,
            zipPath: wrapperPath + file.name,
            isReverseShare: file.isReverseShare,
            fileId: file.id,
          });
        }
      }
    } else {
      const wrapperPath = wrapInFolder ? `${zipName.replace(".zip", "")}/` : "";
      for (const file of files) {
        allFilesToDownload.push({
          objectName: file.objectName || file.name,
          name: file.name,
          zipPath: wrapperPath + file.name,
          isReverseShare: file.isReverseShare,
          fileId: file.id,
        });
      }
    }

    for (const emptyFolderPath of allEmptyFolders) {
      zip.folder(emptyFolderPath);
    }

    for (let i = 0; i < allFilesToDownload.length; i++) {
      const file = allFilesToDownload[i];
      try {
        const blob = await downloadFileAsBlobWithQueue(
          file.objectName,
          file.name,
          file.isReverseShare || false,
          file.fileId
        );
        zip.file(file.zipPath, blob);
        onProgress?.(i + 1, allFilesToDownload.length);
      } catch (error) {
        console.error(`Error downloading file ${file.name}:`, error);
      }
    }

    const zipBlob = await zip.generateAsync({ type: "blob" });
    const url = URL.createObjectURL(zipBlob);
    const a = document.createElement("a");
    a.href = url;
    a.download = zipName.endsWith(".zip") ? zipName : `${zipName}.zip`;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);
  } catch (error) {
    console.error("Error creating ZIP:", error);
    throw error;
  }
}

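Illustrative call: zip two files and one folder into "backup.zip"; all names and ids here are hypothetical.

await bulkDownloadWithQueue(
  [
    { objectName: "o1", name: "a.txt", type: "file" },
    { objectName: "o2", name: "b.txt", type: "file" },
    { id: "f1", name: "docs", type: "folder" },
  ],
  "backup.zip",
  (done, total) => console.log(`${done}/${total}`),
  true // wrap everything in a top-level "backup/" folder inside the archive
);
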
export async function bulkDownloadShareWithQueue(
 | 
			
		||||
  items: Array<{
 | 
			
		||||
    objectName?: string;
 | 
			
		||||
    name: string;
 | 
			
		||||
    id?: string;
 | 
			
		||||
    type?: "file" | "folder";
 | 
			
		||||
  }>,
 | 
			
		||||
  shareFiles: any[],
 | 
			
		||||
  shareFolders: any[],
 | 
			
		||||
  zipName: string,
 | 
			
		||||
  onProgress?: (current: number, total: number) => void,
 | 
			
		||||
  wrapInFolder?: boolean,
 | 
			
		||||
  sharePassword?: string
 | 
			
		||||
): Promise<void> {
 | 
			
		||||
  try {
 | 
			
		||||
    const JSZip = (await import("jszip")).default;
 | 
			
		||||
    const zip = new JSZip();
 | 
			
		||||
 | 
			
		||||
    const files = items.filter((item) => item.type !== "folder");
 | 
			
		||||
    const folders = items.filter((item) => item.type === "folder");
 | 
			
		||||
 | 
			
		||||
    // eslint-disable-next-line prefer-const
 | 
			
		||||
    let allFilesToDownload: Array<{ objectName: string; name: string; zipPath: string }> = [];
 | 
			
		||||
    // eslint-disable-next-line prefer-const
 | 
			
		||||
    let allEmptyFolders: string[] = [];
 | 
			
		||||
 | 
			
		||||
    const wrapperPath = wrapInFolder ? `${zipName.replace(".zip", "")}/` : "";
 | 
			
		||||
 | 
			
		||||
    for (const folder of folders) {
 | 
			
		||||
      const folderPath = wrapperPath + `${folder.name}/`;
 | 
			
		||||
      const folderFiles = collectFolderFiles(folder.id!, shareFiles, shareFolders, folderPath);
 | 
			
		||||
      const emptyFolders = collectEmptyFolders(folder.id!, shareFiles, shareFolders, folderPath);
 | 
			
		||||
 | 
			
		||||
      allFilesToDownload.push(...folderFiles);
 | 
			
		||||
      allEmptyFolders.push(...emptyFolders);
 | 
			
		||||
 | 
			
		||||
      if (folderFiles.length === 0 && emptyFolders.length === 0) {
 | 
			
		||||
        allEmptyFolders.push(folderPath.slice(0, -1));
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    const filesInFolders = new Set(allFilesToDownload.map((f) => f.objectName));
 | 
			
		||||
    for (const file of files) {
 | 
			
		||||
      if (!file.objectName || !filesInFolders.has(file.objectName)) {
 | 
			
		||||
        allFilesToDownload.push({
 | 
			
		||||
          objectName: file.objectName!,
 | 
			
		||||
          name: file.name,
 | 
			
		||||
          zipPath: wrapperPath + file.name,
 | 
			
		||||
        });
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    for (const emptyFolderPath of allEmptyFolders) {
 | 
			
		||||
      zip.folder(emptyFolderPath);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    for (let i = 0; i < allFilesToDownload.length; i++) {
 | 
			
		||||
      const file = allFilesToDownload[i];
 | 
			
		||||
      try {
 | 
			
		||||
        const blob = await downloadFileAsBlobWithQueue(file.objectName, file.name, false, undefined, sharePassword);
 | 
			
		||||
        zip.file(file.zipPath, blob);
 | 
			
		||||
        onProgress?.(i + 1, allFilesToDownload.length);
 | 
			
		||||
      } catch (error) {
 | 
			
		||||
        console.error(`Error downloading file ${file.name}:`, error);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    const zipBlob = await zip.generateAsync({ type: "blob" });
 | 
			
		||||
    const url = URL.createObjectURL(zipBlob);
 | 
			
		||||
    const a = document.createElement("a");
 | 
			
		||||
    a.href = url;
 | 
			
		||||
    a.download = zipName.endsWith(".zip") ? zipName : `${zipName}.zip`;
 | 
			
		||||
    document.body.appendChild(a);
 | 
			
		||||
    a.click();
 | 
			
		||||
    document.body.removeChild(a);
 | 
			
		||||
    URL.revokeObjectURL(url);
 | 
			
		||||
  } catch (error) {
 | 
			
		||||
    console.error("Error creating ZIP:", error);
 | 
			
		||||
    throw error;
 | 
			
		||||
  }
 | 
			
		||||
}
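
A hypothetical call-site sketch for bulkDownloadShareWithQueue (the item shapes are simplified; real data comes from Palmr's share API):

// Zip one top-level file from a share (illustrative only).
await bulkDownloadShareWithQueue(
  [{ objectName: "uploads/report.pdf", name: "report.pdf", type: "file" }],
  [],           // shareFiles: used to resolve the contents of selected folders
  [],           // shareFolders: likewise
  "my-share",   // ".zip" is appended automatically
  (done, total) => console.log(`Downloaded ${done}/${total}`),
  false,        // wrapInFolder
  undefined     // sharePassword, for password-protected shares
);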

apps/web/src/utils/s3-upload.ts (new file, 91 lines)
@@ -0,0 +1,91 @@
import axios from "axios";

export interface S3UploadOptions {
  file: File;
  presignedUrl: string;
  onProgress?: (progress: number) => void;
  signal?: AbortSignal;
}

export interface S3UploadResult {
  success: boolean;
  error?: string;
}

/**
 * Simple S3 upload utility using presigned URLs
 * No chunking needed - browser handles the upload directly to S3
 */
export class S3Uploader {
  /**
   * Upload a file directly to S3 using a presigned URL
   */
  static async uploadFile(options: S3UploadOptions): Promise<S3UploadResult> {
    const { file, presignedUrl, onProgress, signal } = options;

    try {
      // Calculate timeout based on file size
      // Base: 2 minutes + 1 minute per 100MB
      const fileSizeMB = file.size / (1024 * 1024);
      const baseTimeout = 120000; // 2 minutes
      const timeoutPerMB = 600; // 600ms per MB (~1 minute per 100MB)
      const calculatedTimeout = Math.max(baseTimeout, Math.ceil(fileSizeMB * timeoutPerMB));

      await axios.put(presignedUrl, file, {
        headers: {
          "Content-Type": file.type || "application/octet-stream",
        },
        signal,
        timeout: calculatedTimeout,
        maxContentLength: Infinity,
        maxBodyLength: Infinity,
        onUploadProgress: (progressEvent) => {
          if (onProgress && progressEvent.total) {
            const progress = (progressEvent.loaded / progressEvent.total) * 100;
            onProgress(Math.round(progress));
          }
        },
      });

      return {
        success: true,
      };
    } catch (error: any) {
      console.error("S3 upload failed:", error);

      let errorMessage = "Upload failed";
      if (axios.isAxiosError(error)) {
        if (error.code === "ECONNABORTED" || error.message?.includes("timeout")) {
          errorMessage = "Upload timeout - file is too large or connection is slow";
        } else if (error.response) {
          errorMessage = `Upload failed: ${error.response.status} ${error.response.statusText}`;
        } else if (error.request) {
          errorMessage = "No response from server - check your connection";
        }
      }

      return {
        success: false,
        error: errorMessage,
      };
    }
  }

  /**
   * Calculate upload timeout based on file size
   */
  static calculateTimeout(fileSizeBytes: number): number {
    const fileSizeMB = fileSizeBytes / (1024 * 1024);
    const baseTimeout = 120000; // 2 minutes
    const timeoutPerMB = 600; // 600ms per MB

    // For very large files (>500MB), add extra time
    if (fileSizeMB > 500) {
      const extraMB = fileSizeMB - 500;
      const extraMinutes = Math.ceil(extraMB / 100);
      return baseTimeout + fileSizeMB * timeoutPerMB + extraMinutes * 60000;
    }

    return Math.max(baseTimeout, Math.ceil(fileSizeMB * timeoutPerMB));
  }
}
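
For scale, with the constants above a 100 MB file stays at the 2-minute floor (100 × 600 ms = 60 s), while a 1 GB file gets 1024 × 600 ms ≈ 10.2 minutes. A minimal call-site sketch follows; the input element and the presigned URL are placeholders, and obtaining the URL from Palmr's API is not shown:

// Illustrative upload with progress reporting and cancellation.
const input = document.querySelector<HTMLInputElement>("#file-input"); // hypothetical element
const file = input?.files?.[0];
if (!file) throw new Error("No file selected");

const presignedUrl = "https://storage.example/bucket/key?signature=..."; // placeholder presigned PUT URL
const controller = new AbortController(); // call controller.abort() to cancel

const result = await S3Uploader.uploadFile({
  file,
  presignedUrl,
  signal: controller.signal,
  onProgress: (pct) => console.log(`Upload: ${pct}%`),
});

if (!result.success) console.error(result.error);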

apps/web/src/utils/zip-download.ts (new file, 59 lines)
@@ -0,0 +1,59 @@
import JSZip from "jszip";

interface DownloadItem {
  url: string;
  name: string;
}

/**
 * Downloads multiple files and creates a ZIP archive in the browser
 * @param items Array of items with presigned URLs and file names
 * @param zipName Name for the ZIP file
 */
export async function downloadFilesAsZip(items: DownloadItem[], zipName: string): Promise<void> {
  const zip = new JSZip();

  // Download all files and add to ZIP
  const promises = items.map(async (item) => {
    try {
      const response = await fetch(item.url);
      if (!response.ok) {
        throw new Error(`Failed to download ${item.name}`);
      }
      const blob = await response.blob();
      zip.file(item.name, blob);
    } catch (error) {
      console.error(`Error downloading ${item.name}:`, error);
      throw error;
    }
  });

  await Promise.all(promises);

  // Generate ZIP file
  const zipBlob = await zip.generateAsync({ type: "blob" });

  // Trigger download
  const url = URL.createObjectURL(zipBlob);
  const link = document.createElement("a");
  link.href = url;
  link.download = zipName;
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
  URL.revokeObjectURL(url);
}

/**
 * Downloads a single file directly
 * @param url Presigned URL
 * @param fileName File name
 */
export async function downloadFile(url: string, fileName: string): Promise<void> {
  const link = document.createElement("a");
  link.href = url;
  link.download = fileName;
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
}
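
A short usage sketch; the URLs are placeholders for presigned GET URLs returned by the API. Note that fetch()-ing presigned URLs from the browser also requires the bucket's CORS policy to allow the app's origin:

await downloadFilesAsZip(
  [
    { url: "https://storage.example/bucket/a?signature=...", name: "report.pdf" },
    { url: "https://storage.example/bucket/b?signature=...", name: "notes.txt" },
  ],
  "share-archive.zip"
);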
@@ -3,31 +3,54 @@ services:
    image: kyantech/palmr:latest
    container_name: palmr
    environment:
      # Optional: Uncomment and configure as needed (if you don't use, you can remove)
      # - ENABLE_S3=false # Set to true to enable S3-compatible storage (OPTIONAL - default is false)
      # - S3_REJECT_UNAUTHORIZED=false # Set to false to allow self-signed certificates (OPTIONAL - default is true)
      # - DISABLE_FILESYSTEM_ENCRYPTION=true # Set to false to enable file encryption (ENCRYPTION_KEY becomes required) | (OPTIONAL - default is true)
      # - ENCRYPTION_KEY=change-this-key-in-production-min-32-chars # CHANGE THIS KEY FOR SECURITY (REQUIRED if DISABLE_FILESYSTEM_ENCRYPTION is false)
      # - PALMR_UID=1000 # UID for the container processes (OPTIONAL - default is 1000) | See our UID/GID Documentation for more information
      # - PALMR_GID=1000 # GID for the container processes (OPTIONAL - default is 1000) | See our UID/GID Documentation for more information
      # - DEFAULT_LANGUAGE=en-US # Default language for the application (optional, defaults to en-US) | See the docs to see all supported languages
      # - PRESIGNED_URL_EXPIRATION=3600 # Duration in seconds for presigned URL expiration (OPTIONAL - default is 3600 seconds / 1 hour)
      # - SECURE_SITE=true # Set to true if you are using a reverse proxy (OPTIONAL - default is false)

      # Download Memory Management Configuration (OPTIONAL - See documentation for details)
      # - DOWNLOAD_MAX_CONCURRENT=5 # Maximum number of simultaneous downloads (OPTIONAL - auto-scales based on system memory if not set)
      # - DOWNLOAD_MEMORY_THRESHOLD_MB=2048 # Memory threshold in MB before throttling (OPTIONAL - auto-scales based on system memory if not set)
      # - DOWNLOAD_QUEUE_SIZE=25 # Maximum queue size for pending downloads (OPTIONAL - auto-scales based on system memory if not set)
      # - DOWNLOAD_MIN_FILE_SIZE_GB=3.0 # Minimum file size in GB to activate memory management (OPTIONAL - default is 3.0)
      # - DOWNLOAD_AUTO_SCALE=true # Enable auto-scaling based on system memory (OPTIONAL - default is true)
      # - NODE_OPTIONS=--expose-gc # Enable garbage collection for large file downloads (RECOMMENDED for production)
      # - NEXT_PUBLIC_UPLOAD_CHUNK_SIZE_MB=100 # Chunk size in MB for large file uploads (OPTIONAL - auto-calculates if not set)
      # ==============================================================================
      # STORAGE CONFIGURATION
      # ==============================================================================
      # By default, Palmr uses internal storage - ZERO CONFIG NEEDED!
      # Files are managed automatically with no setup required.
      #
      # Want to use external S3 storage (AWS, S3-compatible, etc)? Just add:
      # - ENABLE_S3=true                    # Enable external S3
      # - S3_ENDPOINT=s3.amazonaws.com      # Your S3 endpoint
      # - S3_ACCESS_KEY=your-access-key     # Your access key
      # - S3_SECRET_KEY=your-secret-key     # Your secret key
      # - S3_BUCKET_NAME=palmr-files        # Your bucket name
      # - S3_REGION=us-east-1               # Region (optional)
      # - S3_USE_SSL=true                   # Use SSL (optional)
      # - S3_FORCE_PATH_STYLE=false         # Path-style URLs (optional, true for MinIO)
      # - S3_REJECT_UNAUTHORIZED=true       # Reject self-signed certs (optional)
      #
      # ==============================================================================
      # USER/GROUP CONFIGURATION
      # ==============================================================================
      # - PALMR_UID=1000                    # UID for container processes (optional)
      # - PALMR_GID=1000                    # GID for container processes (optional)
      #
      # ==============================================================================
      # APPLICATION SETTINGS
      # ==============================================================================
      # - DEFAULT_LANGUAGE=en-US            # Default language (optional)
      # - PRESIGNED_URL_EXPIRATION=3600     # Presigned URL expiration in seconds (optional)
      # - SECURE_SITE=true                  # Set true if using HTTPS reverse proxy (optional)
      STORAGE_URL: "https://palmr-demo:9379" # REQUIRED for internal storage: Full storage URL with protocol (e.g., https://syrg.palmr.com or http://192.168.1.100:9379). Not needed when ENABLE_S3=true.
      #
    ports:
      - "5487:5487" # Web port
      - "3333:3333" # API port (OPTIONAL EXPOSED - ONLY IF YOU WANT TO ACCESS THE API DIRECTLY)
      - "5487:5487" # Web interface
      - "3333:3333" # API (optional, only if you need direct API access)
      - "9379:9379" # Internal storage (S3-compatible, required for file uploads when using internal storage)
      - "9378:9378" # Storage Console (optional, for storage management UI)
    volumes:
      - palmr_data:/app/server # Volume for the application data (changed from /data to /app/server)
    restart: unless-stopped # Restart the container unless it is stopped
      - palmr_data:/app/server
      # ==============================================================================
      # ADVANCED: Use a different disk for file storage (for larger capacity)
      # ==============================================================================
      # Uncomment the line below to store files on a different disk:
      # - /path/to/your/large/disk:/app/server/data
      #
      # Example: Mount a 2TB drive for files while keeping database on fast SSD
      # - /mnt/storage/palmr-files:/app/server/data
      #
    restart: unless-stopped

volumes:
  palmr_data:

infra/install-minio.sh (new executable file, 60 lines)
@@ -0,0 +1,60 @@
#!/bin/sh
# Download storage system binary for the appropriate architecture
# This script is run during Docker build

set -e

MINIO_VERSION="RELEASE.2024-10-13T13-34-11Z"
ARCH=$(uname -m)

echo "[BUILD] Downloading storage system ${MINIO_VERSION} for ${ARCH}..."

case "$ARCH" in
    x86_64)
        MINIO_ARCH="linux-amd64"
        ;;
    aarch64|arm64)
        MINIO_ARCH="linux-arm64"
        ;;
    *)
        echo "[BUILD] Unsupported architecture: $ARCH"
        echo "[BUILD] Palmr will fall back to external S3"
        exit 0
        ;;
esac

DOWNLOAD_URL="https://dl.min.io/server/minio/release/${MINIO_ARCH}/archive/minio.${MINIO_VERSION}"

echo "[BUILD] Downloading from: $DOWNLOAD_URL"

# Download with retry
MAX_RETRIES=3
RETRY_COUNT=0

while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
    if wget -O /tmp/minio "$DOWNLOAD_URL" 2>/dev/null; then
        echo "[BUILD] ✓ Download successful"
        break
    fi

    RETRY_COUNT=$((RETRY_COUNT + 1))
    echo "[BUILD] Download failed, retry $RETRY_COUNT/$MAX_RETRIES..."
    sleep 2
done

if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
    echo "[BUILD] ✗ Failed to download storage system after $MAX_RETRIES attempts"
    echo "[BUILD] Palmr will fall back to external S3"
    exit 0
fi

# Install binary
chmod +x /tmp/minio
mv /tmp/minio /usr/local/bin/minio

echo "[BUILD] ✓ Storage system installed successfully"
/usr/local/bin/minio --version

exit 0

infra/load-minio-credentials.sh (new executable file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/sh
# Load storage system credentials and export as environment variables

CREDENTIALS_FILE="/app/server/.minio-credentials"

if [ -f "$CREDENTIALS_FILE" ]; then
    echo "[PALMR] Loading storage system credentials..."

    # Read and export each line
    while IFS= read -r line; do
        # Skip empty lines and comments
        case "$line" in
            ''|'#'*) continue ;;
        esac

        # Export the variable
        export "$line"
    done < "$CREDENTIALS_FILE"

    echo "[PALMR] ✓ Storage system credentials loaded"
else
    echo "[PALMR] ⚠ Storage system credentials not found at $CREDENTIALS_FILE"
    echo "[PALMR] ⚠ No S3 configured - check your setup"
fi

infra/minio-setup.sh (new executable file, 96 lines)
@@ -0,0 +1,96 @@
#!/bin/sh
# Storage System Automatic Setup Script
# This script automatically configures the storage system on first boot
# No user intervention required

set -e

# Configuration
MINIO_DATA_DIR="${MINIO_DATA_DIR:-/app/server/minio-data}"
MINIO_ROOT_USER="palmr-minio-admin"
MINIO_ROOT_PASSWORD="$(cat /app/server/.minio-root-password 2>/dev/null || echo 'password-not-generated')"
MINIO_BUCKET="${MINIO_BUCKET:-palmr-files}"
MINIO_INITIALIZED_FLAG="/app/server/.minio-initialized"
MINIO_CREDENTIALS="/app/server/.minio-credentials"

echo "[STORAGE-SYSTEM-SETUP] Starting storage system configuration..."

# Create data directory
mkdir -p "$MINIO_DATA_DIR"

# Wait for storage system to start (managed by supervisor)
echo "[STORAGE-SYSTEM-SETUP] Waiting for storage system to start..."
MAX_RETRIES=30
RETRY_COUNT=0

while [ $RETRY_COUNT -lt $MAX_RETRIES ]; do
    if curl -sf http://127.0.0.1:9379/minio/health/live > /dev/null 2>&1; then
        echo "[STORAGE-SYSTEM-SETUP]   ✓ Storage system is responding"
        break
    fi
    RETRY_COUNT=$((RETRY_COUNT + 1))
    echo "[STORAGE-SYSTEM-SETUP]   Waiting... ($RETRY_COUNT/$MAX_RETRIES)"
    sleep 2
done

if [ $RETRY_COUNT -eq $MAX_RETRIES ]; then
    echo "[STORAGE-SYSTEM-SETUP] ✗ Storage system failed to start"
    exit 1
fi

# Configure storage client (mc) - ALWAYS reconfigure with current password
echo "[STORAGE-SYSTEM-SETUP] Configuring storage client..."
mc alias set palmr-local http://127.0.0.1:9379 "$MINIO_ROOT_USER" "$MINIO_ROOT_PASSWORD" 2>/dev/null || {
    echo "[STORAGE-SYSTEM-SETUP] ✗ Failed to configure storage client"
    exit 1
}

# Create bucket (idempotent - won't fail if it exists)
echo "[STORAGE-SYSTEM-SETUP] Ensuring storage bucket exists: $MINIO_BUCKET..."
if mc ls "palmr-local/$MINIO_BUCKET" > /dev/null 2>&1; then
    echo "[STORAGE-SYSTEM-SETUP]   ✓ Bucket '$MINIO_BUCKET' already exists"
else
    echo "[STORAGE-SYSTEM-SETUP]   Creating bucket '$MINIO_BUCKET'..."
    mc mb "palmr-local/$MINIO_BUCKET" 2>/dev/null || {
        echo "[STORAGE-SYSTEM-SETUP] ✗ Failed to create bucket"
        exit 1
    }
    echo "[STORAGE-SYSTEM-SETUP]   ✓ Bucket created"
fi

# Set bucket policy to private (always reapply)
echo "[STORAGE-SYSTEM-SETUP] Setting bucket policy..."
mc anonymous set none "palmr-local/$MINIO_BUCKET" 2>/dev/null || true

# Save credentials for Palmr to use
echo "[STORAGE-SYSTEM-SETUP] Saving credentials to $MINIO_CREDENTIALS..."

# Create credentials file
cat > "$MINIO_CREDENTIALS" <<EOF
S3_ENDPOINT=127.0.0.1
S3_PORT=9379
S3_ACCESS_KEY=$MINIO_ROOT_USER
S3_SECRET_KEY=$MINIO_ROOT_PASSWORD
S3_BUCKET_NAME=$MINIO_BUCKET
S3_REGION=us-east-1
S3_USE_SSL=false
S3_FORCE_PATH_STYLE=true
EOF

# Verify the file was created
if [ ! -f "$MINIO_CREDENTIALS" ]; then
    echo "[STORAGE-SYSTEM-SETUP] ✗ ERROR: Failed to create credentials file!"
    echo "[STORAGE-SYSTEM-SETUP] Check permissions on the /app/server directory"
    exit 1
fi

chmod 644 "$MINIO_CREDENTIALS" 2>/dev/null || true
echo "[STORAGE-SYSTEM-SETUP] ✓ Credentials file created and readable"

echo "[STORAGE-SYSTEM-SETUP] ✓✓✓ Storage system configured successfully!"
echo "[STORAGE-SYSTEM-SETUP]   Bucket: $MINIO_BUCKET"
echo "[STORAGE-SYSTEM-SETUP]   Credentials: saved to .minio-credentials"
echo "[STORAGE-SYSTEM-SETUP]   Palmr will use the storage system"

exit 0
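
The credentials file above maps one-to-one onto a standard S3 client configuration. As an illustration only (the server's actual client wiring is outside this diff), an AWS SDK v3 client could be built from the exported variables like this:

import { S3Client } from "@aws-sdk/client-s3";

// Sketch: construct a client from the variables that minio-setup.sh writes
// to .minio-credentials and that load-minio-credentials.sh exports into the env.
const useSsl = process.env.S3_USE_SSL === "true";
const s3 = new S3Client({
  endpoint: `${useSsl ? "https" : "http"}://${process.env.S3_ENDPOINT}:${process.env.S3_PORT}`,
  region: process.env.S3_REGION ?? "us-east-1",
  forcePathStyle: process.env.S3_FORCE_PATH_STYLE === "true", // path-style is required for MinIO
  credentials: {
    accessKeyId: process.env.S3_ACCESS_KEY ?? "",
    secretAccessKey: process.env.S3_SECRET_KEY ?? "",
  },
});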
@@ -1,7 +1,35 @@
#!/bin/sh
set -e

echo "🌴 Starting Palmr Server..."
echo "🚀 Starting Palmr Server..."

# Wait for storage system credentials to be ready (if using internal storage)
if [ "${ENABLE_S3}" != "true" ]; then
    echo "⏳ Waiting for internal storage to initialize..."
    MAX_WAIT=60
    WAIT_COUNT=0

    while [ $WAIT_COUNT -lt $MAX_WAIT ]; do
        if [ -f "/app/server/.minio-credentials" ]; then
            echo "✅ Internal storage ready!"
            break
        fi

        WAIT_COUNT=$((WAIT_COUNT + 1))
        echo "   Waiting for storage... ($WAIT_COUNT/$MAX_WAIT)"
        sleep 1
    done

    if [ $WAIT_COUNT -eq $MAX_WAIT ]; then
        echo "⚠️  WARNING: Internal storage not ready after ${MAX_WAIT}s"
        echo "⚠️  Server will start but storage may not work until ready"
    fi
fi

# Load storage system credentials if available
if [ -f "/app/load-minio-credentials.sh" ]; then
    . /app/load-minio-credentials.sh
fi

TARGET_UID=${PALMR_UID:-1000}
TARGET_GID=${PALMR_GID:-1000}
@@ -31,8 +59,10 @@ echo "📁 Creating data directories..."
mkdir -p /app/server/prisma /app/server/uploads /app/server/temp-uploads

if [ "$(id -u)" = "0" ]; then
    echo "🔐 Ensuring proper ownership before database operations..."
    chown -R $TARGET_UID:$TARGET_GID /app/server/prisma 2>/dev/null || true
    echo "🔐 Ensuring proper ownership for all operations..."
    # Fix permissions for entire /app/server to allow migration and storage system operations
    chown -R $TARGET_UID:$TARGET_GID /app/server 2>/dev/null || true
    chmod -R 755 /app/server 2>/dev/null || true
fi

run_as_user() {

infra/start-minio.sh (new executable file, 84 lines)
@@ -0,0 +1,84 @@
#!/bin/sh
# Start storage system with persistent root password
# This script MUST run as root to fix permissions, then drops to palmr user

set -e

DATA_DIR="/app/server/minio-data"
PASSWORD_FILE="/app/server/.minio-root-password"
MINIO_USER="palmr"

echo "[STORAGE-SYSTEM] Initializing storage..."

# DYNAMIC: Detect palmr user's actual UID and GID
# This works with any Docker user configuration
MINIO_UID=$(id -u $MINIO_USER 2>/dev/null || echo "1001")
MINIO_GID=$(id -g $MINIO_USER 2>/dev/null || echo "1001")

echo "[STORAGE-SYSTEM]   Target user: $MINIO_USER (UID:$MINIO_UID, GID:$MINIO_GID)"

# CRITICAL: Fix permissions as root (supervisor runs this as root via user=root)
# This MUST happen before dropping to palmr user
if [ "$(id -u)" = "0" ]; then
    echo "[STORAGE-SYSTEM]   Fixing permissions (running as root)..."

    # Clean metadata
    if [ -d "$DATA_DIR/.minio.sys" ]; then
        echo "[STORAGE-SYSTEM]   Cleaning metadata..."
        rm -rf "$DATA_DIR/.minio.sys" 2>/dev/null || true
    fi

    # Ensure directory exists
    mkdir -p "$DATA_DIR"

    # FIX: Change ownership to palmr (using detected UID:GID)
    chown -R ${MINIO_UID}:${MINIO_GID} "$DATA_DIR" 2>/dev/null || {
        echo "[STORAGE-SYSTEM] ⚠️  chown -R failed, trying non-recursive..."
        chown ${MINIO_UID}:${MINIO_GID} "$DATA_DIR" 2>/dev/null || true
    }

    chmod 755 "$DATA_DIR" 2>/dev/null || true

    # Force filesystem sync to ensure changes are visible immediately
    sync

    echo "[STORAGE-SYSTEM]   ✓ Permissions fixed (owner: ${MINIO_UID}:${MINIO_GID})"
else
    echo "[STORAGE-SYSTEM] ⚠️  WARNING: Not running as root, cannot fix permissions"
fi

# Verify directory is writable (test as palmr with detected UID:GID)
su-exec ${MINIO_UID}:${MINIO_GID} sh -c "
    if ! touch '$DATA_DIR/.test-write' 2>/dev/null; then
        echo '[STORAGE-SYSTEM] ❌ FATAL: Still cannot write to $DATA_DIR'
        ls -la '$DATA_DIR'
        echo '[STORAGE-SYSTEM] This should not happen after chown!'
        exit 1
    fi
    rm -f '$DATA_DIR/.test-write'
    echo '[STORAGE-SYSTEM]   ✓ Write test passed'
"

# Generate or reuse password (as root, then chown to palmr)
if [ -f "$PASSWORD_FILE" ]; then
    MINIO_ROOT_PASSWORD=$(cat "$PASSWORD_FILE")
    echo "[STORAGE-SYSTEM] Using existing root password"
else
    MINIO_ROOT_PASSWORD="$(openssl rand -hex 16)"
    echo "$MINIO_ROOT_PASSWORD" > "$PASSWORD_FILE"
    chmod 600 "$PASSWORD_FILE"
    chown ${MINIO_UID}:${MINIO_GID} "$PASSWORD_FILE" 2>/dev/null || true
    echo "[STORAGE-SYSTEM] Generated new root password"
fi

# Export for storage system
export MINIO_ROOT_USER="palmr-minio-admin"
export MINIO_ROOT_PASSWORD="$MINIO_ROOT_PASSWORD"

echo "[STORAGE-SYSTEM] Starting storage server on 0.0.0.0:9379 as user palmr..."

# Execute storage system as palmr user (dropping from root)
exec su-exec ${MINIO_UID}:${MINIO_GID} /usr/local/bin/minio server "$DATA_DIR" \
    --address 0.0.0.0:9379 \
    --console-address 0.0.0.0:9378
@@ -6,6 +6,35 @@ logfile_maxbytes=0
pidfile=/var/run/supervisord.pid
loglevel=info

[program:minio]
command=/bin/sh /app/start-minio.sh
directory=/app/server
user=root
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
environment=HOME="/root"
priority=50
startsecs=3

[program:minio-setup]
command=/bin/sh /app/minio-setup.sh
directory=/app
user=palmr
autostart=true
autorestart=unexpected
exitcodes=0
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
environment=HOME="/home/palmr"
priority=60
startsecs=0

[program:server]
command=/bin/sh -c "export DATABASE_URL='file:/app/server/prisma/palmr.db' && /app/server-start.sh"
directory=/app/palmr-app
@@ -31,4 +60,4 @@ stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
environment=PORT=5487,HOSTNAME="0.0.0.0",HOME="/home/palmr",API_BASE_URL="http://127.0.0.1:3333"
priority=200
startsecs=10
startsecs=10