11import type { Request , Response } from "express" ;
22import multer from "multer" ;
3+ import archiver from "archiver" ;
4+ import fs from "fs" ;
5+ import path from "path" ;
6+ import os from "os" ;
37import { FileService } from "../services/FileService" ;
48
59const upload = multer ( {
@@ -14,9 +18,15 @@ const uploadMultiple = multer({
1418
1519export class FileController {
1620 private fileService : FileService ;
21+ private ZIP_TEMP_DIR = path . join ( os . tmpdir ( ) , 'file-manager-zips' ) ;
1722
1823 constructor ( ) {
1924 this . fileService = new FileService ( ) ;
25+
26+ // Ensure temp directory exists
27+ if ( ! fs . existsSync ( this . ZIP_TEMP_DIR ) ) {
28+ fs . mkdirSync ( this . ZIP_TEMP_DIR , { recursive : true } ) ;
29+ }
2030 }
2131
2232 uploadFile = [
@@ -586,4 +596,318 @@ export class FileController {
586596 res . status ( 500 ) . json ( { error : "Failed to get storage usage" } ) ;
587597 }
588598 } ;
599+
600+ /**
601+ * Download multiple files as ZIP. Creates zip on disk then serves it.
602+ * Zip is deleted after serving via finally block.
603+ */
604+ downloadFilesAsZip = async ( req : Request , res : Response ) => {
605+ let output : fs . WriteStream | null = null ;
606+ let archive : archiver . Archiver | null = null ;
607+ let zipPath : string | null = null ;
608+
609+ try {
610+ if ( ! req . user ) {
611+ return res
612+ . status ( 401 )
613+ . json ( { error : "Authentication required" } ) ;
614+ }
615+
616+ let { files, fileIds } = req . body ;
617+
618+ // Handle form-encoded data where files is a JSON string
619+ if ( typeof files === 'string' ) {
620+ try {
621+ files = JSON . parse ( files ) ;
622+ } catch {
623+ return res . status ( 400 ) . json ( { error : "Invalid files JSON" } ) ;
624+ }
625+ }
626+
627+ // Support both formats: { files: [{id, path}] } or { fileIds: [id] }
628+ let fileEntries : Array < { id : string ; path : string } > ;
629+
630+ if ( Array . isArray ( files ) && files . length > 0 ) {
631+ fileEntries = files . map ( ( f : any ) => ( {
632+ id : typeof f === 'string' ? f : f . id ,
633+ path : ( typeof f === 'object' && f . path ) || '' ,
634+ } ) ) ;
635+ } else if ( Array . isArray ( fileIds ) && fileIds . length > 0 ) {
636+ fileEntries = fileIds . map ( ( id : string ) => ( { id, path : '' } ) ) ;
637+ } else {
638+ return res . status ( 400 ) . json ( { error : "files or fileIds array is required" } ) ;
639+ }
640+
641+ if ( fileEntries . length > 500 ) {
642+ return res . status ( 400 ) . json ( { error : "Maximum 500 files per download" } ) ;
643+ }
644+
645+ // Validate all file IDs are non-empty strings
646+ for ( const entry of fileEntries ) {
647+ if ( ! entry . id || typeof entry . id !== 'string' || entry . id . trim ( ) === '' ) {
648+ return res . status ( 400 ) . json ( { error : "Invalid file id in request" } ) ;
649+ }
650+ }
651+
652+ // Validate all files exist and user has access
653+ const validatedFiles = await this . fileService . getFilesMetadataByIds (
654+ fileEntries . map ( f => f . id ) ,
655+ req . user . id
656+ ) ;
657+
658+ if ( validatedFiles . length === 0 ) {
659+ return res . status ( 404 ) . json ( { error : "No accessible files found" } ) ;
660+ }
661+
662+ // Create a map of id -> metadata for quick lookup
663+ const fileMetaMap = new Map ( validatedFiles . map ( f => [ f . id , f ] ) ) ;
664+
665+ // Create zip file on disk with timestamp
666+ const timestamp = new Date ( ) . toISOString ( ) . replace ( / [: .] / g, '-' ) . slice ( 0 , 19 ) ;
667+ const zipFilename = `files-${ timestamp } .zip` ;
668+ zipPath = path . join ( this . ZIP_TEMP_DIR , zipFilename ) ;
669+
670+ console . log ( `[ZIP] Creating zip file at: ${ zipPath } ` ) ;
671+
672+ output = fs . createWriteStream ( zipPath ) ;
673+ archive = archiver ( 'zip' , {
674+ store : true , // No compression for speed
675+ } ) ;
676+
677+ console . log ( `[ZIP] Archive and output stream initialized` ) ;
678+
679+ // Track if request was aborted
680+ let aborted = false ;
681+
682+ // Handle client disconnect
683+ req . on ( 'close' , ( ) => {
684+ if ( ! res . writableEnded ) {
685+ aborted = true ;
686+ if ( archive ) archive . abort ( ) ;
687+ console . log ( 'Download aborted by client' ) ;
688+ }
689+ } ) ;
690+
691+ // Handle archive errors
692+ archive . on ( 'error' , ( err : Error ) => {
693+ if ( aborted ) return ;
694+ console . error ( 'Archive error:' , err ) ;
695+ if ( ! res . headersSent ) {
696+ res . status ( 500 ) . json ( { error : 'Failed to create archive' } ) ;
697+ }
698+ } ) ;
699+
700+ // Pipe archive to file on disk
701+ archive . pipe ( output ) ;
702+
703+ // Set up promise to wait for file write completion (BEFORE finalize)
704+ const writeComplete = new Promise < void > ( ( resolve , reject ) => {
705+ output ! . on ( 'finish' , resolve ) ; // 'finish' fires when all data written
706+ output ! . on ( 'error' , reject ) ;
707+ } ) ;
708+
709+ // Track full paths to handle duplicates
710+ const usedPaths = new Map < string , number > ( ) ;
711+
712+ // Sanitize filename to prevent zip-slip attacks
713+ const sanitizeFilename = ( filename : string ) : string => {
714+ if ( ! filename ) return 'file' ;
715+
716+ let safe = filename ;
717+
718+ // Convert backslashes to forward slashes
719+ safe = safe . replace ( / \\ / g, '/' ) ;
720+
721+ // Strip Windows drive letters (C:, D:, etc.)
722+ safe = safe . replace ( / ^ [ a - z A - Z ] : / , '' ) ;
723+
724+ // Strip any leading slashes or dots
725+ safe = safe . replace ( / ^ [ \/ \. ] + / , '' ) ;
726+
727+ // Take only the basename (after last slash)
728+ const lastSlash = safe . lastIndexOf ( '/' ) ;
729+ if ( lastSlash !== - 1 ) {
730+ safe = safe . slice ( lastSlash + 1 ) ;
731+ }
732+
733+ // Remove or replace dangerous characters
734+ // Keep: alphanumeric, spaces, dots, dashes, underscores, parentheses
735+ safe = safe . replace ( / [ ^ \w \s . \- ( ) ] / g, '_' ) ;
736+
737+ // Collapse multiple dots to prevent .. traversal
738+ safe = safe . replace ( / \. { 2 , } / g, '.' ) ;
739+
740+ // Remove leading/trailing dots and spaces
741+ safe = safe . replace ( / ^ [ . \s ] + | [ . \s ] + $ / g, '' ) ;
742+
743+ // If empty after sanitization, use default
744+ if ( ! safe ) return 'file' ;
745+
746+ return safe ;
747+ } ;
748+
749+ // Sanitize path to prevent directory traversal attacks
750+ const sanitizePath = ( p : string ) : string => {
751+ if ( ! p ) return '' ;
752+
753+ // Normalize separators: convert backslashes to forward slashes
754+ let normalized = p . replace ( / \\ / g, '/' ) ;
755+
756+ // Strip Windows drive letters (C:, D:, etc.)
757+ normalized = normalized . replace ( / ^ [ a - z A - Z ] : / , '' ) ;
758+
759+ // Strip UNC paths (//server/share or \\server\share already normalized)
760+ normalized = normalized . replace ( / ^ \/ \/ [ ^ / ] * \/ [ ^ / ] * / , '' ) ;
761+
762+ // Strip any leading slashes
763+ normalized = normalized . replace ( / ^ \/ + / , '' ) ;
764+
765+ // Split into segments and resolve . and ..
766+ const segments = normalized . split ( '/' ) ;
767+ const resolved : string [ ] = [ ] ;
768+ let escapedRoot = false ;
769+
770+ for ( const segment of segments ) {
771+ // Skip empty segments and current directory references
772+ if ( segment === '' || segment === '.' ) {
773+ continue ;
774+ }
775+
776+ if ( segment === '..' ) {
777+ // Pop parent directory if possible
778+ if ( resolved . length > 0 ) {
779+ resolved . pop ( ) ;
780+ } else {
781+ // Attempted to escape root - mark as invalid
782+ escapedRoot = true ;
783+ }
784+ } else {
785+ // Regular segment - add it
786+ resolved . push ( segment ) ;
787+ }
788+ }
789+
790+ // If any attempt to escape root was detected, return empty string
791+ if ( escapedRoot ) {
792+ return '' ;
793+ }
794+
795+ return resolved . join ( '/' ) ;
796+ } ;
797+
798+ // Stream each file into the archive one at a time
799+ for ( const entry of fileEntries ) {
800+ // Stop processing if client disconnected
801+ if ( aborted ) break ;
802+
803+ const fileMeta = fileMetaMap . get ( entry . id ) ;
804+ if ( ! fileMeta ) continue ; // User doesn't have access
805+
806+ try {
807+ const fileData = await this . fileService . getFileDataStream ( entry . id , req . user . id ) ;
808+
809+ if ( fileData ) {
810+ const sanitizedPath = sanitizePath ( entry . path ) ;
811+ const baseName = sanitizeFilename ( fileData . name ) ;
812+
813+ // Build full path in zip
814+ let fullPath = sanitizedPath ? `${ sanitizedPath } /${ baseName } ` : baseName ;
815+
816+ // Handle duplicate paths by appending a number
817+ const count = usedPaths . get ( fullPath ) || 0 ;
818+ if ( count > 0 ) {
819+ const ext = baseName . lastIndexOf ( '.' ) ;
820+ let uniqueName : string ;
821+ if ( ext > 0 ) {
822+ uniqueName = `${ baseName . slice ( 0 , ext ) } (${ count } )${ baseName . slice ( ext ) } ` ;
823+ } else {
824+ uniqueName = `${ baseName } (${ count } )` ;
825+ }
826+ fullPath = sanitizedPath ? `${ sanitizedPath } /${ uniqueName } ` : uniqueName ;
827+ }
828+ usedPaths . set ( sanitizedPath ? `${ sanitizedPath } /${ baseName } ` : baseName , count + 1 ) ;
829+
830+ // Append stream to archive
831+ archive . append ( fileData . stream , { name : fullPath } ) ;
832+ }
833+ } catch ( fileError ) {
834+ console . error ( `Error adding file ${ entry . id } to archive:` , fileError ) ;
835+ // Continue with other files
836+ }
837+ }
838+
839+ // Finalize the archive (this is when the stream ends)
840+ if ( ! aborted && output && archive ) {
841+ console . log ( `[ZIP] Finalizing archive...` ) ;
842+ await archive . finalize ( ) ;
843+ console . log ( `[ZIP] Archive finalized, waiting for disk write...` ) ;
844+
845+ // Wait for file to be completely written to disk
846+ await writeComplete ;
847+ console . log ( `[ZIP] Disk write complete!` ) ;
848+
849+ // Send the file
850+ console . log ( `[ZIP] Starting to stream file: ${ zipPath } , size: ${ fs . statSync ( zipPath ) . size } bytes` ) ;
851+
852+ res . setHeader ( 'Content-Type' , 'application/zip' ) ;
853+ res . setHeader ( 'Content-Disposition' , `attachment; filename="${ zipFilename } "` ) ;
854+ res . setHeader ( 'Content-Length' , fs . statSync ( zipPath ) . size . toString ( ) ) ;
855+
856+ const fileStream = fs . createReadStream ( zipPath ) ;
857+
858+ // Wait for the stream to finish BEFORE exiting try block (so finally doesn't delete file mid-stream)
859+ await new Promise < void > ( ( resolve , reject ) => {
860+ fileStream . on ( 'end' , ( ) => {
861+ console . log ( `[ZIP] Finished streaming file: ${ zipPath } ` ) ;
862+ resolve ( ) ;
863+ } ) ;
864+ fileStream . on ( 'error' , ( err ) => {
865+ console . error ( 'Error streaming zip file:' , err ) ;
866+ if ( ! res . headersSent ) {
867+ res . status ( 500 ) . json ( { error : 'Failed to send zip file' } ) ;
868+ }
869+ reject ( err ) ;
870+ } ) ;
871+
872+ fileStream . pipe ( res ) ;
873+ } ) ;
874+ }
875+
876+ } catch ( error ) {
877+ console . error ( "Error creating zip download:" , error ) ;
878+ if ( ! res . headersSent ) {
879+ res . status ( 500 ) . json ( { error : "Failed to create zip download" } ) ;
880+ }
881+ } finally {
882+ console . log ( `[ZIP] Cleanup starting for: ${ zipPath } ` ) ;
883+
884+ // Always cleanup resources
885+ if ( archive ) {
886+ try {
887+ archive . abort ( ) ;
888+ } catch ( e ) {
889+ // Ignore abort errors (may already be finalized)
890+ }
891+ }
892+ if ( output ) {
893+ try {
894+ output . close ( ) ;
895+ } catch ( e ) {
896+ // Ignore close errors
897+ }
898+ }
899+ // Delete zip file - no longer needed after serving
900+ if ( zipPath && fs . existsSync ( zipPath ) ) {
901+ try {
902+ console . log ( `[ZIP] Deleting temp file: ${ zipPath } ` ) ;
903+ fs . unlinkSync ( zipPath ) ;
904+ console . log ( `[ZIP] Successfully deleted: ${ zipPath } ` ) ;
905+ } catch ( e ) {
906+ console . error ( '[ZIP] Error deleting temp zip:' , e ) ;
907+ }
908+ } else {
909+ console . log ( `[ZIP] File already gone or path not set: ${ zipPath } ` ) ;
910+ }
911+ }
912+ } ;
589913}