@@ -316,84 +316,81 @@ bool CodeStreamDecompress::decompressImpl(std::set<uint16_t> pendingTiles)
316316 if (pendingTiles.empty ())
317317 return true ;
318318
319- // synchronous batch init
320- if (doTileBatching () && !cp_.hasTLM ())
321- {
322- batchTileUnscheduledSequential_ = (uint16_t )pendingTiles.size ();
323- batchTileScheduleHeadroomSequential_ =
324- batchTileHeadroomIncrement (batchTileInitialRows_, batchTileUnscheduledSequential_);
325- batchTileScheduledRows_ = batchTileInitialRows_;
326- }
327-
328- // prepare for different types of decompression
319+ // prepare for differential decompression
329320 if (doDifferential)
330321 {
331322 differentialUpdate (scratchImage_.get ());
323+ for (auto & tileIndex : pendingTiles)
324+ {
325+ auto cacheEntry = tileCache_->get (tileIndex);
326+ auto tileProcessor = cacheEntry->processor ;
327+ if (!tileProcessor->differentialUpdate (headerImage_->getBounds ()))
328+ return false ;
329+ if (!schedule (tileProcessor, true ))
330+ return false ;
331+ }
332+ return true ;
332333 }
333- else if (cp_.hasTLM ())
334+
335+ // one-time dispatch initialization
336+ if (!decompressStart_)
334337 {
335- // a) begin network fetch
336- auto generator = [this ](ITileProcessor* tp) {
337- return postMultiTile (tp); // Return the result directly
338- };
338+ if (cp_.hasTLM ())
339+ decompressStart_ = [this ](auto & pt) { return startTLMDecompress (pt); };
340+ else
341+ decompressStart_ = [this ](auto & pt) { return startSequentialDecompress (pt); };
342+ }
339343
 340- if (fetchByTile (pendingTiles, scratchImage_->getBounds (), generator))
341- return true ;
 344+ return decompressStart_ (pendingTiles);
345+ }
342346
343- // b) prepare for TLM decompress
344- tilePartFetchFlat_ = std::make_shared<TPFetchSeq>();
345- tilePartFetchByTile_ =
346- std::make_shared<std::unordered_map<uint16_t , std::shared_ptr<TPFetchSeq>>>();
347- TPFetchSeq::genCollections (&cp_.tlmMarkers_ ->getTileParts (), pendingTiles, tilePartFetchFlat_,
348- tilePartFetchByTile_);
349- }
350- else
351- {
352- // a) begin network fetch
353- auto fetcher = stream_->getFetcher ();
354- if (stream_->getFetcher ())
355- {
356- auto chunkSize = cp_.t_width_ * cp_.t_height_ ;
357- chunkBuffer_ = std::make_shared<ChunkBuffer<>>(chunkSize, markerCache_->getTileStreamStart (),
358- fetcher->size ());
359- fetcher->fetchChunks (chunkBuffer_);
360- stream_->setChunkBuffer (chunkBuffer_);
361- }
347+ bool CodeStreamDecompress::startTLMDecompress (std::set<uint16_t >& pendingTiles)
348+ {
349+ // begin network fetch
350+ auto generator = [this ](ITileProcessor* tp) { return postMultiTile (tp); };
362351
363- // b) prepare for sequential decompress
364- decompressSequentialPrepare ();
365- }
352+ if (fetchByTile (pendingTiles, scratchImage_->getBounds (), generator))
353+ return true ;
366354
367- // schedule decompression
355+ // prepare TLM decompress
356+ tilePartFetchFlat_ = std::make_shared<TPFetchSeq>();
357+ tilePartFetchByTile_ =
358+ std::make_shared<std::unordered_map<uint16_t , std::shared_ptr<TPFetchSeq>>>();
359+ TPFetchSeq::genCollections (&cp_.tlmMarkers_ ->getTileParts (), pendingTiles, tilePartFetchFlat_,
360+ tilePartFetchByTile_);
368361
369- // 1. differential decompression
370- if (doDifferential)
362+ // start decompress worker
363+ decompressWorker_ = std::thread ([this , pendingTiles]() { decompressTLM (pendingTiles); });
364+ return true ;
365+ }
366+
367+ bool CodeStreamDecompress::startSequentialDecompress (std::set<uint16_t >& pendingTiles)
368+ {
369+ // batch init
370+ if (doTileBatching ())
371371 {
372- for (auto & tileIndex : pendingTiles)
373- {
374- if (doDifferential)
375- {
376- auto cacheEntry = tileCache_->get (tileIndex);
377- auto tileProcessor = cacheEntry->processor ;
378- if (!tileProcessor->differentialUpdate (headerImage_->getBounds ()))
379- {
380- return false ;
381- }
372+ batchTileUnscheduledSequential_ = (uint16_t )pendingTiles.size ();
373+ batchTileScheduleHeadroomSequential_ =
374+ batchTileHeadroomIncrement (batchTileInitialRows_, batchTileUnscheduledSequential_);
375+ batchTileScheduledRows_ = batchTileInitialRows_;
376+ }
382377
383- if (!schedule (tileProcessor, true ))
384- return false ;
385- }
386- }
387- return true ;
378+ // begin network fetch
379+ auto fetcher = stream_->getFetcher ();
380+ if (fetcher)
381+ {
382+ auto chunkSize = cp_.t_width_ * cp_.t_height_ ;
383+ chunkBuffer_ = std::make_shared<ChunkBuffer<>>(chunkSize, markerCache_->getTileStreamStart (),
384+ fetcher->size ());
385+ fetcher->fetchChunks (chunkBuffer_);
386+ stream_->setChunkBuffer (chunkBuffer_);
388387 }
389388
390- std::function<void ()> task;
391- if (cp_.hasTLM ())
392- task = [this , pendingTiles]() { decompressTLM (pendingTiles); };
393- else
394- task = [this , pendingTiles]() { decompressSequential (pendingTiles); };
389+ // prepare sequential decompress
390+ decompressSequentialPrepare ();
395391
396- decompressWorker_ = std::thread (task);
392+ // start decompress worker
393+ decompressWorker_ = std::thread ([this , pendingTiles]() { decompressSequential (pendingTiles); });
397394 return true ;
398395}
399396
0 commit comments