@@ -554,13 +554,43 @@ bool map::finalize_(size_t size) NOEXCEPT
554554 return false ;
555555 }
556556
557- // TODO: madvise with large length value fails on linux, does 0 imply all?
558- if (::madvise (memory_map_, 0 , MADV_RANDOM) == fail)
559- {
560- set_first_code (error::madvise_failure);
561- unmap_ ();
562- return false ;
563- }
557+ // TODO: apply madvise to only the size increase, not the whole mapping.
558+ // TODO: madvise with large length value fails on linux (and zero is noop).
559+ // TODO: MADV_RANDOM may not be best for bodies, since their writes are sequential.
560+ // TODO: Heads are accessed randomly for both read and write, so they could benefit.
561+ // TODO: madvise fails with a large and/or page-unaligned size. Round the size
562+ // TODO: up to a page boundary and issue madvise in 1GB (1_u64 << 30) chunks; sample:
563+ // //// Get page size (usually 4KB)
564+ // //const size_t page_size = sysconf(_SC_PAGESIZE);
565+ // //if (page_size == static_cast<size_t>(-1))
566+ // //{
567+ // // set_first_code(error::sysconf_failure);
568+ // // unmap_();
569+ // // return false;
570+ // //}
571+ // //
572+ // //// Align size up to page boundary
573+ // //const size_t aligned_size = (size + page_size - 1) & ~(page_size - 1);
574+ // //
575+ // //// Use 1GB chunks to avoid large-length issues
576+ // //constexpr size_t chunk_size = 1ULL << 30; // 1GB
577+ // //uint8_t* ptr = memory_map_;
578+ // //for (size_t offset = 0; offset < aligned_size; offset += chunk_size)
579+ // //{
580+ // // size_t len = std::min(chunk_size, aligned_size - offset);
581+ // // if (::madvise(ptr + offset, len, MADV_SEQUENTIAL) == fail)
582+ // // {
583+ // // set_first_code(error::madvise_failure);
584+ // // unmap_();
585+ // // return false;
586+ // // }
587+ // //}
588+ // //// Alternative (for heads): advise random access over the whole map instead:
588+ // //if (::madvise(memory_map_, size, MADV_RANDOM) == fail)
589+ // //{
590+ // // set_first_code(error::madvise_failure);
591+ // // unmap_();
592+ // // return false;
593+ // //}
564594
565595 loaded_ = true ;
566596 capacity_ = size;
0 commit comments