2929namespace libbitcoin {
3030namespace database {
3131
32+ // configuration
33+ // ----------------------------------------------------------------------------
34+
TEMPLATE
CLASS::hashhead(storage& head, size_t bits) NOEXCEPT
  : file_(head),
    // Bucket domain is 2^bits; mask_ selects the low 'bits' of a key hash.
    buckets_(system::power2<link>(bits)),
    mask_(system::unmask_right<link>(bits))
{
    // A saturated mask would leave no headroom above the bucket domain.
    BC_ASSERT_MSG(mask_ < max_size_t, "insufficient domain");
}
@@ -88,7 +91,7 @@ bool CLASS::get_body_count(Link& count) const NOEXCEPT
8891 if (!ptr)
8992 return false ;
9093
91- count = to_array<Link::size> (ptr->data ());
94+ link_array ( count. value ) = link_array (ptr->data ());
9295 return true ;
9396}
9497
@@ -100,112 +103,174 @@ bool CLASS::set_body_count(const Link& count) NOEXCEPT
100103 return false ;
101104
102105 // If head is padded then last bytes are fill (0xff).
103- to_array<Link::size>(ptr->data ()) = count;
106+ auto value = count.value ;
107+ link_array (ptr->data ()) = link_array (value);
104108 return true ;
105109}
106110
111+ // operation
112+ // ----------------------------------------------------------------------------
113+
107114TEMPLATE
108115inline Link CLASS::index (const Key& key) const NOEXCEPT
109116{
110117 using namespace system ;
111- const auto index = possible_narrow_cast<bucket_integer>(keys::hash<Key>(key));
112- return bit_and<bucket_integer>(mask_, index);
118+ const auto index = possible_narrow_cast<link>(keys::hash<Key>(key));
119+ return bit_and<link>(mask_, index);
120+ }
121+
122+ TEMPLATE
123+ inline Link CLASS::top (const Link& index) const NOEXCEPT
124+ {
125+ return to_link (get_cell (index));
113126}
114127
115128TEMPLATE
116129inline Link CLASS::top (const Key& key) const NOEXCEPT
117130{
118- return top (index (key));
131+ const auto value = get_cell (index (key));
132+ if (is_collision (value, key))
133+ return to_link (value);
134+
135+ return {};
119136}
120137
121138TEMPLATE
122- inline Link CLASS::top (const Link& index) const NOEXCEPT
139+ inline bool CLASS::push (const Link& current, bytes& next,
140+ const Key& key) NOEXCEPT
141+ {
142+ return push (current, next, index (key));
143+ }
144+
145+ TEMPLATE
146+ inline bool CLASS::push (const Link& current, bytes& next,
147+ const Link& index) NOEXCEPT
148+ {
149+ return set_cell (next, current, index) != terminal;
150+ }
151+
152+ TEMPLATE
153+ inline bool CLASS::push (bool & collision, const Link& current, bytes& next,
154+ const Key& key) NOEXCEPT
155+ {
156+ const auto previous = set_cell (next, current, index (key));
157+ if (previous == terminal)
158+ return false ;
159+
160+ // Caller searches Link{ next } for duplicate in case of filter collision.
161+ collision = is_collision (previous, key);
162+ return true ;
163+ }
164+
165+ // protected
166+ // ----------------------------------------------------------------------------
167+ // read/write
168+
TEMPLATE
inline CLASS::cell CLASS::get_cell(const Link& index) const NOEXCEPT
{
    // Read the cell (head link plus any filter bits) for the given bucket.
    // Returns terminal when the bucket position cannot be mapped.
    using namespace system;
    const auto raw = file_.get_raw(link_to_position(index));
    if (is_null(raw))
        return terminal;

    if constexpr (aligned)
    {
        // Reads full padded word.
        // xcode clang++16 does not support C++20 std::atomic_ref.
        ////const std::atomic_ref<cell> top(unsafe_byte_cast<cell>(raw));
        const auto& top = *pointer_cast<std::atomic<cell>>(raw);

        // Acquire is necessary to synchronize with push release.
        // Relaxed would miss next updates, so acquire is optimal.
        return top.load(std::memory_order_acquire);
    }
    else
    {
        // Unaligned cell cannot be read atomically: copy the bytes out
        // under the shared lock, releasing it before returning.
        cell top{};
        const auto& head = cell_array(raw);

        mutex_.lock_shared();
        cell_array(top) = head;
        mutex_.unlock_shared();

        return top;
    }
}
151200
152201TEMPLATE
153- inline bool CLASS::push (const Link& current, bytes& next,
154- const Key& key) NOEXCEPT
155- {
156- return push (current, next, index (key));
157- }
158-
159- TEMPLATE
160- inline bool CLASS::push (const Link& current, bytes& next,
161- const Link& index) NOEXCEPT
162- {
163- bool collision{};
164- return push (collision, current, next, index);
165- }
166-
167- TEMPLATE
168- inline bool CLASS::push (bool & collision, const Link& current, bytes& next,
202+ inline CLASS::cell CLASS::set_cell (bytes& next, const Link& current,
169203 const Link& index) NOEXCEPT
170204{
171205 using namespace system ;
172206 const auto raw = file_.get_raw (link_to_position (index));
173207 if (is_null (raw))
174- return false ;
208+ return terminal ;
175209
176210 if constexpr (aligned)
177211 {
178212 // Writes full padded word (0x00 fill).
179213 // xcode clang++16 does not support C++20 std::atomic_ref.
180- // //const std::atomic_ref<bucket_integer > head(unsafe_byte_cast<bucket_integer >(raw));
181- auto & head = *pointer_cast<std::atomic<bucket_integer >>(raw);
182- auto top = head .load (std::memory_order_acquire);
214+ // //const std::atomic_ref<cell > head(unsafe_byte_cast<cell >(raw));
215+ auto & top = *pointer_cast<std::atomic<cell >>(raw);
216+ auto head = top .load (std::memory_order_acquire);
183217 do
184218 {
185- // Compiler could order this after head .store, which would expose key
219+ // Compiler could order this after top .store, which would expose key
186220 // to search before next entry is linked. Thread fence imposes order.
187221 // A release fence ensures that all prior writes (like next) are
188222 // completed before any subsequent atomic store.
189- next = Link{ top } ;
223+ next = link_array (head) ;
190224 std::atomic_thread_fence (std::memory_order_release);
191225 }
192- while (!head .compare_exchange_weak (top, current,
226+ while (!top .compare_exchange_weak (head, to_cell (head, current) ,
193227 std::memory_order_release, std::memory_order_acquire));
194228 }
195229 else
196230 {
197- auto & head = to_array<bucket_size>(raw);
231+ cell top{};
232+ auto & head = cell_array (raw);
233+
198234 mutex_.lock ();
199- next = head;
200- head = current;
235+ cell_array (top) = head;
236+ next = link_array (top);
237+ auto bytes = to_cell (top, current);
238+ head = cell_array (bytes);
201239 mutex_.unlock ();
202240 }
203241
204- // TODO: set collision when unfiltered or fingerprint matches filter.
205- collision = false ;
242+ return true ;
243+ }
244+
245+ // protected
246+ // ----------------------------------------------------------------------------
247+ // filters
248+
249+ TEMPLATE
250+ constexpr CLASS::link CLASS::to_link (cell value) NOEXCEPT
251+ {
252+ if (value == terminal)
253+ return {};
254+
255+ using namespace system ;
256+ constexpr auto mask = unmask_right<cell>(link_bits);
257+ return possible_narrow_cast<link>(bit_and (value, mask));
258+ }
259+
TEMPLATE
constexpr CLASS::cell CLASS::to_cell(cell, link current) NOEXCEPT
{
    // Compose a cell from the new head link. The first (unnamed) parameter
    // is the previous cell, presumably carrying filter bits to merge in —
    // ignored until the filter is implemented.
    // TODO:
    return current;
}
266+
TEMPLATE
constexpr bool CLASS::is_collision(cell value, const Key&) NOEXCEPT
{
    // A terminal cell holds no entries, so no collision is possible.
    if (value == terminal)
        return false;

    // TODO:
    // Until filter/fingerprint matching is implemented, every non-terminal
    // cell conservatively reports a possible collision, so callers search
    // the bucket list for duplicates (see the push(collision, ...) caller).
    return true;
}
211276
0 commit comments