Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #ifndef ds_OrderedHashTable_h
8 : #define ds_OrderedHashTable_h
9 :
10 : /*
11 : * Define two collection templates, js::OrderedHashMap and js::OrderedHashSet.
12 : * They are like js::HashMap and js::HashSet except that:
13 : *
14 : * - Iterating over an Ordered hash table visits the entries in the order in
15 : * which they were inserted. This means that unlike a HashMap, the behavior
16 : * of an OrderedHashMap is deterministic (as long as the HashPolicy methods
17 : * are effect-free and consistent); the hashing is a pure performance
18 : * optimization.
19 : *
20 : * - Range objects over Ordered tables remain valid even when entries are
21 : * added or removed or the table is resized. (However in the case of
22 : * removing entries, note the warning on class Range below.)
23 : *
24 : * - The API is a little different, so it's not a drop-in replacement.
25 : * In particular, the hash policy is a little different.
26 : * Also, the Ordered templates lack the Ptr and AddPtr types.
27 : *
28 : * Hash policies
29 : *
30 : * See the comment about "Hash policy" in HashTable.h for general features that
31 : * hash policy classes must provide. Hash policies for OrderedHashMaps and Sets
32 : * differ in that the hash() method takes an extra argument:
33 : * static js::HashNumber hash(Lookup, const HashCodeScrambler&);
34 : * They must additionally provide a distinguished "empty" key value and the
35 : * following static member functions:
36 : * bool isEmpty(const Key&);
37 : * void makeEmpty(Key*);
38 : */
39 :
40 : #include "mozilla/HashFunctions.h"
41 : #include "mozilla/Move.h"
42 :
43 : using mozilla::Forward;
44 : using mozilla::Move;
45 :
46 : namespace js {
47 :
48 : namespace detail {
49 :
50 : /*
51 : * detail::OrderedHashTable is the underlying data structure used to implement both
52 : * OrderedHashMap and OrderedHashSet. Programs should use one of those two
53 : * templates rather than OrderedHashTable.
54 : */
/*
 * detail::OrderedHashTable is the underlying data structure used to implement both
 * OrderedHashMap and OrderedHashSet. Programs should use one of those two
 * templates rather than OrderedHashTable.
 *
 * Layout: entries live in |data| in insertion order; each bucket of
 * |hashTable| points to the head of a singly linked chain threaded through
 * Data::chain. remove() empties an entry in place (via Ops::makeEmpty) rather
 * than compacting, so data[0:dataLength] may contain empty holes until the
 * next rehash.
 */
template <class T, class Ops, class AllocPolicy>
class OrderedHashTable
{
  public:
    typedef typename Ops::KeyType Key;
    typedef typename Ops::Lookup Lookup;

    /* One slot of the data vector: the stored element plus its hash-chain link. */
    struct Data
    {
        T element;
        Data* chain;    // next entry in the same hash bucket, or nullptr

        Data(const T& e, Data* c) : element(e), chain(c) {}
        Data(T&& e, Data* c) : element(Move(e)), chain(c) {}
    };

    class Range;
    friend class Range;

  private:
    Data** hashTable;           // hash table (has hashBuckets() elements)
    Data* data;                 // data vector, an array of Data objects
                                // data[0:dataLength] are constructed
    uint32_t dataLength;        // number of constructed elements in data
    uint32_t dataCapacity;      // size of data, in elements
    uint32_t liveCount;         // dataLength less empty (removed) entries
    uint32_t hashShift;         // multiplicative hash shift
    Range* ranges;              // list of all live Ranges on this table
    AllocPolicy alloc;
    mozilla::HashCodeScrambler hcs;     // don't reveal pointer hash codes

  public:
    // Note: dataCapacity, liveCount, and hashShift are deliberately left
    // unset here; init() must be called, and must succeed, before any other
    // method is used.
    OrderedHashTable(AllocPolicy& ap, mozilla::HashCodeScrambler hcs)
        : hashTable(nullptr), data(nullptr), dataLength(0), ranges(nullptr), alloc(ap), hcs(hcs) {}

    /*
     * Allocate the initial hash table and data vector.
     * Returns false on allocation failure, leaving the table unusable.
     */
    MOZ_MUST_USE bool init() {
        MOZ_ASSERT(!hashTable, "init must be called at most once");

        uint32_t buckets = initialBuckets();
        Data** tableAlloc = alloc.template pod_malloc<Data*>(buckets);
        if (!tableAlloc)
            return false;
        for (uint32_t i = 0; i < buckets; i++)
            tableAlloc[i] = nullptr;

        uint32_t capacity = uint32_t(buckets * fillFactor());
        Data* dataAlloc = alloc.template pod_malloc<Data>(capacity);
        if (!dataAlloc) {
            alloc.free_(tableAlloc);
            return false;
        }

        // clear() requires that members are assigned only after all allocation
        // has succeeded, and that this->ranges is left untouched.
        hashTable = tableAlloc;
        data = dataAlloc;
        dataLength = 0;
        dataCapacity = capacity;
        liveCount = 0;
        hashShift = HashNumberSizeBits - initialBucketsLog2();
        MOZ_ASSERT(hashBuckets() == buckets);
        return true;
    }

    ~OrderedHashTable() {
        // Live Ranges may outlast the table; mark each one dead (see
        // Range::onTableDestroyed) before freeing the storage they point at.
        for (Range* r = ranges; r; ) {
            Range* next = r->next;
            r->onTableDestroyed();
            r = next;
        }
        alloc.free_(hashTable);
        freeData(data, dataLength);
    }

    /* Return the number of elements in the table. */
    uint32_t count() const { return liveCount; }

    /* True if any element matches l. */
    bool has(const Lookup& l) const {
        return lookup(l) != nullptr;
    }

    /* Return a pointer to the element, if any, that matches l, or nullptr. */
    T* get(const Lookup& l) {
        Data* e = lookup(l, prepareHash(l));
        return e ? &e->element : nullptr;
    }

    /* Return a pointer to the element, if any, that matches l, or nullptr. */
    const T* get(const Lookup& l) const {
        return const_cast<OrderedHashTable*>(this)->get(l);
    }

    /*
     * If the table already contains an entry that matches |element|,
     * replace that entry with |element|. Otherwise add a new entry.
     *
     * On success, return true, whether there was already a matching element or
     * not. On allocation failure, return false. If this returns false, it
     * means the element was not added to the table.
     */
    template <typename ElementInput>
    MOZ_MUST_USE bool put(ElementInput&& element) {
        HashNumber h = prepareHash(Ops::getKey(element));
        if (Data* e = lookup(Ops::getKey(element), h)) {
            e->element = Forward<ElementInput>(element);
            return true;
        }

        if (dataLength == dataCapacity) {
            // If the hashTable is more than 1/4 deleted data, simply rehash in
            // place to free up some space. Otherwise, grow the table.
            uint32_t newHashShift = liveCount >= dataCapacity * 0.75 ? hashShift - 1 : hashShift;
            if (!rehash(newHashShift))
                return false;
        }

        // h is the full scrambled hash code; shifting by hashShift (which
        // rehash may just have updated) selects the bucket.
        h >>= hashShift;
        liveCount++;
        Data* e = &data[dataLength++];
        new (e) Data(Forward<ElementInput>(element), hashTable[h]);
        hashTable[h] = e;
        return true;
    }

    /*
     * If the table contains an element matching l, remove it and set *foundp
     * to true. Otherwise set *foundp to false.
     *
     * Return true on success, false if we tried to shrink the table and hit an
     * allocation failure. Even if this returns false, *foundp is set correctly
     * and the matching element was removed. Shrinking is an optimization and
     * it's OK for it to fail.
     */
    bool remove(const Lookup& l, bool* foundp) {
        // Note: This could be optimized so that removing the last entry,
        // data[dataLength - 1], decrements dataLength. LIFO use cases would
        // benefit.

        // If a matching entry exists, empty it.
        Data* e = lookup(l, prepareHash(l));
        if (e == nullptr) {
            *foundp = false;
            return true;
        }

        *foundp = true;
        liveCount--;
        Ops::makeEmpty(&e->element);

        // Update active Ranges.
        uint32_t pos = e - data;
        for (Range* r = ranges; r; r = r->next)
            r->onRemove(pos);

        // If many entries have been removed, try to shrink the table.
        if (hashBuckets() > initialBuckets() && liveCount < dataLength * minDataFill()) {
            if (!rehash(hashShift + 1))
                return false;
        }
        return true;
    }

    /*
     * Remove all entries.
     *
     * Returns false on OOM, leaving the OrderedHashTable and any live Ranges
     * in the old state.
     *
     * The effect on live Ranges is the same as removing all entries; in
     * particular, those Ranges are still live and will see any entries added
     * after a successful clear().
     */
    MOZ_MUST_USE bool clear() {
        if (dataLength != 0) {
            Data** oldHashTable = hashTable;
            Data* oldData = data;
            uint32_t oldDataLength = dataLength;

            hashTable = nullptr;
            if (!init()) {
                // init() only mutates members on success; see comment above.
                hashTable = oldHashTable;
                return false;
            }

            alloc.free_(oldHashTable);
            freeData(oldData, oldDataLength);
            for (Range* r = ranges; r; r = r->next)
                r->onClear();
        }

        MOZ_ASSERT(hashTable);
        MOZ_ASSERT(data);
        MOZ_ASSERT(dataLength == 0);
        MOZ_ASSERT(liveCount == 0);
        return true;
    }

    /*
     * Ranges are used to iterate over OrderedHashTables.
     *
     * Suppose 'Map' is some instance of OrderedHashMap, and 'map' is a Map.
     * Then you can walk all the key-value pairs like this:
     *
     *     for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
     *         Map::Entry& pair = r.front();
     *         ... do something with pair ...
     *     }
     *
     * Ranges remain valid for the lifetime of the OrderedHashTable, even if
     * entries are added or removed or the table is resized. Don't do anything
     * to a Range, except destroy it, after the OrderedHashTable has been
     * destroyed. (We support destroying the two objects in either order to
     * humor the GC, bless its nondeterministic heart.)
     *
     * Warning: The behavior when the current front() entry is removed from the
     * table is subtly different from js::HashTable<>::Enum::removeFront()!
     * HashTable::Enum doesn't skip any entries when you removeFront() and then
     * popFront(). OrderedHashTable::Range does! (This is useful for using a
     * Range to implement JS Map.prototype.iterator.)
     *
     * The workaround is to call popFront() as soon as possible,
     * before there's any possibility of modifying the table:
     *
     *     for (Map::Range r = map.all(); !r.empty(); ) {
     *         Key key = r.front().key;         // this won't modify map
     *         Value val = r.front().value;     // this won't modify map
     *         r.popFront();
     *         // ...do things that might modify map...
     *     }
     */
    class Range
    {
        friend class OrderedHashTable;

        // Cannot be a reference since we need to be able to do
        // |offsetof(Range, ht)|.
        OrderedHashTable* ht;

        /* The index of front() within ht->data. */
        uint32_t i;

        /*
         * The number of nonempty entries in ht->data to the left of front().
         * This is used when the table is resized or compacted.
         */
        uint32_t count;

        /*
         * Links in the doubly-linked list of active Ranges on ht.
         *
         * prevp points to the previous Range's .next field;
         *   or to ht->ranges if this is the first Range in the list.
         * next points to the next Range;
         *   or nullptr if this is the last Range in the list.
         *
         * Invariant: *prevp == this.
         */
        Range** prevp;
        Range* next;

        /*
         * Create a Range over all the entries in ht.
         * (This is private on purpose. End users must use ht->all().)
         * The new Range is linked at the head of ht->ranges.
         */
        explicit Range(OrderedHashTable* ht) : ht(ht), i(0), count(0), prevp(&ht->ranges), next(ht->ranges) {
            *prevp = this;
            if (next)
                next->prevp = &next;
            seek();
        }

      public:
        // Copies link themselves at the head of ht->ranges, like new Ranges.
        Range(const Range& other)
          : ht(other.ht), i(other.i), count(other.count), prevp(&ht->ranges), next(ht->ranges)
        {
            *prevp = this;
            if (next)
                next->prevp = &next;
        }

        ~Range() {
            *prevp = next;
            if (next)
                next->prevp = prevp;
        }

      private:
        // Prohibit copy assignment.
        Range& operator=(const Range& other) = delete;

        // Advance i past any empty (removed) entries.
        void seek() {
            while (i < ht->dataLength && Ops::isEmpty(Ops::getKey(ht->data[i].element)))
                i++;
        }

        /*
         * The hash table calls this when an entry is removed.
         * j is the index of the removed entry.
         */
        void onRemove(uint32_t j) {
            MOZ_ASSERT(valid());
            if (j < i)
                count--;
            if (j == i)
                seek();
        }

        /*
         * The hash table calls this when the table is resized or compacted.
         * Since |count| is the number of nonempty entries to the left of
         * front(), discarding the empty entries will not affect count, and it
         * will make i and count equal.
         */
        void onCompact() {
            MOZ_ASSERT(valid());
            i = count;
        }

        /* The hash table calls this when cleared. */
        void onClear() {
            MOZ_ASSERT(valid());
            i = count = 0;
        }

        // A destroyed table marks its Ranges with next == this (see
        // onTableDestroyed); any other state is a valid, live Range.
        bool valid() const {
            return next != this;
        }

        void onTableDestroyed() {
            MOZ_ASSERT(valid());
            prevp = &next;
            next = this;
        }

      public:
        bool empty() const {
            MOZ_ASSERT(valid());
            return i >= ht->dataLength;
        }

        /*
         * Return the first element in the range. This must not be called if
         * this->empty().
         *
         * Warning: Removing an entry from the table also removes it from any
         * live Ranges, and a Range can become empty that way, rendering
         * front() invalid. If in doubt, check empty() before calling front().
         */
        T& front() {
            MOZ_ASSERT(valid());
            MOZ_ASSERT(!empty());
            return ht->data[i].element;
        }

        /*
         * Remove the first element from this range.
         * This must not be called if this->empty().
         *
         * Warning: Removing an entry from the table also removes it from any
         * live Ranges, and a Range can become empty that way, rendering
         * popFront() invalid. If in doubt, check empty() before calling
         * popFront().
         */
        void popFront() {
            MOZ_ASSERT(valid());
            MOZ_ASSERT(!empty());
            MOZ_ASSERT(!Ops::isEmpty(Ops::getKey(ht->data[i].element)));
            count++;
            i++;
            seek();
        }

        /*
         * Change the key of the front entry.
         *
         * This calls Ops::hash on both the current key and the new key.
         * Ops::hash on the current key must return the same hash code as
         * when the entry was added to the table.
         */
        void rekeyFront(const Key& k) {
            MOZ_ASSERT(valid());
            Data& entry = ht->data[i];
            HashNumber oldHash = ht->prepareHash(Ops::getKey(entry.element)) >> ht->hashShift;
            HashNumber newHash = ht->prepareHash(k) >> ht->hashShift;
            Ops::setKey(entry.element, k);
            if (newHash != oldHash) {
                // Remove this entry from its old hash chain. (If this crashes
                // reading nullptr, it would mean we did not find this entry on
                // the hash chain where we expected it. That probably means the
                // key's hash code changed since it was inserted, breaking the
                // hash code invariant.)
                Data** ep = &ht->hashTable[oldHash];
                while (*ep != &entry)
                    ep = &(*ep)->chain;
                *ep = entry.chain;

                // Add it to the new hash chain. We could just insert it at the
                // beginning of the chain. Instead, we do a bit of work to
                // preserve the invariant that hash chains always go in reverse
                // insertion order (descending memory order). No code currently
                // depends on this invariant, so it's fine to kill it if
                // needed.
                ep = &ht->hashTable[newHash];
                while (*ep && *ep > &entry)
                    ep = &(*ep)->chain;
                entry.chain = *ep;
                *ep = &entry;
            }
        }

        // Offsets of Range fields, for JIT code that walks Ranges directly.
        static size_t offsetOfHashTable() {
            return offsetof(Range, ht);
        }
        static size_t offsetOfI() {
            return offsetof(Range, i);
        }
        static size_t offsetOfCount() {
            return offsetof(Range, count);
        }
        static size_t offsetOfPrevP() {
            return offsetof(Range, prevp);
        }
        static size_t offsetOfNext() {
            return offsetof(Range, next);
        }
    };

    /* Return a Range over every entry, in insertion order. */
    Range all() { return Range(this); }

    /*
     * Change the value of the given key.
     *
     * This calls Ops::hash on both the current key and the new key.
     * Ops::hash on the current key must return the same hash code as
     * when the entry was added to the table.
     */
    void rekeyOneEntry(const Key& current, const Key& newKey, const T& element) {
        if (current == newKey)
            return;

        Data* entry = lookup(current, prepareHash(current));
        if (!entry)
            return;

        HashNumber oldHash = prepareHash(current) >> hashShift;
        HashNumber newHash = prepareHash(newKey) >> hashShift;

        entry->element = element;

        // Remove this entry from its old hash chain. (If this crashes
        // reading nullptr, it would mean we did not find this entry on
        // the hash chain where we expected it. That probably means the
        // key's hash code changed since it was inserted, breaking the
        // hash code invariant.)
        Data** ep = &hashTable[oldHash];
        while (*ep != entry)
            ep = &(*ep)->chain;
        *ep = entry->chain;

        // Add it to the new hash chain. We could just insert it at the
        // beginning of the chain. Instead, we do a bit of work to
        // preserve the invariant that hash chains always go in reverse
        // insertion order (descending memory order). No code currently
        // depends on this invariant, so it's fine to kill it if
        // needed.
        ep = &hashTable[newHash];
        while (*ep && *ep > entry)
            ep = &(*ep)->chain;
        entry->chain = *ep;
        *ep = entry;
    }

    // Offsets of table fields, for JIT code that reads the table directly.
    static size_t offsetOfDataLength() {
        return offsetof(OrderedHashTable, dataLength);
    }
    static size_t offsetOfData() {
        return offsetof(OrderedHashTable, data);
    }
    static constexpr size_t offsetOfDataElement() {
        static_assert(offsetof(Data, element) == 0,
                      "RangeFront and RangePopFront depend on offsetof(Data, element) being 0");
        return offsetof(Data, element);
    }
    static constexpr size_t sizeofData() {
        return sizeof(Data);
    }

  private:
    /* Logarithm base 2 of the number of buckets in the hash table initially. */
    static uint32_t initialBucketsLog2() { return 1; }
    static uint32_t initialBuckets() { return 1 << initialBucketsLog2(); }

    /*
     * The maximum load factor (mean number of entries per bucket).
     * It is an invariant that
     *     dataCapacity == floor(hashBuckets() * fillFactor()).
     *
     * The fill factor should be between 2 and 4, and it should be chosen so that
     * the fill factor times sizeof(Data) is close to but <= a power of 2.
     * This fixed fill factor was chosen to make the size of the data
     * array, in bytes, close to a power of two when sizeof(T) is 16.
     */
    static double fillFactor() { return 8.0 / 3.0; }

    /*
     * The minimum permitted value of (liveCount / dataLength).
     * If that ratio drops below this value, we shrink the table.
     */
    static double minDataFill() { return 0.25; }

  public:
    HashNumber prepareHash(const Lookup& l) const {
        return ScrambleHashCode(Ops::hash(l, hcs));
    }

  private:
    /* The size of hashTable, in elements. Always a power of two. */
    uint32_t hashBuckets() const {
        return 1 << (HashNumberSizeBits - hashShift);
    }

    // Destroy length constructed entries, in reverse construction order.
    static void destroyData(Data* data, uint32_t length) {
        for (Data* p = data + length; p != data; )
            (--p)->~Data();
    }

    void freeData(Data* data, uint32_t length) {
        destroyData(data, length);
        alloc.free_(data);
    }

    // Walk the bucket chain for l. h must be prepareHash(l).
    Data* lookup(const Lookup& l, HashNumber h) {
        for (Data* e = hashTable[h >> hashShift]; e; e = e->chain) {
            if (Ops::match(Ops::getKey(e->element), l))
                return e;
        }
        return nullptr;
    }

    const Data* lookup(const Lookup& l) const {
        return const_cast<OrderedHashTable*>(this)->lookup(l, prepareHash(l));
    }

    /* This is called after rehashing the table. */
    void compacted() {
        // If we had any empty entries, compacting may have moved live entries
        // to the left within |data|. Notify all live Ranges of the change.
        for (Range* r = ranges; r; r = r->next)
            r->onCompact();
    }

    /* Compact the entries in |data| and rehash them. */
    void rehashInPlace() {
        for (uint32_t i = 0, N = hashBuckets(); i < N; i++)
            hashTable[i] = nullptr;
        // wp is the write pointer: live entries slide left over the holes.
        Data* wp = data;
        Data* end = data + dataLength;
        for (Data* rp = data; rp != end; rp++) {
            if (!Ops::isEmpty(Ops::getKey(rp->element))) {
                HashNumber h = prepareHash(Ops::getKey(rp->element)) >> hashShift;
                if (rp != wp)
                    wp->element = Move(rp->element);
                wp->chain = hashTable[h];
                hashTable[h] = wp;
                wp++;
            }
        }
        MOZ_ASSERT(wp == data + liveCount);

        // Destroy the trailing entries vacated by compaction.
        while (wp != end)
            (--end)->~Data();
        dataLength = liveCount;
        compacted();
    }

    /*
     * Grow, shrink, or compact both |hashTable| and |data|.
     *
     * On success, this returns true, dataLength == liveCount, and there are no
     * empty elements in data[0:dataLength]. On allocation failure, this
     * leaves everything as it was and returns false.
     */
    MOZ_MUST_USE bool rehash(uint32_t newHashShift) {
        // If the size of the table is not changing, rehash in place to avoid
        // allocating memory.
        if (newHashShift == hashShift) {
            rehashInPlace();
            return true;
        }

        size_t newHashBuckets =
            size_t(1) << (HashNumberSizeBits - newHashShift);
        Data** newHashTable = alloc.template pod_malloc<Data*>(newHashBuckets);
        if (!newHashTable)
            return false;
        for (uint32_t i = 0; i < newHashBuckets; i++)
            newHashTable[i] = nullptr;

        uint32_t newCapacity = uint32_t(newHashBuckets * fillFactor());
        Data* newData = alloc.template pod_malloc<Data>(newCapacity);
        if (!newData) {
            alloc.free_(newHashTable);
            return false;
        }

        // Move live entries into the new data vector, skipping holes, and
        // link each into its new bucket chain as we go.
        Data* wp = newData;
        Data* end = data + dataLength;
        for (Data* p = data; p != end; p++) {
            if (!Ops::isEmpty(Ops::getKey(p->element))) {
                HashNumber h = prepareHash(Ops::getKey(p->element)) >> newHashShift;
                new (wp) Data(Move(p->element), newHashTable[h]);
                newHashTable[h] = wp;
                wp++;
            }
        }
        MOZ_ASSERT(wp == newData + liveCount);

        alloc.free_(hashTable);
        freeData(data, dataLength);

        hashTable = newHashTable;
        data = newData;
        dataLength = liveCount;
        dataCapacity = newCapacity;
        hashShift = newHashShift;
        MOZ_ASSERT(hashBuckets() == newHashBuckets);

        compacted();
        return true;
    }

    // Not copyable.
    OrderedHashTable& operator=(const OrderedHashTable&) = delete;
    OrderedHashTable(const OrderedHashTable&) = delete;
};
692 :
693 : } // namespace detail
694 :
695 : template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
696 0 : class OrderedHashMap
697 : {
698 : public:
699 831 : class Entry
700 : {
701 : template <class, class, class> friend class detail::OrderedHashTable;
702 56 : void operator=(const Entry& rhs) {
703 56 : const_cast<Key&>(key) = rhs.key;
704 56 : value = rhs.value;
705 56 : }
706 :
707 11 : void operator=(Entry&& rhs) {
708 11 : MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
709 11 : const_cast<Key&>(key) = Move(rhs.key);
710 11 : value = Move(rhs.value);
711 11 : }
712 :
713 : public:
714 : Entry() : key(), value() {}
715 : template <typename V>
716 575 : Entry(const Key& k, V&& v) : key(k), value(Forward<V>(v)) {}
717 814 : Entry(Entry&& rhs) : key(Move(rhs.key)), value(Move(rhs.value)) {}
718 :
719 : const Key key;
720 : Value value;
721 :
722 0 : static size_t offsetOfKey() {
723 0 : return offsetof(Entry, key);
724 : }
725 0 : static size_t offsetOfValue() {
726 0 : return offsetof(Entry, value);
727 : }
728 : };
729 :
730 : private:
731 : struct MapOps : OrderedHashPolicy
732 : {
733 : typedef Key KeyType;
734 20 : static void makeEmpty(Entry* e) {
735 20 : OrderedHashPolicy::makeEmpty(const_cast<Key*>(&e->key));
736 :
737 : // Clear the value. Destroying it is another possibility, but that
738 : // would complicate class Entry considerably.
739 20 : e->value = Value();
740 20 : }
741 5463 : static const Key& getKey(const Entry& e) { return e.key; }
742 0 : static void setKey(Entry& e, const Key& k) { const_cast<Key&>(e.key) = k; }
743 : };
744 :
745 : typedef detail::OrderedHashTable<Entry, MapOps, AllocPolicy> Impl;
746 : Impl impl;
747 :
748 : public:
749 : typedef typename Impl::Range Range;
750 :
751 296 : OrderedHashMap(AllocPolicy ap, mozilla::HashCodeScrambler hcs) : impl(ap, hcs) {}
752 296 : MOZ_MUST_USE bool init() { return impl.init(); }
753 18 : uint32_t count() const { return impl.count(); }
754 494 : bool has(const Key& key) const { return impl.has(key); }
755 207 : Range all() { return impl.all(); }
756 : const Entry* get(const Key& key) const { return impl.get(key); }
757 595 : Entry* get(const Key& key) { return impl.get(key); }
758 37 : bool remove(const Key& key, bool* foundp) { return impl.remove(key, foundp); }
759 7 : MOZ_MUST_USE bool clear() { return impl.clear(); }
760 :
761 : template <typename V>
762 519 : MOZ_MUST_USE bool put(const Key& key, V&& value) {
763 519 : return impl.put(Entry(key, Forward<V>(value)));
764 : }
765 :
766 118 : HashNumber hash(const Key& key) const { return impl.prepareHash(key); }
767 :
768 59 : void rekeyOneEntry(const Key& current, const Key& newKey) {
769 59 : const Entry* e = get(current);
770 59 : if (!e)
771 3 : return;
772 56 : return impl.rekeyOneEntry(current, newKey, Entry(newKey, e->value));
773 : }
774 :
775 0 : static size_t offsetOfEntryKey() {
776 0 : return Entry::offsetOfKey();
777 : }
778 0 : static size_t offsetOfImplDataLength() {
779 0 : return Impl::offsetOfDataLength();
780 : }
781 0 : static size_t offsetOfImplData() {
782 0 : return Impl::offsetOfData();
783 : }
784 0 : static constexpr size_t offsetOfImplDataElement() {
785 0 : return Impl::offsetOfDataElement();
786 : }
787 0 : static constexpr size_t sizeofImplData() {
788 0 : return Impl::sizeofData();
789 : }
790 : };
791 :
792 : template <class T, class OrderedHashPolicy, class AllocPolicy>
793 0 : class OrderedHashSet
794 : {
795 : private:
796 : struct SetOps : OrderedHashPolicy
797 : {
798 : typedef const T KeyType;
799 3441 : static const T& getKey(const T& v) { return v; }
800 0 : static void setKey(const T& e, const T& v) { const_cast<T&>(e) = v; }
801 : };
802 :
803 : typedef detail::OrderedHashTable<T, SetOps, AllocPolicy> Impl;
804 : Impl impl;
805 :
806 : public:
807 : typedef typename Impl::Range Range;
808 :
809 143 : explicit OrderedHashSet(AllocPolicy ap, mozilla::HashCodeScrambler hcs) : impl(ap, hcs) {}
810 143 : MOZ_MUST_USE bool init() { return impl.init(); }
811 12 : uint32_t count() const { return impl.count(); }
812 116 : bool has(const T& value) const { return impl.has(value); }
813 82 : Range all() { return impl.all(); }
814 612 : MOZ_MUST_USE bool put(const T& value) { return impl.put(value); }
815 17 : bool remove(const T& value, bool* foundp) { return impl.remove(value, foundp); }
816 1 : MOZ_MUST_USE bool clear() { return impl.clear(); }
817 :
818 16 : HashNumber hash(const T& value) const { return impl.prepareHash(value); }
819 :
820 8 : void rekeyOneEntry(const T& current, const T& newKey) {
821 8 : return impl.rekeyOneEntry(current, newKey, newKey);
822 : }
823 :
824 0 : static size_t offsetOfEntryKey() {
825 0 : return 0;
826 : }
827 0 : static size_t offsetOfImplDataLength() {
828 0 : return Impl::offsetOfDataLength();
829 : }
830 0 : static size_t offsetOfImplData() {
831 0 : return Impl::offsetOfData();
832 : }
833 0 : static constexpr size_t offsetOfImplDataElement() {
834 0 : return Impl::offsetOfDataElement();
835 : }
836 0 : static constexpr size_t sizeofImplData() {
837 0 : return Impl::sizeofData();
838 : }
839 : };
840 :
841 : } // namespace js
842 :
843 : #endif /* ds_OrderedHashTable_h */
|