LCOV - code coverage report
Current view: top level - js/src/ds - PageProtectingVector.h
Test: output.info
Date: 2017-07-14 16:53:18
                 Hit    Total    Coverage
Lines:           138    277      49.8 %
Functions:        32     46      69.6 %
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #ifndef ds_PageProtectingVector_h
       8             : #define ds_PageProtectingVector_h
       9             : 
      10             : #include "mozilla/Atomics.h"
      11             : #include "mozilla/IntegerPrintfMacros.h"
      12             : #include "mozilla/PodOperations.h"
      13             : #include "mozilla/Types.h"
      14             : #include "mozilla/Vector.h"
      15             : 
      16             : #ifdef MALLOC_H
      17             : # include MALLOC_H
      18             : #endif
      19             : 
      20             : #include "ds/MemoryProtectionExceptionHandler.h"
      21             : #include "gc/Memory.h"
      22             : #include "js/Utility.h"
      23             : 
      24             : #ifdef MOZ_MEMORY
      25             : # ifdef XP_DARWIN
      26             : #  define malloc_usable_size malloc_size
      27             : # else
      28             : extern "C" MFBT_API size_t malloc_usable_size(MALLOC_USABLE_SIZE_CONST_PTR void* p);
      29             : # endif
      30             : #endif
      31             : 
      32             : namespace js {
      33             : 
      34             : /*
      35             :  * PageProtectingVector is a vector that can only grow or be cleared, restricts
      36             :  * access to memory pages that haven't been used yet, and marks all of its fully
      37             :  * used memory pages as read-only. It can be used to detect heap corruption in
      38             :  * important buffers, since anything that tries to write into its protected
      39             :  * pages will crash. On Nightly and Aurora, these crashes will additionally be
      40             :  * annotated with a moz crash reason using MemoryProtectionExceptionHandler.
      41             :  *
      42             :  * PageProtectingVector's protection is limited to full pages. If the front
      43             :  * of its buffer is not aligned on a page boundary, elems preceding the first
      44             :  * page boundary will not be protected. Similarly, the end of the buffer will
      45             :  * not be fully protected unless it is aligned on a page boundary. Altogether,
      46             :  * up to two pages of memory may not be protected.
      47             :  */
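                      : /*
                      :  * Editorial usage sketch (not part of the original header): a hedged
                      :  * illustration of how a client might drive this vector, assuming the
                      :  * default template parameters below and a power-of-2 element type such
                      :  * as uint32_t. The region-patching step is just one plausible workflow.
                      :  *
                      :  *   js::PageProtectingVector<uint32_t> buf;
                      :  *   if (!buf.reserve(1024))               // may reallocate; new buffer is re-protected
                      :  *       return false;
                      :  *   uint32_t words[4] = {1, 2, 3, 4};
                      :  *   buf.infallibleAppend(words, 4);       // fast path while elemsUntilTest >= 0
                      :  *
                      :  *   buf.unprotectRegion(buf.begin(), 1);  // lift protection on the containing page(s)
                      :  *   buf.begin()[0] = 0xdeadbeef;          // patch a previously written element
                      :  *   buf.reprotectRegion(buf.begin(), 1);  // restore read-only protection
                      :  */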
      48             : template<typename T,
      49             :          size_t MinInlineCapacity = 0,
      50             :          class AllocPolicy = mozilla::MallocAllocPolicy,
      51             :          bool ProtectUsed = true,
      52             :          bool ProtectUnused = true,
      53             :          size_t InitialLowerBound = 0,
      54             :          bool PoisonUnused = true,
      55             :          uint8_t PoisonPattern = 0xe3>
      56             : class PageProtectingVector final
      57             : {
      58             :     mozilla::Vector<T, MinInlineCapacity, AllocPolicy> vector;
      59             : 
      60             :     static constexpr size_t toShift(size_t v) { return v <= 1 ? 0 : 1 + toShift(v >> 1); }
      61             : 
      62             :     static_assert((sizeof(T) & (sizeof(T) - 1)) == 0, "For performance reasons, "
      63             :                   "PageProtectingVector only works with power-of-2 sized elements!");
      64             : 
      65             :     static const size_t elemShift = toShift(sizeof(T));
      66             :     static const size_t elemSize = 1 << elemShift;
      67             :     static const size_t elemMask = elemSize - 1;
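                      :     /*
                      :      * Editorial example: for sizeof(T) == 8, toShift(8) evaluates to 3,
                      :      * so elemShift == 3, elemSize == 8 and elemMask == 7.
                      :      */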
      68             : 
      69             :     /* We hardcode the page size here to minimize administrative overhead. */
      70             :     static const size_t pageShift = 12;
      71             :     static const size_t pageSize = 1 << pageShift;
      72             :     static const size_t pageMask = pageSize - 1;
      73             : 
      74             :     /*
      75             :      * The number of elements that can be added before we need to either adjust
      76             :      * the active page or resize the buffer. If |elemsUntilTest < 0| we will
      77             :      * take the slow paths in the append calls.
      78             :      */
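                      :     /*
                      :      * Editorial worked example: with 4-byte elements (elemShift == 2) and
                      :      * the write cursor 100 bytes short of the next 4 KiB page boundary,
                      :      * resetTest() below computes nextPage == 25 elements; if more than 25
                      :      * elements of spare capacity remain, elemsUntilTest becomes 25.
                      :      */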
      79             :     intptr_t elemsUntilTest;
      80             : 
      81             :     /*
      82             :      * The offset of the currently 'active' page - that is, the page that is
      83             :      * currently being written to. If both used and unused bytes are protected,
      84             :      * this will be the only (fully owned) page with read and write access.
      85             :      */
      86             :     size_t currPage;
      87             : 
      88             :     /*
      89             :      * The first fully owned page. This is the first page that can
      90             :      * be protected, but it may not be the first *active* page.
      91             :      */
      92             :     size_t initPage;
      93             : 
      94             :     /*
      95             :      * The last fully owned page. This is the last page that can
      96             :      * be protected, but it may not be the last *active* page.
      97             :      */
      98             :     size_t lastPage;
      99             : 
     100             :     /*
     101             :      * The size in elems that a buffer needs to be before its pages will be
     102             :      * protected. This is intended to reduce churn for small vectors while
     103             :      * still offering protection when they grow large enough.
     104             :      */
     105             :     size_t lowerBound;
     106             : 
     107             : #ifdef DEBUG
     108             :     bool regionUnprotected;
     109             : #endif
     110             : 
     111             :     bool usable;
     112             :     bool enabled;
     113             :     bool protectUsedEnabled;
     114             :     bool protectUnusedEnabled;
     115             : 
     116           0 :     MOZ_ALWAYS_INLINE void resetTest() {
     117           0 :         MOZ_ASSERT(protectUsedEnabled || protectUnusedEnabled);
     118           0 :         size_t nextPage = (pageSize - (uintptr_t(begin() + length()) & pageMask)) >> elemShift;
     119           0 :         size_t nextResize = capacity() - length();
     120           0 :         if (MOZ_LIKELY(nextPage <= nextResize))
     121           0 :             elemsUntilTest = intptr_t(nextPage);
     122             :         else
     123           0 :             elemsUntilTest = intptr_t(nextResize);
     124           0 :     }
     125             : 
     126        8466 :     MOZ_ALWAYS_INLINE void setTestInitial() {
     127        8466 :         if (MOZ_LIKELY(!protectUsedEnabled && !protectUnusedEnabled))
     128        8466 :             elemsUntilTest = intptr_t(capacity() - length());
     129             :         else
     130           0 :             resetTest();
     131        8466 :     }
     132             : 
     133        8466 :     MOZ_ALWAYS_INLINE void resetForNewBuffer() {
     134        8466 :         initPage = (uintptr_t(begin() - 1) >> pageShift) + 1;
     135        8466 :         currPage = (uintptr_t(begin() + length()) >> pageShift);
     136        8466 :         lastPage = (uintptr_t(begin() + capacity()) >> pageShift) - 1;
     137        8466 :         protectUsedEnabled = ProtectUsed && usable && enabled && initPage <= lastPage &&
     138             :                              (uintptr_t(begin()) & elemMask) == 0 && capacity() >= lowerBound;
     139        8466 :         protectUnusedEnabled = ProtectUnused && usable && enabled && initPage <= lastPage &&
     140             :                                (uintptr_t(begin()) & elemMask) == 0 && capacity() >= lowerBound;
     141        8466 :         setTestInitial();
     142        8466 :     }
     143             : 
     144        8466 :     MOZ_ALWAYS_INLINE void poisonNewBuffer() {
     145             :         if (!PoisonUnused)
     146             :             return;
     147        8466 :         T* addr = begin() + length();
     148        8466 :         size_t toPoison = (capacity() - length()) * sizeof(T);
     149        8466 :         memset(addr, PoisonPattern, toPoison);
     150             :     }
     151             : 
     152        8466 :     MOZ_ALWAYS_INLINE void addExceptionHandler() {
     153        8466 :         if (MOZ_UNLIKELY(protectUsedEnabled || protectUnusedEnabled))
     154           0 :             MemoryProtectionExceptionHandler::addRegion(begin(), capacity() << elemShift);
     155        8466 :     }
     156             : 
     157        8466 :     MOZ_ALWAYS_INLINE void removeExceptionHandler() {
     158        8466 :         if (MOZ_UNLIKELY(protectUsedEnabled || protectUnusedEnabled))
     159           0 :             MemoryProtectionExceptionHandler::removeRegion(begin());
     160        8466 :     }
     161             : 
     162        8466 :     MOZ_ALWAYS_INLINE void protectUsed() {
     163        8466 :         if (MOZ_LIKELY(!protectUsedEnabled))
     164        8466 :             return;
     165           0 :         if (MOZ_UNLIKELY(currPage <= initPage))
     166           0 :             return;
     167           0 :         T* addr = reinterpret_cast<T*>(initPage << pageShift);
     168           0 :         size_t size = (currPage - initPage) << pageShift;
     169           0 :         gc::MakePagesReadOnly(addr, size);
     170             :     }
     171             : 
     172        8466 :     MOZ_ALWAYS_INLINE void unprotectUsed() {
     173        8466 :         if (MOZ_LIKELY(!protectUsedEnabled))
     174        8466 :             return;
     175           0 :         if (MOZ_UNLIKELY(currPage <= initPage))
     176           0 :             return;
     177           0 :         T* addr = reinterpret_cast<T*>(initPage << pageShift);
     178           0 :         size_t size = (currPage - initPage) << pageShift;
     179           0 :         gc::UnprotectPages(addr, size);
     180             :     }
     181             : 
     182        8466 :     MOZ_ALWAYS_INLINE void protectUnused() {
     183        8466 :         if (MOZ_LIKELY(!protectUnusedEnabled))
     184        8466 :             return;
     185           0 :         if (MOZ_UNLIKELY(currPage >= lastPage))
     186           0 :             return;
     187           0 :         T* addr = reinterpret_cast<T*>((currPage + 1) << pageShift);
     188           0 :         size_t size = (lastPage - currPage) << pageShift;
     189           0 :         gc::ProtectPages(addr, size);
     190             :     }
     191             : 
     192        8466 :     MOZ_ALWAYS_INLINE void unprotectUnused() {
     193        8466 :         if (MOZ_LIKELY(!protectUnusedEnabled))
     194        8466 :             return;
     195           0 :         if (MOZ_UNLIKELY(currPage >= lastPage))
     196           0 :             return;
     197           0 :         T* addr = reinterpret_cast<T*>((currPage + 1) << pageShift);
     198           0 :         size_t size = (lastPage - currPage) << pageShift;
     199           0 :         gc::UnprotectPages(addr, size);
     200             :     }
     201             : 
     202        8466 :     MOZ_ALWAYS_INLINE void protectNewBuffer() {
     203        8466 :         resetForNewBuffer();
     204        8466 :         addExceptionHandler();
     205        8466 :         poisonNewBuffer();
     206        8466 :         protectUsed();
     207        8466 :         protectUnused();
     208        8466 :     }
     209             : 
     210        8466 :     MOZ_ALWAYS_INLINE void unprotectOldBuffer() {
     211        8466 :         MOZ_ASSERT(!regionUnprotected);
     212        8466 :         unprotectUnused();
     213        8466 :         unprotectUsed();
     214        8466 :         removeExceptionHandler();
     215        8466 :     }
     216             : 
     217           0 :     MOZ_ALWAYS_INLINE void protectUnusedPartial(size_t curr, size_t next) {
     218           0 :         if (MOZ_LIKELY(!protectUnusedEnabled))
     219           0 :             return;
     220           0 :         if (MOZ_UNLIKELY(next > lastPage))
     221           0 :             --next;
     222           0 :         if (MOZ_UNLIKELY(next == curr))
     223           0 :             return;
     224           0 :         void* addr = reinterpret_cast<T*>((curr + 1) << pageShift);
     225           0 :         size_t size = (next - curr) << pageShift;
     226           0 :         gc::ProtectPages(addr, size);
     227             :     }
     228             : 
     229           0 :     MOZ_ALWAYS_INLINE void unprotectUnusedPartial(size_t curr, size_t next) {
     230           0 :         if (MOZ_LIKELY(!protectUnusedEnabled))
     231           0 :             return;
     232           0 :         if (MOZ_UNLIKELY(next > lastPage))
     233           0 :             --next;
     234           0 :         if (MOZ_UNLIKELY(next == curr))
     235           0 :             return;
     236           0 :         void* addr = reinterpret_cast<T*>((curr + 1) << pageShift);
     237           0 :         size_t size = (next - curr) << pageShift;
     238           0 :         gc::UnprotectPages(addr, size);
     239             :     }
     240             : 
     241           0 :     MOZ_ALWAYS_INLINE void protectUsedPartial(size_t curr, size_t next) {
     242           0 :         if (MOZ_LIKELY(!protectUsedEnabled))
     243           0 :             return;
     244           0 :         if (MOZ_UNLIKELY(curr < initPage))
     245           0 :             ++curr;
     246           0 :         if (MOZ_UNLIKELY(next == curr))
     247           0 :             return;
     248           0 :         void* addr = reinterpret_cast<T*>(curr << pageShift);
     249           0 :         size_t size = (next - curr) << pageShift;
     250           0 :         gc::MakePagesReadOnly(addr, size);
     251             :     }
     252             : 
     253        3963 :     MOZ_ALWAYS_INLINE MOZ_MUST_USE bool reserveNewBuffer(size_t size) {
     254        3963 :         unprotectOldBuffer();
     255        3963 :         bool ret = vector.reserve(size);
     256        3963 :         protectNewBuffer();
     257        3963 :         return ret;
     258             :     }
     259             : 
     260             :     template<typename U>
     261           0 :     MOZ_ALWAYS_INLINE void infallibleAppendNewPage(const U* values, size_t size) {
     262           0 :         size_t nextPage = uintptr_t(begin() + length() + size) >> pageShift;
     263           0 :         MOZ_ASSERT(currPage < nextPage);
     264           0 :         unprotectUnusedPartial(currPage, nextPage);
     265           0 :         vector.infallibleAppend(values, size);
     266           0 :         protectUsedPartial(currPage, nextPage);
     267           0 :         currPage = nextPage;
     268           0 :         resetTest();
     269           0 :     }
     270             : 
     271             :     template<typename U>
     272           0 :     MOZ_ALWAYS_INLINE MOZ_MUST_USE bool appendNewPage(const U* values, size_t size) {
     273           0 :         size_t nextPage = uintptr_t(begin() + length() + size) >> pageShift;
     274           0 :         MOZ_ASSERT(currPage < nextPage);
     275           0 :         unprotectUnusedPartial(currPage, nextPage);
     276           0 :         bool ret = vector.append(values, size);
     277           0 :         if (MOZ_LIKELY(ret)) {
     278           0 :             protectUsedPartial(currPage, nextPage);
     279           0 :             currPage = nextPage;
     280             :         } else {
     281           0 :             protectUnusedPartial(currPage, nextPage);
     282             :         }
     283           0 :         resetTest();
     284           0 :         return ret;
     285             :     }
     286             : 
     287             :     template<typename U>
     288           0 :     MOZ_ALWAYS_INLINE MOZ_MUST_USE bool appendNewBuffer(const U* values, size_t size) {
     289           0 :         unprotectOldBuffer();
     290           0 :         bool ret = vector.append(values, size);
     291           0 :         protectNewBuffer();
     292           0 :         return ret;
     293             :     }
     294             : 
     295             :     MOZ_NEVER_INLINE void unprotectRegionSlow(uintptr_t l, uintptr_t r);
     296             :     MOZ_NEVER_INLINE void reprotectRegionSlow(uintptr_t l, uintptr_t r);
     297             : 
     298             :     MOZ_NEVER_INLINE MOZ_MUST_USE bool reserveSlow(size_t size);
     299             : 
     300             :     template<typename U>
     301             :     MOZ_NEVER_INLINE void infallibleAppendSlow(const U* values, size_t size);
     302             : 
     303             :     template<typename U>
     304             :     MOZ_NEVER_INLINE MOZ_MUST_USE bool appendSlow(const U* values, size_t size);
     305             : 
     306             :   public:
     307        4503 :     explicit PageProtectingVector(AllocPolicy policy = AllocPolicy())
     308             :       : vector(policy),
     309             :         elemsUntilTest(0),
     310             :         currPage(0),
     311             :         initPage(0),
     312             :         lastPage(0),
     313             :         lowerBound(InitialLowerBound),
     314             : #ifdef DEBUG
     315             :         regionUnprotected(false),
     316             : #endif
     317             :         usable(true),
     318             :         enabled(true),
     319             :         protectUsedEnabled(false),
     320        4503 :         protectUnusedEnabled(false)
     321             :     {
     322        4503 :         if (gc::SystemPageSize() != pageSize)
     323           0 :             usable = false;
     324        4503 :         protectNewBuffer();
     325        4503 :     }
     326             : 
     327        4503 :     ~PageProtectingVector() { unprotectOldBuffer(); }
     328             : 
     329           0 :     void disableProtection() {
     330           0 :         MOZ_ASSERT(enabled);
     331           0 :         unprotectOldBuffer();
     332           0 :         enabled = false;
     333           0 :         resetForNewBuffer();
     334           0 :     }
     335             : 
     336             :     void enableProtection() {
     337             :         MOZ_ASSERT(!enabled);
     338             :         enabled = true;
     339             :         protectNewBuffer();
     340             :     }
     341             : 
     342             :     /*
     343             :      * Sets the lower bound on the size, in elems, that this vector's underlying
     344             :      * capacity has to be before its used pages will be protected.
     345             :      */
     346             :     void setLowerBoundForProtection(size_t elems) {
     347             :         if (lowerBound != elems) {
     348             :             unprotectOldBuffer();
     349             :             lowerBound = elems;
     350             :             protectNewBuffer();
     351             :         }
     352             :     }
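                      :     /*
                      :      * Editorial example: a hypothetical client could require, say, two full
                      :      * pages of capacity before paying for protection:
                      :      *
                      :      *   buf.setLowerBoundForProtection(2 * 4096 / sizeof(uint32_t));  // 2048 elems for uint32_t
                      :      *
                      :      * Smaller buffers then skip the protection churn entirely.
                      :      */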
     353             : 
     354             :     /* Disable protection on the smallest containing region. */
     355             :     MOZ_ALWAYS_INLINE void unprotectRegion(T* first, size_t size) {
     356             : #ifdef DEBUG
     357             :         regionUnprotected = true;
     358             : #endif
     359             :         if (MOZ_UNLIKELY(protectUsedEnabled)) {
     360             :             uintptr_t l = uintptr_t(first) >> pageShift;
     361             :             uintptr_t r = uintptr_t(first + size - 1) >> pageShift;
     362             :             if (r >= initPage && l < currPage)
     363             :                 unprotectRegionSlow(l, r);
     364             :         }
     365             :     }
     366             : 
     367             :     /* Re-enable protection on the smallest containing region. */
     368             :     MOZ_ALWAYS_INLINE void reprotectRegion(T* first, size_t size) {
     369             : #ifdef DEBUG
     370             :         regionUnprotected = false;
     371             : #endif
     372             :         if (MOZ_UNLIKELY(protectUsedEnabled)) {
     373             :             uintptr_t l = uintptr_t(first) >> pageShift;
     374             :             uintptr_t r = uintptr_t(first + size - 1) >> pageShift;
     375             :             if (r >= initPage && l < currPage)
     376             :                 reprotectRegionSlow(l, r);
     377             :         }
     378             :     }
     379             : 
     380     1145327 :     MOZ_ALWAYS_INLINE size_t capacity() const { return vector.capacity(); }
     381     1816061 :     MOZ_ALWAYS_INLINE size_t length() const { return vector.length(); }
     382             : 
     383      176463 :     MOZ_ALWAYS_INLINE T* begin() { return vector.begin(); }
     384        4499 :     MOZ_ALWAYS_INLINE const T* begin() const { return vector.begin(); }
     385             : 
     386           0 :     void clear() {
     387           0 :         unprotectOldBuffer();
     388           0 :         vector.clear();
     389           0 :         protectNewBuffer();
     390           0 :     }
     391             : 
     392     1119916 :     MOZ_ALWAYS_INLINE MOZ_MUST_USE bool reserve(size_t size) {
     393     1119916 :         if (MOZ_LIKELY(size <= capacity()))
     394     1115973 :             return vector.reserve(size);
     395        3963 :         return reserveSlow(size);
     396             :     }
     397             : 
     398             :     template<typename U>
     399     3724009 :     MOZ_ALWAYS_INLINE void infallibleAppend(const U* values, size_t size) {
     400     3724009 :         elemsUntilTest -= size;
     401     3724009 :         if (MOZ_LIKELY(elemsUntilTest >= 0))
     402     3724009 :             return vector.infallibleAppend(values, size);
     403           0 :         infallibleAppendSlow(values, size);
     404             :     }
     405             : 
     406             :     template<typename U>
     407         306 :     MOZ_ALWAYS_INLINE MOZ_MUST_USE bool append(const U* values, size_t size) {
     408         306 :         elemsUntilTest -= size;
     409         306 :         if (MOZ_LIKELY(elemsUntilTest >= 0))
     410         306 :             return vector.append(values, size);
     411           0 :         return appendSlow(values, size);
     412             :     }
     413             : };
     414             : 
     415             : template<typename T, size_t A, class B, bool C, bool D, size_t E, bool F, uint8_t G>
     416             : MOZ_NEVER_INLINE void
     417             : PageProtectingVector<T, A, B, C, D, E, F, G>::unprotectRegionSlow(uintptr_t l, uintptr_t r)
     418             : {
     419             :     if (l < initPage)
     420             :         l = initPage;
     421             :     if (r >= currPage)
     422             :         r = currPage - 1;
     423             :     T* addr = reinterpret_cast<T*>(l << pageShift);
     424             :     size_t size = (r - l + 1) << pageShift;
     425             :     gc::UnprotectPages(addr, size);
     426             : }
     427             : 
     428             : template<typename T, size_t A, class B, bool C, bool D, size_t E, bool F, uint8_t G>
     429             : MOZ_NEVER_INLINE void
     430             : PageProtectingVector<T, A, B, C, D, E, F, G>::reprotectRegionSlow(uintptr_t l, uintptr_t r)
     431             : {
     432             :     if (l < initPage)
     433             :         l = initPage;
     434             :     if (r >= currPage)
     435             :         r = currPage - 1;
     436             :     T* addr = reinterpret_cast<T*>(l << pageShift);
     437             :     size_t size = (r - l + 1) << pageShift;
     438             :     gc::MakePagesReadOnly(addr, size);
     439             : }
     440             : 
     441             : template<typename T, size_t A, class B, bool C, bool D, size_t E, bool F, uint8_t G>
     442             : MOZ_NEVER_INLINE MOZ_MUST_USE bool
     443        3963 : PageProtectingVector<T, A, B, C, D, E, F, G>::reserveSlow(size_t size)
     444             : {
     445        3963 :     return reserveNewBuffer(size);
     446             : }
     447             : 
     448             : template<typename T, size_t A, class B, bool C, bool D, size_t E, bool F, uint8_t G>
     449             : template<typename U>
     450             : MOZ_NEVER_INLINE void
     451           0 : PageProtectingVector<T, A, B, C, D, E, F, G>::infallibleAppendSlow(const U* values, size_t size)
     452             : {
     453             :     // Ensure that we're here because we reached a page
     454             :     // boundary and not because of a buffer overflow.
     455           0 :     MOZ_RELEASE_ASSERT(MOZ_LIKELY(length() + size <= capacity()),
     456             :                        "About to overflow our AssemblerBuffer using infallibleAppend!");
     457           0 :     infallibleAppendNewPage(values, size);
     458           0 : }
     459             : 
     460             : template<typename T, size_t A, class B, bool C, bool D, size_t E, bool F, uint8_t G>
     461             : template<typename U>
     462             : MOZ_NEVER_INLINE MOZ_MUST_USE bool
     463           0 : PageProtectingVector<T, A, B, C, D, E, F, G>::appendSlow(const U* values, size_t size)
     464             : {
     465           0 :     if (MOZ_LIKELY(length() + size <= capacity()))
     466           0 :         return appendNewPage(values, size);
     467           0 :     return appendNewBuffer(values, size);
     468             : }
     469             : 
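                      : /*
                      :  * Descriptive note (added editorially): ProtectedReallocPolicy is an
                      :  * allocation policy that wraps js_pod_malloc / js_pod_realloc / js_free
                      :  * while remembering the current allocation's address and size. On realloc
                      :  * it copies the old bytes into a temporary buffer, compares the old,
                      :  * temporary and reallocated copies, scans them for runs of the 0xe5 poison
                      :  * pattern, and crashes with diagnostic annotations on any mismatch, to
                      :  * help pin down heap corruption in the underlying buffer.
                      :  */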
     470             : class ProtectedReallocPolicy
     471             : {
     472             :     uintptr_t currAddr;
     473             :     size_t currSize;
     474             :     uintptr_t prevAddr;
     475             :     size_t prevSize;
     476             : 
     477             :     static const uint8_t PoisonPattern = 0xe5;
     478             : 
     479        5172 :     template <typename T> void update(T* newAddr, size_t newSize) {
     480        5172 :         prevAddr = currAddr;
     481        5172 :         prevSize = currSize;
     482        5172 :         currAddr = uintptr_t(newAddr);
     483        5172 :         currSize = newSize * sizeof(T);
     484        5172 :     }
     485             : 
     486        1209 :     template <typename T> void updateIfValid(T* newAddr, size_t newSize) {
     487        1209 :         if (newAddr)
     488        1209 :             update<T>(newAddr, newSize);
     489        1209 :     }
     490             : 
     491           0 :     template <typename T> T* reallocUpdate(T* oldAddr, size_t oldSize, size_t newSize) {
     492           0 :         T* newAddr = js_pod_realloc<T>(oldAddr, oldSize, newSize);
     493           0 :         updateIfValid<T>(newAddr, newSize);
     494           0 :         return newAddr;
     495             :     }
     496             : 
     497           0 :     void crashWithInfo(const uint8_t* buffer, size_t bytes, const char* type) {
     498           0 :         size_t start = 0;
     499           0 :         while (start < bytes) {
     500           0 :             if (MOZ_LIKELY(buffer[start] != PoisonPattern)) {
     501           0 :                 ++start;
     502           0 :                 continue;
     503             :             }
     504             :             size_t limit;
     505           0 :             for (limit = start + 1; limit < bytes && buffer[limit] == PoisonPattern; ++limit);
     506           0 :             size_t size = limit - start;
     507           0 :             if (size >= 16) {
     508           0 :                 MOZ_CRASH_UNSAFE_PRINTF("maybe_pod_realloc: %s buffer (old size = %" PRIu64
     509             :                                         ") contains %" PRIu64 " bytes of poison starting from"
     510             :                                         " offset %" PRIu64 "!", type, uint64_t(bytes),
     511             :                                         uint64_t(size), uint64_t(start));
     512             :             }
     513           0 :             start = limit;
     514             :         }
     515           0 :         MOZ_CRASH("Could not confirm the presence of poison!");
     516             :     }
     517             : 
     518             :   public:
     519        4503 :     ProtectedReallocPolicy() : currAddr(0), currSize(0), prevAddr(0), prevSize(0) {}
     520             : 
     521       27018 :     ~ProtectedReallocPolicy() {
     522       13509 :         MOZ_RELEASE_ASSERT(!currSize && !currAddr);
     523       13509 :     }
     524             : 
     525        1209 :     template <typename T> T* maybe_pod_malloc(size_t numElems) {
     526        1209 :         MOZ_RELEASE_ASSERT(!currSize && !currAddr);
     527        1209 :         T* addr = js_pod_malloc<T>(numElems);
     528        1209 :         updateIfValid<T>(addr, numElems);
     529        1209 :         return addr;
     530             :     }
     531             : 
     532             :     template <typename T> T* maybe_pod_calloc(size_t numElems) {
     533             :         MOZ_RELEASE_ASSERT(!currSize && !currAddr);
     534             :         T* addr = js_pod_calloc<T>(numElems);
     535             :         updateIfValid<T>(addr, numElems);
     536             :         return addr;
     537             :     }
     538             : 
     539        2754 :     template <typename T> T* maybe_pod_realloc(T* oldAddr, size_t oldSize, size_t newSize) {
     540        2754 :         if (uintptr_t(oldAddr) != currAddr) {
     541           0 :             MOZ_CRASH_UNSAFE_PRINTF("maybe_pod_realloc: oldAddr and currAddr don't match "
     542             :                                     "(0x%" PRIx64 " != 0x%" PRIx64 ", %" PRIu64 ")!",
     543             :                                     uint64_t(oldAddr), uint64_t(currAddr), uint64_t(currSize));
     544             :         }
     545        2754 :         if (oldSize * sizeof(T) != currSize) {
     546           0 :             MOZ_CRASH_UNSAFE_PRINTF("maybe_pod_realloc: oldSize and currSize don't match "
     547             :                                     "(%" PRIu64 " != %" PRIu64 ", 0x%" PRIx64 ")!",
     548             :                                     uint64_t(oldSize * sizeof(T)), uint64_t(currSize),
     549             :                                     uint64_t(currAddr));
     550             :         }
     551             : 
     552        2754 :         MOZ_ASSERT_IF(oldAddr, oldSize);
     553        2754 :         if (MOZ_UNLIKELY(!newSize))
     554           0 :             return nullptr;
     555        2754 :         if (MOZ_UNLIKELY(!oldAddr))
     556           0 :             return maybe_pod_malloc<T>(newSize);
     557             : 
     558             : #ifdef MOZ_MEMORY
     559        2754 :         size_t usableSize = malloc_usable_size(oldAddr);
     560        2754 :         if (usableSize < currSize) {
     561           0 :             MOZ_CRASH_UNSAFE_PRINTF("maybe_pod_realloc: usableSize < currSize "
     562             :                                     "(%" PRIu64 " < %" PRIu64 ", %" PRIu64 ", %s)!",
     563             :                                     uint64_t(usableSize), uint64_t(currSize),
     564             :                                     uint64_t(prevSize), prevAddr == currAddr ? "true" : "false");
     565             :         }
     566             : #endif
     567             : 
     568        2754 :         size_t bytes = (newSize >= oldSize ? oldSize : newSize) * sizeof(T);
     569             : 
     570             :         // Check for the poison pattern every so often.
     571        2754 :         const uint8_t* oldAddrBytes = reinterpret_cast<const uint8_t*>(oldAddr);
     572        9408 :         for (size_t j, i = 0; i + 16 <= bytes; i += 1024) {
     573        6654 :             for (j = 0; j < 16 && oldAddrBytes[i + j] == PoisonPattern; ++j);
     574        6654 :             if (MOZ_UNLIKELY(j == 16))
     575           0 :                 crashWithInfo(oldAddrBytes, bytes, "old");
     576             :         }
     577             : 
     578        2754 :         T* tmpAddr = js_pod_malloc<T>(newSize);
     579        2754 :         if (MOZ_UNLIKELY(!tmpAddr))
     580           0 :             return reallocUpdate<T>(oldAddr, oldSize, newSize);
     581             : 
     582        2754 :         memcpy(tmpAddr, oldAddr, bytes);
     583             : 
     584        2754 :         const uint8_t* tmpAddrBytes = reinterpret_cast<const uint8_t*>(tmpAddr);
     585        9408 :         for (size_t j, i = 0; i + 16 <= bytes; i += 1024) {
     586        6654 :             for (j = 0; j < 16 && tmpAddrBytes[i + j] == PoisonPattern; ++j);
     587        6654 :             if (MOZ_UNLIKELY(j == 16))
     588           0 :                 crashWithInfo(tmpAddrBytes, bytes, "tmp");
     589             :         }
     590             : 
     591        2754 :         if (!mozilla::PodEqual(oldAddrBytes, tmpAddrBytes, bytes))
     592           0 :             MOZ_CRASH("maybe_pod_realloc: tmp buffer doesn't match old buffer!");
     593             : 
     594        2754 :         T* newAddr = js_pod_realloc<T>(oldAddr, oldSize, newSize);
     595        2754 :         if (MOZ_UNLIKELY(!newAddr)) {
     596           0 :             js_free(tmpAddr);
     597           0 :             return reallocUpdate<T>(oldAddr, oldSize, newSize);
     598             :         }
     599             : 
     600        2754 :         const uint8_t* newAddrBytes = reinterpret_cast<const uint8_t*>(newAddr);
     601        9408 :         for (size_t j, i = 0; i + 16 <= bytes; i += 1024) {
     602        6654 :             for (j = 0; j < 16 && newAddrBytes[i + j] == PoisonPattern; ++j);
     603        6654 :             if (MOZ_UNLIKELY(j == 16))
     604           0 :                 crashWithInfo(newAddrBytes, bytes, "new");
     605             :         }
     606             : 
     607        2754 :         if (!mozilla::PodEqual(tmpAddrBytes, newAddrBytes, bytes)) {
     608             : #ifdef MOZ_MEMORY
     609           0 :             MOZ_CRASH_UNSAFE_PRINTF("maybe_pod_realloc: buffers don't match "
     610             :                                     "(%" PRIu64 " >= %" PRIu64 ", %" PRIu64 ", %s)!",
     611             :                                     uint64_t(usableSize), uint64_t(currSize),
     612             :                                     uint64_t(prevSize), prevAddr == currAddr ? "true" : "false");
     613             : #else
     614             :             MOZ_CRASH_UNSAFE_PRINTF("maybe_pod_realloc: buffers don't match "
     615             :                                     "(%" PRIu64 ", %" PRIu64 ", %s)!",
     616             :                                     uint64_t(currSize), uint64_t(prevSize),
     617             :                                     prevAddr == currAddr ? "true" : "false");
     618             : #endif
     619             :         }
     620             : 
     621        2754 :         js_free(tmpAddr);
     622        2754 :         update<T>(newAddr, newSize);
     623        2754 :         return newAddr;
     624             :     }
     625             : 
     626        1209 :     template <typename T> T* pod_malloc(size_t numElems) { return maybe_pod_malloc<T>(numElems); }
     627             :     template <typename T> T* pod_calloc(size_t numElems) { return maybe_pod_calloc<T>(numElems); }
     628        2754 :     template <typename T> T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
     629        2754 :         return maybe_pod_realloc<T>(p, oldSize, newSize);
     630             :     }
     631             : 
     632        1209 :     void free_(void* p) {
     633        1209 :         MOZ_RELEASE_ASSERT(uintptr_t(p) == currAddr);
     634             : #ifdef MOZ_MEMORY
     635        1209 :         size_t usableSize = malloc_usable_size(p);
     636        1209 :         if (usableSize < currSize) {
     637           0 :             MOZ_CRASH_UNSAFE_PRINTF("free_: usableSize < currSize "
     638             :                                     "(%" PRIu64 " < %" PRIu64 ", %" PRIu64 ", %s)!",
     639             :                                     uint64_t(usableSize), uint64_t(currSize),
     640             :                                     uint64_t(prevSize), prevAddr == currAddr ? "true" : "false");
     641             :         }
     642             : #endif
     643        1209 :         js_free(p);
     644        1209 :         update<uint8_t>(0, 0);
     645        1209 :     }
     646             : 
     647           0 :     void reportAllocOverflow() const {}
     648      962369 :     bool checkSimulatedOOM() const {
     649      962369 :         return !js::oom::ShouldFailWithOOM();
     650             :     }
     651             : };
     652             : 
     653             : } /* namespace js */
     654             : 
     655             : #endif /* ds_PageProtectingVector_h */

Generated by: LCOV version 1.13