LCOV - code coverage report
Current view: top level - js/src/gc - Memory.cpp (source / functions)
Test: output.info
Date: 2017-07-14 16:53:18
Coverage: Lines: 70 of 184 hit (38.0 %) | Functions: 12 of 20 hit (60.0 %)
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #include "gc/Memory.h"
       8             : 
       9             : #include "mozilla/Atomics.h"
      10             : #include "mozilla/TaggedAnonymousMemory.h"
      11             : 
      12             : #include "js/HeapAPI.h"
      13             : #include "vm/Runtime.h"
      14             : 
      15             : #if defined(XP_WIN)
      16             : 
      17             : #include "mozilla/Sprintf.h"
      18             : #include "jswin.h"
      19             : #include <psapi.h>
      20             : 
      21             : #elif defined(SOLARIS)
      22             : 
      23             : #include <sys/mman.h>
      24             : #include <unistd.h>
      25             : 
      26             : #elif defined(XP_UNIX)
      27             : 
      28             : #include <algorithm>
      29             : #include <errno.h>
      30             : #include <sys/mman.h>
      31             : #include <sys/resource.h>
      32             : #include <sys/stat.h>
      33             : #include <sys/types.h>
      34             : #include <unistd.h>
      35             : 
      36             : #endif
      37             : 
      38             : namespace js {
      39             : namespace gc {
      40             : 
      41             : // The GC can only safely decommit memory when the page size of the
      42             : // running process matches the compiled arena size.
      43             : static size_t pageSize = 0;
      44             : 
      45             : // The OS allocation granularity may not match the page size.
      46             : static size_t allocGranularity = 0;
      47             : 
      48             : #if defined(XP_UNIX)
      49             : // The addresses handed out by mmap may grow up or down.
      50             : static mozilla::Atomic<int, mozilla::Relaxed> growthDirection(0);
      51             : #endif
      52             : 
      53             : // Data from OOM crashes shows there may be up to 24 chunk-sized but unusable
      54             : // chunks available in low-memory situations. These chunks may all need to be
      55             : // used up before we gain access to the remaining *alignable* chunk-sized
      56             : // regions, so we use a generous limit of 32 unusable chunks to reach them.
      57             : static const int MaxLastDitchAttempts = 32;
      58             : 
      59             : static void GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size, size_t alignment);
      60             : static void* MapAlignedPagesSlow(size_t size, size_t alignment);
      61             : static void* MapAlignedPagesLastDitch(size_t size, size_t alignment);
      62             : 
      63             : size_t
      64       13523 : SystemPageSize()
      65             : {
      66       13523 :     return pageSize;
      67             : }
      68             : 
      69             : static bool
      70        6707 : DecommitEnabled()
      71             : {
      72        6707 :     return pageSize == ArenaSize;
      73             : }
      74             : 
      75             : /*
      76             :  * Returns the offset of address p from the nearest aligned address at or
      77             :  * below p; equivalently, the number of unaligned bytes at the end of the
      78             :  * region starting at p (since allocation sizes are asserted to be integer
      79             :  * multiples of the alignment).
      80             :  */
      81             : static inline size_t
      82        6762 : OffsetFromAligned(void* p, size_t alignment)
      83             : {
      84        6762 :     return uintptr_t(p) % alignment;
      85             : }
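
A minimal standalone sketch (not part of Memory.cpp; the address and sizes are
hypothetical) of the two equivalent readings described in the comment above:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static size_t OffsetFromAligned(void* p, size_t alignment)
    {
        return uintptr_t(p) % alignment;
    }

    int main()
    {
        const size_t alignment = 1 << 20;  // e.g. 1 MiB chunk alignment
        const size_t size = 4 * alignment; // allocation size, a multiple of alignment
        void* p = reinterpret_cast<void*>((uintptr_t(1) << 30) + 0x4000);
        // Reading 1: p is 0x4000 bytes past the aligned address at or below it.
        assert(OffsetFromAligned(p, alignment) == 0x4000);
        // Reading 2: since size % alignment == 0, the region [p, p + size) also
        // ends 0x4000 bytes past an aligned address.
        void* end = reinterpret_cast<void*>(uintptr_t(p) + size);
        assert(OffsetFromAligned(end, alignment) == 0x4000);
        return 0;
    }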
      86             : 
      87             : void*
      88           0 : TestMapAlignedPagesLastDitch(size_t size, size_t alignment)
      89             : {
      90           0 :     return MapAlignedPagesLastDitch(size, alignment);
      91             : }
      92             : 
      93             : 
      94             : #if defined(XP_WIN)
      95             : 
      96             : void
      97             : InitMemorySubsystem()
      98             : {
      99             :     if (pageSize == 0) {
     100             :         SYSTEM_INFO sysinfo;
     101             :         GetSystemInfo(&sysinfo);
     102             :         pageSize = sysinfo.dwPageSize;
     103             :         allocGranularity = sysinfo.dwAllocationGranularity;
     104             :     }
     105             : }
     106             : 
     107             : #  if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
     108             : 
     109             : static inline void*
     110             : MapMemoryAt(void* desired, size_t length, int flags, int prot = PAGE_READWRITE)
     111             : {
     112             :     return VirtualAlloc(desired, length, flags, prot);
     113             : }
     114             : 
     115             : static inline void*
     116             : MapMemory(size_t length, int flags, int prot = PAGE_READWRITE)
     117             : {
     118             :     return VirtualAlloc(nullptr, length, flags, prot);
     119             : }
     120             : 
     121             : void*
     122             : MapAlignedPages(size_t size, size_t alignment)
     123             : {
     124             :     MOZ_ASSERT(size >= alignment);
     125             :     MOZ_ASSERT(size >= allocGranularity);
     126             :     MOZ_ASSERT(size % alignment == 0);
     127             :     MOZ_ASSERT(size % pageSize == 0);
     128             :     MOZ_ASSERT_IF(alignment < allocGranularity, allocGranularity % alignment == 0);
     129             :     MOZ_ASSERT_IF(alignment > allocGranularity, alignment % allocGranularity == 0);
     130             : 
     131             :     void* p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
     132             : 
     133             :     /* Special case: If we want allocation alignment, no further work is needed. */
     134             :     if (alignment == allocGranularity)
     135             :         return p;
     136             : 
     137             :     if (OffsetFromAligned(p, alignment) == 0)
     138             :         return p;
     139             : 
     140             :     void* retainedAddr;
     141             :     GetNewChunk(&p, &retainedAddr, size, alignment);
     142             :     if (retainedAddr)
     143             :         UnmapPages(retainedAddr, size);
     144             :     if (p) {
     145             :         if (OffsetFromAligned(p, alignment) == 0)
     146             :             return p;
     147             :         UnmapPages(p, size);
     148             :     }
     149             : 
     150             :     p = MapAlignedPagesSlow(size, alignment);
     151             :     if (!p)
     152             :         return MapAlignedPagesLastDitch(size, alignment);
     153             : 
     154             :     MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
     155             :     return p;
     156             : }
     157             : 
     158             : static void*
     159             : MapAlignedPagesSlow(size_t size, size_t alignment)
     160             : {
     161             :     /*
     162             :      * Windows requires that there be a 1:1 mapping between VM allocation
     163             :      * and deallocation operations.  Therefore, take care here to acquire the
     164             :      * final result via one mapping operation.  This means unmapping any
     165             :      * preliminary result that is not correctly aligned.
     166             :      */
     167             :     void* p;
     168             :     do {
     169             :         /*
     170             :          * Over-allocate in order to map a memory region that is definitely
     171             :          * large enough, then deallocate and allocate again the correct size,
     172             :          * within the over-sized mapping.
     173             :          *
     174             :          * Since we're going to unmap the whole thing anyway, the first
     175             :          * mapping doesn't have to commit pages.
     176             :          */
     177             :         size_t reserveSize = size + alignment - pageSize;
     178             :         p = MapMemory(reserveSize, MEM_RESERVE);
     179             :         if (!p)
     180             :             return nullptr;
     181             :         void* chunkStart = (void*)AlignBytes(uintptr_t(p), alignment);
     182             :         UnmapPages(p, reserveSize);
     183             :         p = MapMemoryAt(chunkStart, size, MEM_COMMIT | MEM_RESERVE);
     184             : 
     185             :         /* Failure here indicates a race with another thread, so try again. */
     186             :     } while (!p);
     187             : 
     188             :     return p;
     189             : }
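
The rounding step above depends on AlignBytes rounding an address up to the
next multiple of the alignment. A standalone equivalent, assuming a
power-of-two alignment (the real helper lives in SpiderMonkey's utility
headers):

    #include <cassert>
    #include <cstdint>

    static uintptr_t AlignBytes(uintptr_t bytes, uintptr_t alignment)
    {
        // Assumes alignment is a power of two.
        return (bytes + alignment - 1) & ~(alignment - 1);
    }

    int main()
    {
        const uintptr_t alignment = 1 << 20;
        assert(AlignBytes(0x103000, alignment) == 0x200000); // rounded up
        assert(AlignBytes(0x200000, alignment) == 0x200000); // already aligned
        // Reserving size + alignment - pageSize bytes guarantees that the
        // rounded-up start plus |size| still fits inside the reservation.
        return 0;
    }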
     190             : 
     191             : /*
     192             :  * In a low memory or high fragmentation situation, alignable chunks of the
     193             :  * desired size may still be available, even if there are no more contiguous
     194             :  * free chunks that meet the |size + alignment - pageSize| requirement of
     195             :  * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
     196             :  * by temporarily holding onto the unaligned parts of each chunk until the
     197             :  * allocator gives us a chunk that either is, or can be aligned.
     198             :  */
     199             : static void*
     200             : MapAlignedPagesLastDitch(size_t size, size_t alignment)
     201             : {
     202             :     void* tempMaps[MaxLastDitchAttempts];
     203             :     int attempt = 0;
     204             :     void* p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
     205             :     if (OffsetFromAligned(p, alignment) == 0)
     206             :         return p;
     207             :     for (; attempt < MaxLastDitchAttempts; ++attempt) {
     208             :         GetNewChunk(&p, tempMaps + attempt, size, alignment);
     209             :         if (OffsetFromAligned(p, alignment) == 0) {
     210             :             if (tempMaps[attempt])
     211             :                 UnmapPages(tempMaps[attempt], size);
     212             :             break;
     213             :         }
     214             :         if (!tempMaps[attempt])
     215             :             break; /* Bail if GetNewChunk failed. */
     216             :     }
     217             :     if (OffsetFromAligned(p, alignment)) {
     218             :         UnmapPages(p, size);
     219             :         p = nullptr;
     220             :     }
     221             :     while (--attempt >= 0)
     222             :         UnmapPages(tempMaps[attempt], size);
     223             :     return p;
     224             : }
     225             : 
     226             : /*
     227             :  * On Windows, map and unmap calls must be matched, so we deallocate the
     228             :  * unaligned chunk, then reallocate the unaligned part to block off the
     229             :  * old address and force the allocator to give us a new one.
     230             :  */
     231             : static void
     232             : GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size, size_t alignment)
     233             : {
     234             :     void* address = *aAddress;
     235             :     void* retainedAddr = nullptr;
     236             :     do {
     237             :         size_t retainedSize;
     238             :         size_t offset = OffsetFromAligned(address, alignment);
     239             :         if (!offset)
     240             :             break;
     241             :         UnmapPages(address, size);
     242             :         retainedSize = alignment - offset;
     243             :         retainedAddr = MapMemoryAt(address, retainedSize, MEM_RESERVE);
     244             :         address = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
     245             :         /* If retainedAddr is null here, we raced with another thread. */
     246             :     } while (!retainedAddr);
     247             :     *aAddress = address;
     248             :     *aRetainedAddr = retainedAddr;
     249             : }
     250             : 
     251             : void
     252             : UnmapPages(void* p, size_t size)
     253             : {
     254             :     MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
     255             : }
     256             : 
     257             : bool
     258             : MarkPagesUnused(void* p, size_t size)
     259             : {
     260             :     if (!DecommitEnabled())
     261             :         return true;
     262             : 
     263             :     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     264             :     LPVOID p2 = MapMemoryAt(p, size, MEM_RESET);
     265             :     return p2 == p;
     266             : }
     267             : 
     268             : void
     269             : MarkPagesInUse(void* p, size_t size)
     270             : {
     271             :     if (!DecommitEnabled())
     272             :         return;
     273             : 
     274             :     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     275             : }
     276             : 
     277             : size_t
     278             : GetPageFaultCount()
     279             : {
     280             :     PROCESS_MEMORY_COUNTERS pmc;
     281             :     if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
     282             :         return 0;
     283             :     return pmc.PageFaultCount;
     284             : }
     285             : 
     286             : void*
     287             : AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
     288             : {
     289             :     MOZ_ASSERT(length && alignment);
     290             : 
     291             :     // The allocation granularity and the requested offset
     292             :     // must both be divisible by the requested alignment.
     293             :     // Alignments larger than the allocation granularity are not supported.
     294             :     if (allocGranularity % alignment != 0 || offset % alignment != 0)
     295             :         return nullptr;
     296             : 
     297             :     HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
     298             : 
     299             :     // This call will fail if the file does not exist, which is what we want.
     300             :     HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
     301             :     if (!hMap)
     302             :         return nullptr;
     303             : 
     304             :     size_t alignedOffset = offset - (offset % allocGranularity);
     305             :     size_t alignedLength = length + (offset % allocGranularity);
     306             : 
     307             :     DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
     308             :     DWORD offsetL = uint32_t(alignedOffset);
     309             : 
     310             :     // If the offset or length are out of bounds, this call will fail.
     311             :     uint8_t* map = static_cast<uint8_t*>(MapViewOfFile(hMap, FILE_MAP_COPY, offsetH,
     312             :                                                        offsetL, alignedLength));
     313             : 
     314             :     // This just decreases the file mapping object's internal reference count;
     315             :     // it won't actually be destroyed until we unmap the associated view.
     316             :     CloseHandle(hMap);
     317             : 
     318             :     if (!map)
     319             :         return nullptr;
     320             : 
     321             : #ifdef DEBUG
     322             :     // Zero out data before and after the desired mapping to catch errors early.
     323             :     if (offset != alignedOffset)
     324             :         memset(map, 0, offset - alignedOffset);
     325             :     if (alignedLength % pageSize)
     326             :         memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
     327             : #endif
     328             : 
     329             :     return map + (offset - alignedOffset);
     330             : }
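
MapViewOfFile takes the 64-bit file offset as two 32-bit halves. A standalone
check of the split used above (the offset value is hypothetical):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint64_t alignedOffset = UINT64_C(0x123456789ABC0000);
        uint32_t offsetH = uint32_t(alignedOffset >> 32); // high DWORD: 0x12345678
        uint32_t offsetL = uint32_t(alignedOffset);       // low DWORD:  0x9ABC0000
        assert(((uint64_t(offsetH) << 32) | offsetL) == alignedOffset);
        return 0;
    }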
     331             : 
     332             : void
     333             : DeallocateMappedContent(void* p, size_t /*length*/)
     334             : {
     335             :     if (!p)
     336             :         return;
     337             : 
     338             :     // Calculate the address originally returned by MapViewOfFile.
     339             :     // This is needed because AllocateMappedContent returns a pointer
     340             :     // that might be offset from the view, as the beginning of a
     341             :     // view must be aligned with the allocation granularity.
     342             :     uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
     343             :     MOZ_ALWAYS_TRUE(UnmapViewOfFile(reinterpret_cast<void*>(map)));
     344             : }
     345             : 
     346             : #  else // Various APIs are unavailable.
     347             : 
     348             : void*
     349             : MapAlignedPages(size_t size, size_t alignment)
     350             : {
     351             :     MOZ_ASSERT(size >= alignment);
     352             :     MOZ_ASSERT(size >= allocGranularity);
     353             :     MOZ_ASSERT(size % alignment == 0);
     354             :     MOZ_ASSERT(size % pageSize == 0);
     355             :     MOZ_ASSERT_IF(alignment < allocGranularity, allocGranularity % alignment == 0);
     356             :     MOZ_ASSERT_IF(alignment > allocGranularity, alignment % allocGranularity == 0);
     357             : 
     358             :     void* p = _aligned_malloc(size, alignment);
     359             : 
     360             :     MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
     361             :     return p;
     362             : }
     363             : 
     364             : static void*
     365             : MapAlignedPagesLastDitch(size_t size, size_t alignment)
     366             : {
     367             :     return nullptr;
     368             : }
     369             : 
     370             : void
     371             : UnmapPages(void* p, size_t size)
     372             : {
     373             :     _aligned_free(p);
     374             : }
     375             : 
     376             : bool
     377             : MarkPagesUnused(void* p, size_t size)
     378             : {
     379             :     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     380             :     return true;
     381             : }
     382             : 
     383             : void
     384             : MarkPagesInUse(void* p, size_t size)
     385             : {
     386             :     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     387             : }
     388             : 
     389             : size_t
     390             : GetPageFaultCount()
     391             : {
     392             :     // GetProcessMemoryInfo is unavailable.
     393             :     return 0;
     394             : }
     395             : 
     396             : void*
     397             : AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
     398             : {
     399             :     // Not implemented.
     400             :     return nullptr;
     401             : }
     402             : 
      403             : // Deallocate mapped memory for an object.
     404             : void
     405             : DeallocateMappedContent(void* p, size_t length)
     406             : {
     407             :     // Not implemented.
     408             : }
     409             : 
     410             : #  endif
     411             : 
     412             : #elif defined(SOLARIS)
     413             : 
     414             : #ifndef MAP_NOSYNC
     415             : # define MAP_NOSYNC 0
     416             : #endif
     417             : 
     418             : void
     419             : InitMemorySubsystem()
     420             : {
     421             :     if (pageSize == 0)
     422             :         pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
     423             : }
     424             : 
     425             : void*
     426             : MapAlignedPages(size_t size, size_t alignment)
     427             : {
     428             :     MOZ_ASSERT(size >= alignment);
     429             :     MOZ_ASSERT(size >= allocGranularity);
     430             :     MOZ_ASSERT(size % alignment == 0);
     431             :     MOZ_ASSERT(size % pageSize == 0);
     432             :     MOZ_ASSERT_IF(alignment < allocGranularity, allocGranularity % alignment == 0);
     433             :     MOZ_ASSERT_IF(alignment > allocGranularity, alignment % allocGranularity == 0);
     434             : 
     435             :     int prot = PROT_READ | PROT_WRITE;
     436             :     int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
     437             : 
     438             :     void* p = mmap((caddr_t)alignment, size, prot, flags, -1, 0);
     439             :     if (p == MAP_FAILED)
     440             :         return nullptr;
     441             :     return p;
     442             : }
     443             : 
     444             : static void*
     445             : MapAlignedPagesLastDitch(size_t size, size_t alignment)
     446             : {
     447             :     return nullptr;
     448             : }
     449             : 
     450             : void
     451             : UnmapPages(void* p, size_t size)
     452             : {
     453             :     MOZ_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
     454             : }
     455             : 
     456             : bool
     457             : MarkPagesUnused(void* p, size_t size)
     458             : {
     459             :     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     460             :     return true;
     461             : }
     462             : 
     463             : void
     464             : MarkPagesInUse(void* p, size_t size)
     465             : {
     466             :     if (!DecommitEnabled())
     467             :         return;
     468             : 
     469             :     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     470             : }
     471             : 
     472             : size_t
     473             : GetPageFaultCount()
     474             : {
     475             :     return 0;
     476             : }
     477             : 
     478             : void*
     479             : AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
     480             : {
     481             :     // Not implemented.
     482             :     return nullptr;
     483             : }
     484             : 
      485             : // Deallocate mapped memory for an object.
     486             : void
     487             : DeallocateMappedContent(void* p, size_t length)
     488             : {
     489             :     // Not implemented.
     490             : }
     491             : 
     492             : #elif defined(XP_UNIX)
     493             : 
     494             : void
     495           3 : InitMemorySubsystem()
     496             : {
     497           3 :     if (pageSize == 0)
     498           3 :         pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
     499           3 : }
     500             : 
     501             : static inline void*
     502           6 : MapMemoryAt(void* desired, size_t length, int prot = PROT_READ | PROT_WRITE,
     503             :             int flags = MAP_PRIVATE | MAP_ANON, int fd = -1, off_t offset = 0)
     504             : {
     505             : 
     506             : #if defined(__ia64__) || defined(__aarch64__) || \
     507             :     (defined(__sparc__) && defined(__arch64__) && (defined(__NetBSD__) || defined(__linux__)))
     508             :     MOZ_ASSERT((0xffff800000000000ULL & (uintptr_t(desired) + length - 1)) == 0);
     509             : #endif
     510           6 :     void* region = mmap(desired, length, prot, flags, fd, offset);
     511           6 :     if (region == MAP_FAILED)
     512           0 :         return nullptr;
     513             :     /*
     514             :      * mmap treats the given address as a hint unless the MAP_FIXED flag is
     515             :      * used (which isn't usually what you want, as this overrides existing
     516             :      * mappings), so check that the address we got is the address we wanted.
     517             :      */
     518           6 :     if (region != desired) {
     519           0 :         if (munmap(region, length))
     520           0 :             MOZ_ASSERT(errno == ENOMEM);
     521           0 :         return nullptr;
     522             :     }
     523           6 :     return region;
     524             : }
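
A minimal POSIX sketch of the hint behavior checked above (the desired address
is hypothetical and assumes a 64-bit address space): without MAP_FIXED, mmap
may place the mapping anywhere, so the result must be compared against the
request and unmapped on mismatch:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        void* desired = reinterpret_cast<void*>(uintptr_t(1) << 44);
        size_t length = 1 << 16;
        void* got = mmap(desired, length, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON, -1, 0);
        if (got == MAP_FAILED)
            return 1;
        if (got != desired) {
            // The kernel treated |desired| as a hint only; undo the mapping,
            // as MapMemoryAt does.
            munmap(got, length);
            printf("hint not honored\n");
            return 1;
        }
        printf("mapped at the requested address %p\n", got);
        munmap(got, length);
        return 0;
    }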
     525             : 
     526             : static inline void*
     527          37 : MapMemory(size_t length, int prot = PROT_READ | PROT_WRITE,
     528             :           int flags = MAP_PRIVATE | MAP_ANON, int fd = -1, off_t offset = 0)
     529             : {
     530             : #if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__NetBSD__))
     531             :     /*
     532             :      * The JS engine assumes that all allocated pointers have their high 17 bits clear,
     533             :      * which ia64's mmap doesn't support directly. However, we can emulate it by passing
     534             :      * mmap an "addr" parameter with those bits clear. The mmap will return that address,
     535             :      * or the nearest available memory above that address, providing a near-guarantee
     536             :      * that those bits are clear. If they are not, we return nullptr below to indicate
     537             :      * out-of-memory.
     538             :      *
     539             :      * The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
     540             :      * address space.
     541             :      *
     542             :      * See Bug 589735 for more information.
     543             :      */
     544             :     void* region = mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
     545             :     if (region == MAP_FAILED)
     546             :         return nullptr;
     547             :     /*
     548             :      * If the allocated memory doesn't have its upper 17 bits clear, treat it
     549             :      * as out of memory.
     550             :      */
     551             :     if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
     552             :         if (munmap(region, length))
     553             :             MOZ_ASSERT(errno == ENOMEM);
     554             :         return nullptr;
     555             :     }
     556             :     return region;
     557             : #elif defined(__aarch64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
     558             :    /*
     559             :     * There may be a similar virtual address issue on arm64, depending on the
     560             :     * hardware and kernel configuration, but the workaround differs slightly
     561             :     * because mmap behaves differently there.
     562             :     *
     563             :     * TODO: Merge with the above code block if this implementation works for
     564             :     * ia64 and sparc64.
     565             :     */
     566             :     const uintptr_t start = UINT64_C(0x0000070000000000);
     567             :     const uintptr_t end   = UINT64_C(0x0000800000000000);
     568             :     const uintptr_t step  = ChunkSize;
     569             :    /*
     570             :     * Optimization options if there are too many retries in practice:
     571             :     * 1. Examine /proc/self/maps to find an available address. This file is
     572             :     *    not always available, however. In addition, even if we examine
     573             :     *    /proc/self/maps, we may still need to retry several times due to
     574             :     *    racing with other threads.
     575             :     * 2. Use a global/static variable with lock to track the addresses we have
     576             :     *    allocated or tried.
     577             :     */
     578             :     uintptr_t hint;
     579             :     void* region = MAP_FAILED;
     580             :     for (hint = start; region == MAP_FAILED && hint + length <= end; hint += step) {
     581             :         region = mmap((void*)hint, length, prot, flags, fd, offset);
     582             :         if (region != MAP_FAILED) {
     583             :             if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
     584             :                 if (munmap(region, length)) {
     585             :                     MOZ_ASSERT(errno == ENOMEM);
     586             :                 }
     587             :                 region = MAP_FAILED;
     588             :             }
     589             :         }
     590             :     }
     591             :     return region == MAP_FAILED ? nullptr : region;
     592             : #else
     593          37 :     void* region = MozTaggedAnonymousMmap(nullptr, length, prot, flags, fd, offset, "js-gc-heap");
     594          37 :     if (region == MAP_FAILED)
     595           0 :         return nullptr;
     596          37 :     return region;
     597             : #endif
     598             : }
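
A standalone check of the high-bit arithmetic used by both fallback paths
above: an allocation is acceptable only if the top 17 bits are clear for the
whole [region, region + length) range, and the 0x0000070000000000 hint leaves
roughly 120 TiB below the 2^47 boundary:

    #include <cassert>
    #include <cstdint>

    static bool TopBitsClear(uint64_t addr, uint64_t length)
    {
        return ((addr + (length - 1)) & UINT64_C(0xffff800000000000)) == 0;
    }

    int main()
    {
        assert(TopBitsClear(UINT64_C(0x0000070000000000), 1 << 20)); // hint region is fine
        assert(!TopBitsClear(UINT64_C(0x00007fffffffffff), 2));      // crosses the boundary
        // Headroom between the hint and the 2^47 boundary, in TiB:
        static_assert((UINT64_C(0x0000800000000000) - UINT64_C(0x0000070000000000))
                          >> 40 == 121, "about 120 TiB of usable address space");
        return 0;
    }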
     599             : 
     600             : void*
     601          37 : MapAlignedPages(size_t size, size_t alignment)
     602             : {
     603          37 :     MOZ_ASSERT(size >= alignment);
     604          37 :     MOZ_ASSERT(size >= allocGranularity);
     605          37 :     MOZ_ASSERT(size % alignment == 0);
     606          37 :     MOZ_ASSERT(size % pageSize == 0);
     607          37 :     MOZ_ASSERT_IF(alignment < allocGranularity, allocGranularity % alignment == 0);
     608          37 :     MOZ_ASSERT_IF(alignment > allocGranularity, alignment % allocGranularity == 0);
     609             : 
     610          37 :     void* p = MapMemory(size);
     611             : 
     612             :     /* Special case: If we want page alignment, no further work is needed. */
     613          37 :     if (alignment == allocGranularity)
     614           0 :         return p;
     615             : 
     616          37 :     if (OffsetFromAligned(p, alignment) == 0)
     617          31 :         return p;
     618             : 
     619             :     void* retainedAddr;
     620           6 :     GetNewChunk(&p, &retainedAddr, size, alignment);
     621           6 :     if (retainedAddr)
     622           0 :         UnmapPages(retainedAddr, size);
     623           6 :     if (p) {
     624           6 :         if (OffsetFromAligned(p, alignment) == 0)
     625           6 :             return p;
     626           0 :         UnmapPages(p, size);
     627             :     }
     628             : 
     629           0 :     p = MapAlignedPagesSlow(size, alignment);
     630           0 :     if (!p)
     631           0 :         return MapAlignedPagesLastDitch(size, alignment);
     632             : 
     633           0 :     MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
     634           0 :     return p;
     635             : }
     636             : 
     637             : static void*
     638           0 : MapAlignedPagesSlow(size_t size, size_t alignment)
     639             : {
     640             :     /* Overallocate and unmap the region's edges. */
     641           0 :     size_t reqSize = size + alignment - pageSize;
     642           0 :     void* region = MapMemory(reqSize);
     643           0 :     if (!region)
     644           0 :         return nullptr;
     645             : 
     646           0 :     void* regionEnd = (void*)(uintptr_t(region) + reqSize);
     647             :     void* front;
     648             :     void* end;
     649           0 :     if (growthDirection <= 0) {
     650           0 :         size_t offset = OffsetFromAligned(regionEnd, alignment);
     651           0 :         end = (void*)(uintptr_t(regionEnd) - offset);
     652           0 :         front = (void*)(uintptr_t(end) - size);
     653             :     } else {
     654           0 :         size_t offset = OffsetFromAligned(region, alignment);
     655           0 :         front = (void*)(uintptr_t(region) + (offset ? alignment - offset : 0));
     656           0 :         end = (void*)(uintptr_t(front) + size);
     657             :     }
     658             : 
     659           0 :     if (front != region)
     660           0 :         UnmapPages(region, uintptr_t(front) - uintptr_t(region));
     661           0 :     if (end != regionEnd)
     662           0 :         UnmapPages(end, uintptr_t(regionEnd) - uintptr_t(end));
     663             : 
     664           0 :     return front;
     665             : }
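
A worked instance of the trimming arithmetic above, for the growing-up case
with hypothetical values (size = alignment = 1 MiB, pageSize = 4 KiB):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint64_t alignment = 1 << 20, pageSize = 4096, size = 1 << 20;
        const uint64_t reqSize = size + alignment - pageSize;      // over-allocation
        const uint64_t region = UINT64_C(0x7f0000000000) + 0x3000; // hypothetical mmap result
        uint64_t offset = region % alignment;                      // 0x3000
        uint64_t front = region + (offset ? alignment - offset : 0);
        uint64_t end = front + size;
        assert(front % alignment == 0);                     // aligned start
        assert(front >= region && end <= region + reqSize); // edges stay in range
        // [region, front) and [end, region + reqSize) are the edges that get
        // unmapped, leaving an aligned mapping of exactly |size| bytes.
        return 0;
    }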
     666             : 
     667             : /*
     668             :  * In a low memory or high fragmentation situation, alignable chunks of the
     669             :  * desired size may still be available, even if there are no more contiguous
     670             :  * free chunks that meet the |size + alignment - pageSize| requirement of
     671             :  * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
     672             :  * by temporarily holding onto the unaligned parts of each chunk until the
     673             :  * allocator gives us a chunk that either is, or can be aligned.
     674             :  */
     675             : static void*
     676           0 : MapAlignedPagesLastDitch(size_t size, size_t alignment)
     677             : {
     678             :     void* tempMaps[MaxLastDitchAttempts];
     679           0 :     int attempt = 0;
     680           0 :     void* p = MapMemory(size);
     681           0 :     if (OffsetFromAligned(p, alignment) == 0)
     682           0 :         return p;
     683           0 :     for (; attempt < MaxLastDitchAttempts; ++attempt) {
     684           0 :         GetNewChunk(&p, tempMaps + attempt, size, alignment);
     685           0 :         if (OffsetFromAligned(p, alignment) == 0) {
     686           0 :             if (tempMaps[attempt])
     687           0 :                 UnmapPages(tempMaps[attempt], size);
     688           0 :             break;
     689             :         }
     690           0 :         if (!tempMaps[attempt])
     691           0 :             break; /* Bail if GetNewChunk failed. */
     692             :     }
     693           0 :     if (OffsetFromAligned(p, alignment)) {
     694           0 :         UnmapPages(p, size);
     695           0 :         p = nullptr;
     696             :     }
     697           0 :     while (--attempt >= 0)
     698           0 :         UnmapPages(tempMaps[attempt], size);
     699           0 :     return p;
     700             : }
     701             : 
     702             : /*
     703             :  * mmap calls don't have to be matched with calls to munmap, so we can unmap
     704             :  * just the pages we don't need. However, as we don't know a priori if addresses
     705             :  * are handed out in increasing or decreasing order, we have to try both
     706             :  * directions (depending on the environment, one will always fail).
     707             :  */
     708             : static void
     709           6 : GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size, size_t alignment)
     710             : {
     711           6 :     void* address = *aAddress;
     712           6 :     void* retainedAddr = nullptr;
     713           6 :     bool addrsGrowDown = growthDirection <= 0;
     714           6 :     int i = 0;
     715           6 :     for (; i < 2; ++i) {
     716             :         /* Try the direction indicated by growthDirection. */
     717           6 :         if (addrsGrowDown) {
     718           6 :             size_t offset = OffsetFromAligned(address, alignment);
     719           6 :             void* head = (void*)((uintptr_t)address - offset);
     720           6 :             void* tail = (void*)((uintptr_t)head + size);
     721           6 :             if (MapMemoryAt(head, offset)) {
     722           6 :                 UnmapPages(tail, offset);
     723           6 :                 if (growthDirection >= -8)
     724           6 :                     --growthDirection;
     725           6 :                 address = head;
     726           6 :                 break;
     727             :             }
     728             :         } else {
     729           0 :             size_t offset = alignment - OffsetFromAligned(address, alignment);
     730           0 :             void* head = (void*)((uintptr_t)address + offset);
     731           0 :             void* tail = (void*)((uintptr_t)address + size);
     732           0 :             if (MapMemoryAt(tail, offset)) {
     733           0 :                 UnmapPages(address, offset);
     734           0 :                 if (growthDirection <= 8)
     735           0 :                     ++growthDirection;
     736           0 :                 address = head;
     737           0 :                 break;
     738             :             }
     739             :         }
     740             :         /* If we're confident in the growth direction, don't try the other. */
     741           0 :         if (growthDirection < -8 || growthDirection > 8)
     742           0 :             break;
     743             :         /* If that failed, try the opposite direction. */
     744           0 :         addrsGrowDown = !addrsGrowDown;
     745             :     }
     746             :     /* If our current chunk cannot be aligned, see if the next one is aligned. */
     747           6 :     if (OffsetFromAligned(address, alignment)) {
     748           0 :         retainedAddr = address;
     749           0 :         address = MapMemory(size);
     750             :     }
     751           6 :     *aAddress = address;
     752           6 :     *aRetainedAddr = retainedAddr;
     753           6 : }
     754             : 
     755             : void
     756           6 : UnmapPages(void* p, size_t size)
     757             : {
     758           6 :     if (munmap(p, size))
     759           0 :         MOZ_ASSERT(errno == ENOMEM);
     760           6 : }
     761             : 
     762             : bool
     763          50 : MarkPagesUnused(void* p, size_t size)
     764             : {
     765          50 :     if (!DecommitEnabled())
     766           0 :         return false;
     767             : 
     768          50 :     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     769             : #if defined(XP_SOLARIS)
     770             :     int result = posix_madvise(p, size, POSIX_MADV_DONTNEED);
     771             : #else
     772          50 :     int result = madvise(p, size, MADV_DONTNEED);
     773             : #endif
     774          50 :     return result != -1;
     775             : }
     776             : 
     777             : void
     778        6657 : MarkPagesInUse(void* p, size_t size)
     779             : {
     780        6657 :     if (!DecommitEnabled())
     781           0 :         return;
     782             : 
     783        6657 :     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     784             : }
     785             : 
     786             : size_t
     787           6 : GetPageFaultCount()
     788             : {
     789             :     struct rusage usage;
     790           6 :     int err = getrusage(RUSAGE_SELF, &usage);
     791           6 :     if (err)
     792           0 :         return 0;
     793           6 :     return usage.ru_majflt;
     794             : }
     795             : 
     796             : void*
     797           0 : AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
     798             : {
     799           0 :     MOZ_ASSERT(length && alignment);
     800             : 
     801             :     // The allocation granularity and the requested offset
     802             :     // must both be divisible by the requested alignment.
     803             :     // Alignments larger than the allocation granularity are not supported.
     804           0 :     if (allocGranularity % alignment != 0 || offset % alignment != 0)
     805           0 :         return nullptr;
     806             : 
     807             :     // Sanity check the offset and size, as mmap does not do this for us.
     808             :     struct stat st;
     809           0 :     if (fstat(fd, &st) || offset >= uint64_t(st.st_size) || length > uint64_t(st.st_size) - offset)
     810           0 :         return nullptr;
     811             : 
     812           0 :     size_t alignedOffset = offset - (offset % allocGranularity);
     813           0 :     size_t alignedLength = length + (offset % allocGranularity);
     814             : 
     815           0 :     uint8_t* map = static_cast<uint8_t*>(MapMemory(alignedLength, PROT_READ | PROT_WRITE,
     816           0 :                                                    MAP_PRIVATE, fd, alignedOffset));
     817           0 :     if (!map)
     818           0 :         return nullptr;
     819             : 
     820             : #ifdef DEBUG
     821             :     // Zero out data before and after the desired mapping to catch errors early.
     822           0 :     if (offset != alignedOffset)
     823           0 :         memset(map, 0, offset - alignedOffset);
     824           0 :     if (alignedLength % pageSize)
     825           0 :         memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
     826             : #endif
     827             : 
     828           0 :     return map + (offset - alignedOffset);
     829             : }
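
The offset arithmetic above, traced with hypothetical numbers
(allocGranularity = 64 KiB, offset = 200000, length = 5000):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const size_t allocGranularity = 65536;
        const size_t offset = 200000, length = 5000;
        size_t alignedOffset = offset - (offset % allocGranularity); // 196608
        size_t alignedLength = length + (offset % allocGranularity); // 8392
        assert(alignedOffset % allocGranularity == 0);            // mmap-able offset
        assert(alignedOffset + alignedLength == offset + length); // covers the request
        // The caller receives map + 3392, i.e. (offset - alignedOffset) bytes
        // into the mapping; DeallocateMappedContent recovers the mapping base
        // the same way.
        return 0;
    }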
     830             : 
     831             : void
     832           0 : DeallocateMappedContent(void* p, size_t length)
     833             : {
     834           0 :     if (!p)
     835           0 :         return;
     836             : 
     837             :     // Calculate the address originally returned by mmap.
     838             :     // This is needed because AllocateMappedContent returns a pointer
     839             :     // that might be offset from the mapping, as the beginning of a
     840             :     // mapping must be aligned with the allocation granularity.
     841           0 :     uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
     842           0 :     size_t alignedLength = length + (uintptr_t(p) % allocGranularity);
     843           0 :     UnmapPages(reinterpret_cast<void*>(map), alignedLength);
     844             : }
     845             : 
     846             : #else
     847             : #error "Memory mapping functions are not defined for your OS."
     848             : #endif
     849             : 
     850             : void
     851           0 : ProtectPages(void* p, size_t size)
     852             : {
     853           0 :     MOZ_ASSERT(size % pageSize == 0);
     854           0 :     MOZ_RELEASE_ASSERT(size > 0);
     855           0 :     MOZ_RELEASE_ASSERT(p);
     856             : #if defined(XP_WIN)
     857             :     DWORD oldProtect;
     858             :     if (!VirtualProtect(p, size, PAGE_NOACCESS, &oldProtect)) {
     859             :         MOZ_CRASH_UNSAFE_PRINTF("VirtualProtect(PAGE_NOACCESS) failed! Error code: %lu",
     860             :                                 GetLastError());
     861             :     }
     862             :     MOZ_ASSERT(oldProtect == PAGE_READWRITE);
     863             : #else  // assume Unix
     864           0 :     if (mprotect(p, size, PROT_NONE))
     865           0 :         MOZ_CRASH("mprotect(PROT_NONE) failed");
     866             : #endif
     867           0 : }
     868             : 
     869             : void
     870           0 : MakePagesReadOnly(void* p, size_t size)
     871             : {
     872           0 :     MOZ_ASSERT(size % pageSize == 0);
     873           0 :     MOZ_RELEASE_ASSERT(size > 0);
     874           0 :     MOZ_RELEASE_ASSERT(p);
     875             : #if defined(XP_WIN)
     876             :     DWORD oldProtect;
     877             :     if (!VirtualProtect(p, size, PAGE_READONLY, &oldProtect)) {
     878             :         MOZ_CRASH_UNSAFE_PRINTF("VirtualProtect(PAGE_READONLY) failed! Error code: %lu",
     879             :                                 GetLastError());
     880             :     }
     881             :     MOZ_ASSERT(oldProtect == PAGE_READWRITE);
     882             : #else  // assume Unix
     883           0 :     if (mprotect(p, size, PROT_READ))
     884           0 :         MOZ_CRASH("mprotect(PROT_READ) failed");
     885             : #endif
     886           0 : }
     887             : 
     888             : void
     889           0 : UnprotectPages(void* p, size_t size)
     890             : {
     891           0 :     MOZ_ASSERT(size % pageSize == 0);
     892           0 :     MOZ_RELEASE_ASSERT(size > 0);
     893           0 :     MOZ_RELEASE_ASSERT(p);
     894             : #if defined(XP_WIN)
     895             :     DWORD oldProtect;
     896             :     if (!VirtualProtect(p, size, PAGE_READWRITE, &oldProtect)) {
     897             :         MOZ_CRASH_UNSAFE_PRINTF("VirtualProtect(PAGE_READWRITE) failed! Error code: %lu",
     898             :                                 GetLastError());
     899             :     }
     900             :     MOZ_ASSERT(oldProtect == PAGE_NOACCESS || oldProtect == PAGE_READONLY);
     901             : #else  // assume Unix
     902           0 :     if (mprotect(p, size, PROT_READ | PROT_WRITE))
     903           0 :         MOZ_CRASH("mprotect(PROT_READ | PROT_WRITE) failed");
     904             : #endif
     905           0 : }
     906             : 
     907             : } // namespace gc
     908             : } // namespace js

Generated by: LCOV version 1.13