LCOV - code coverage report
Current view: memory/mozjemalloc/mozjemalloc.cpp
Test: output.info
Date: 2017-07-14 16:53:18

                   Hit    Total   Coverage
Lines:             993     1267     78.4 %
Functions:         100      115     87.0 %

          Line data    Source code
       1             : /* -*- Mode: C; tab-width: 8; c-basic-offset: 8; indent-tabs-mode: t -*- */
       2             : /* vim:set softtabstop=8 shiftwidth=8 noet: */
       3             : /*-
       4             :  * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
       5             :  * All rights reserved.
       6             :  *
       7             :  * Redistribution and use in source and binary forms, with or without
       8             :  * modification, are permitted provided that the following conditions
       9             :  * are met:
      10             :  * 1. Redistributions of source code must retain the above copyright
      11             :  *    notice(s), this list of conditions and the following disclaimer as
      12             :  *    the first lines of this file unmodified other than the possible
      13             :  *    addition of one or more copyright notices.
      14             :  * 2. Redistributions in binary form must reproduce the above copyright
      15             :  *    notice(s), this list of conditions and the following disclaimer in
      16             :  *    the documentation and/or other materials provided with the
      17             :  *    distribution.
      18             :  *
      19             :  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
      20             :  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
      21             :  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
      22             :  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
      23             :  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
      24             :  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
      25             :  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
      26             :  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
      27             :  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
      28             :  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
      29             :  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      30             :  *
      31             :  *******************************************************************************
      32             :  *
      33             :  * This allocator implementation is designed to provide scalable performance
      34             :  * for multi-threaded programs on multi-processor systems.  The following
      35             :  * features are included for this purpose:
      36             :  *
      37             :  *   + Multiple arenas are used if there are multiple CPUs, which reduces lock
      38             :  *     contention and cache sloshing.
      39             :  *
      40             :  *   + Cache line sharing between arenas is avoided for internal data
      41             :  *     structures.
      42             :  *
      43             :  *   + Memory is managed in chunks and runs (chunks can be split into runs),
      44             :  *     rather than as individual pages.  This provides a constant-time
      45             :  *     mechanism for associating allocations with particular arenas.
      46             :  *
      47             :  * Allocation requests are rounded up to the nearest size class, and no record
      48             :  * of the original request size is maintained.  Allocations are broken into
      49             :  * categories according to size class.  Assuming runtime defaults, 4 kB pages
      50             :  * and a 16 byte quantum on a 32-bit system, the size classes in each category
      51             :  * are as follows:
      52             :  *
      53             :  *   |=====================================|
      54             :  *   | Category | Subcategory    |    Size |
      55             :  *   |=====================================|
      56             :  *   | Small    | Tiny           |       2 |
      57             :  *   |          |                |       4 |
      58             :  *   |          |                |       8 |
      59             :  *   |          |----------------+---------|
      60             :  *   |          | Quantum-spaced |      16 |
      61             :  *   |          |                |      32 |
      62             :  *   |          |                |      48 |
      63             :  *   |          |                |     ... |
      64             :  *   |          |                |     480 |
      65             :  *   |          |                |     496 |
      66             :  *   |          |                |     512 |
      67             :  *   |          |----------------+---------|
      68             :  *   |          | Sub-page       |    1 kB |
      69             :  *   |          |                |    2 kB |
      70             :  *   |=====================================|
      71             :  *   | Large                     |    4 kB |
      72             :  *   |                           |    8 kB |
      73             :  *   |                           |   12 kB |
      74             :  *   |                           |     ... |
      75             :  *   |                           | 1012 kB |
      76             :  *   |                           | 1016 kB |
      77             :  *   |                           | 1020 kB |
      78             :  *   |=====================================|
      79             :  *   | Huge                      |    1 MB |
      80             :  *   |                           |    2 MB |
      81             :  *   |                           |    3 MB |
      82             :  *   |                           |     ... |
      83             :  *   |=====================================|
      84             :  *
      85             :  * NOTE: Due to Mozilla bug 691003, we cannot reserve less than one word for an
      86             :  * allocation on Linux or Mac.  So on 32-bit *nix, the smallest bucket size is
      87             :  * 4 bytes, and on 64-bit, the smallest bucket size is 8 bytes.
      88             :  *
      89             :  * A different mechanism is used for each category:
      90             :  *
      91             :  *   Small : Each size class is segregated into its own set of runs.  Each run
      92             :  *           maintains a bitmap of which regions are free/allocated.
      93             :  *
      94             :  *   Large : Each allocation is backed by a dedicated run.  Metadata are stored
      95             :  *           in the associated arena chunk header maps.
      96             :  *
      97             :  *   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
      98             :  *          Metadata are stored in a separate red-black tree.
      99             :  *
     100             :  *******************************************************************************
     101             :  */
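/*
 * For illustration, a minimal sketch of the rounding rules described above,
 * assuming the runtime defaults (16-byte quantum, 4 kB pages, 1 MB chunks).
 * This is not the allocator's actual fast path, it ignores the bug 691003
 * minimum noted above, and example_size_class() / example_pow2_ceil() are
 * hypothetical helpers that exist only for this sketch.
 */
#if 0 /* example only, not compiled */
static size_t
example_pow2_ceil(size_t x)
{
        size_t p = 1;
        while (p < x)
                p <<= 1;
        return p;
}

static size_t
example_size_class(size_t size)
{
        if (size <= 8)                  /* small: tiny (power-of-two spaced) */
                return example_pow2_ceil(size);
        if (size <= 512)                /* small: quantum-spaced */
                return (size + 15) & ~(size_t)15;
        if (size <= 2048)               /* small: sub-page (power-of-two spaced) */
                return example_pow2_ceil(size);
        if (size <= 1020 * 1024)        /* large: rounded up to a whole page */
                return (size + 4095) & ~(size_t)4095;
        /* huge: rounded up to a whole 1 MB chunk */
        return (size + ((size_t)1 << 20) - 1) & ~(((size_t)1 << 20) - 1);
}
#endif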
     102             : 
     103             : #include "mozmemory_wrap.h"
     104             : #include "mozilla/Sprintf.h"
     105             : 
     106             : #ifdef MOZ_MEMORY_ANDROID
     107             : #define NO_TLS
     108             : #endif
     109             : 
     110             : /*
     111             :  * On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
     112             :  * operating system.  If we release 1MB of live pages with MADV_DONTNEED, our
     113             :  * RSS will decrease by 1MB (almost) immediately.
     114             :  *
     115             :  * On Mac, we use madvise(MADV_FREE).  Unlike MADV_DONTNEED on Linux, MADV_FREE
     116             :  * on Mac doesn't cause the OS to release the specified pages immediately; the
     117             :  * OS keeps them in our process until the machine comes under memory pressure.
     118             :  *
     119             :  * It's therefore difficult to measure the process's RSS on Mac, since, in the
     120             :  * absence of memory pressure, the contribution from the heap to RSS will not
     121             :  * decrease due to our madvise calls.
     122             :  *
     123             :  * We therefore define MALLOC_DOUBLE_PURGE on Mac.  This causes jemalloc to
     124             :  * track which pages have been MADV_FREE'd.  You can then call
     125             :  * jemalloc_purge_freed_pages(), which will force the OS to release those
     126             :  * MADV_FREE'd pages, making the process's RSS reflect its true memory usage.
     127             :  *
     128             :  * The jemalloc_purge_freed_pages definition in memory/build/mozmemory.h needs
     129             :  * to be adjusted if MALLOC_DOUBLE_PURGE is ever enabled on Linux.
     130             :  */
     131             : #ifdef MOZ_MEMORY_DARWIN
     132             : #define MALLOC_DOUBLE_PURGE
     133             : #endif
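/*
 * A minimal sketch of the madvise() behaviour described above (assuming a
 * page-aligned region whose length is a multiple of the page size); the
 * allocator's real purge logic lives elsewhere in this file and handles
 * more cases.  example_release_pages() exists only for this sketch.
 */
#if 0 /* example only, not compiled */
#include <sys/mman.h>

static void
example_release_pages(void *addr, size_t length)
{
#ifdef MOZ_MEMORY_DARWIN
        /* Pages stay charged to the process until memory pressure hits,
         * hence MALLOC_DOUBLE_PURGE above. */
        madvise(addr, length, MADV_FREE);
#else
        /* RSS drops (almost) immediately. */
        madvise(addr, length, MADV_DONTNEED);
#endif
}
#endif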
     134             : 
     135             : #include <sys/types.h>
     136             : 
     137             : #include <errno.h>
     138             : #include <stdlib.h>
     139             : #include <limits.h>
     140             : #include <stdarg.h>
     141             : #include <stdio.h>
     142             : #include <string.h>
     143             : 
     144             : #ifdef MOZ_MEMORY_WINDOWS
     145             : 
     146             : /* Some defines from the CRT internal headers that we need here. */
     147             : #define _CRT_SPINCOUNT 5000
     148             : #include <io.h>
     149             : #include <windows.h>
     150             : #include <intrin.h>
     151             : #include <algorithm>
     152             : 
     153             : #define SIZE_T_MAX SIZE_MAX
     154             : #define STDERR_FILENO 2
     155             : 
     156             : #ifndef NO_TLS
     157             : static unsigned long tlsIndex = 0xffffffff;
     158             : #endif
     159             : 
     160             : /* use MSVC intrinsics */
     161             : #pragma intrinsic(_BitScanForward)
     162             : static __forceinline int
     163             : ffs(int x)
     164             : {
     165             :         unsigned long i;
     166             : 
     167             :         if (_BitScanForward(&i, x) != 0)
     168             :                 return (i + 1);
     169             : 
     170             :         return (0);
     171             : }
     172             : 
     173             : /* Implement getenv without using malloc */
     174             : static char mozillaMallocOptionsBuf[64];
     175             : 
     176             : #define getenv xgetenv
     177             : static char *
     178             : getenv(const char *name)
     179             : {
     180             : 
     181             :         if (GetEnvironmentVariableA(name, (LPSTR)&mozillaMallocOptionsBuf,
     182             :                     sizeof(mozillaMallocOptionsBuf)) > 0)
     183             :                 return (mozillaMallocOptionsBuf);
     184             : 
     185             :         return nullptr;
     186             : }
     187             : 
     188             : #if defined(_WIN64)
     189             : typedef long long ssize_t;
     190             : #else
     191             : typedef long ssize_t;
     192             : #endif
     193             : 
     194             : #define MALLOC_DECOMMIT
     195             : #endif
     196             : 
     197             : #ifndef MOZ_MEMORY_WINDOWS
     198             : #ifndef MOZ_MEMORY_SOLARIS
     199             : #include <sys/cdefs.h>
     200             : #endif
     201             : #include <sys/mman.h>
     202             : #ifndef MADV_FREE
     203             : #  define MADV_FREE     MADV_DONTNEED
     204             : #endif
     205             : #ifndef MAP_NOSYNC
     206             : #  define MAP_NOSYNC    0
     207             : #endif
     208             : #include <sys/param.h>
     209             : #include <sys/time.h>
     210             : #include <sys/types.h>
     211             : #if !defined(MOZ_MEMORY_SOLARIS) && !defined(MOZ_MEMORY_ANDROID)
     212             : #include <sys/sysctl.h>
     213             : #endif
     214             : #include <sys/uio.h>
     215             : 
     216             : #include <errno.h>
     217             : #include <limits.h>
     218             : #ifndef SIZE_T_MAX
     219             : #  define SIZE_T_MAX    SIZE_MAX
     220             : #endif
     221             : #include <pthread.h>
     222             : #include <sched.h>
     223             : #include <stdarg.h>
     224             : #include <stdio.h>
     225             : #include <stdbool.h>
     226             : #include <stdint.h>
     227             : #include <stdlib.h>
     228             : #include <string.h>
     229             : #ifndef MOZ_MEMORY_DARWIN
     230             : #include <strings.h>
     231             : #endif
     232             : #include <unistd.h>
     233             : 
     234             : #ifdef MOZ_MEMORY_DARWIN
     235             : #include <libkern/OSAtomic.h>
     236             : #include <mach/mach_error.h>
     237             : #include <mach/mach_init.h>
     238             : #include <mach/vm_map.h>
     239             : #include <malloc/malloc.h>
     240             : #endif
     241             : 
     242             : #endif
     243             : 
     244             : #include "mozjemalloc_types.h"
     245             : #include "linkedlist.h"
     246             : 
     247             : extern "C" void moz_abort();
     248             : 
      249             : /* Some tools, such as /dev/dsp wrappers and LD_PRELOAD libraries,
      250             :  * happen to override mmap() and call dlsym() from their overridden
      251             :  * mmap(). The problem is that dlsym() calls malloc(), which ends
      252             :  * up in a deadlock in jemalloc.
      253             :  * On these systems, we prefer to use the system call directly.
      254             :  * We do that for Linux systems and kFreeBSD with a GNU userland.
      255             :  * Note that sanity checks (alignment of offset, ...) are not done,
      256             :  * because jemalloc's uses of mmap are pretty limited.
      257             :  *
      258             :  * On Alpha, glibc has a bug that prevents syscall() from working for
      259             :  * system calls with 6 arguments.
      260             :  */
     261             : #if (defined(MOZ_MEMORY_LINUX) && !defined(__alpha__)) || \
     262             :     (defined(MOZ_MEMORY_BSD) && defined(__GLIBC__))
     263             : #include <sys/syscall.h>
     264             : #if defined(SYS_mmap) || defined(SYS_mmap2)
     265             : static inline
     266         137 : void *_mmap(void *addr, size_t length, int prot, int flags,
     267             :             int fd, off_t offset)
     268             : {
     269             : /* S390 only passes one argument to the mmap system call, which is a
     270             :  * pointer to a structure containing the arguments */
     271             : #ifdef __s390__
     272             :         struct {
     273             :                 void *addr;
     274             :                 size_t length;
     275             :                 long prot;
     276             :                 long flags;
     277             :                 long fd;
     278             :                 off_t offset;
     279             :         } args = { addr, length, prot, flags, fd, offset };
     280             :         return (void *) syscall(SYS_mmap, &args);
     281             : #else
     282             : #if defined(MOZ_MEMORY_ANDROID) && defined(__aarch64__) && defined(SYS_mmap2)
     283             : /* Android NDK defines SYS_mmap2 for AArch64 despite it not supporting mmap2 */
     284             : #undef SYS_mmap2
     285             : #endif
     286             : #ifdef SYS_mmap2
     287             :         return (void *) syscall(SYS_mmap2, addr, length, prot, flags,
     288             :                                fd, offset >> 12);
     289             : #else
     290         137 :         return (void *) syscall(SYS_mmap, addr, length, prot, flags,
     291         137 :                                fd, offset);
     292             : #endif
     293             : #endif
     294             : }
     295             : #define mmap _mmap
     296             : #define munmap(a, l) syscall(SYS_munmap, a, l)
     297             : #endif
     298             : #endif
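/*
 * With the defines above, later mmap()/munmap() calls in this file go
 * through the raw system calls, e.g. (illustrative only):
 *
 *   void *p = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
 *                  MAP_PRIVATE | MAP_ANON, -1, 0);   expands to _mmap(...)
 *   munmap(p, 4096);                expands to syscall(SYS_munmap, p, 4096)
 */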
     299             : 
     300             : #ifdef MOZ_MEMORY_DARWIN
     301             : static pthread_key_t tlsIndex;
     302             : #endif
     303             : 
     304             : #ifdef MOZ_MEMORY_WINDOWS
     305             :    /* MSVC++ does not support C99 variable-length arrays. */
     306             : #  define RB_NO_C99_VARARRAYS
     307             : #endif
     308             : #include "rb.h"
     309             : 
     310             : #ifdef MOZ_DEBUG
     311             :    /* Disable inlining to make debugging easier. */
     312             : #ifdef inline
     313             : #undef inline
     314             : #endif
     315             : 
     316             : #  define inline
     317             : #endif
     318             : 
     319             : /* Size of stack-allocated buffer passed to strerror_r(). */
     320             : #define STRERROR_BUF            64
     321             : 
     322             : /* Minimum alignment of non-tiny allocations is 2^QUANTUM_2POW_MIN bytes. */
     323             : #  define QUANTUM_2POW_MIN      4
     324             : #if defined(_WIN64) || defined(__LP64__)
     325             : #  define SIZEOF_PTR_2POW       3
     326             : #else
     327             : #  define SIZEOF_PTR_2POW       2
     328             : #endif
     329             : #define PIC
     330             : 
     331             : #define SIZEOF_PTR              (1U << SIZEOF_PTR_2POW)
     332             : 
     333             : /* sizeof(int) == (1U << SIZEOF_INT_2POW). */
     334             : #ifndef SIZEOF_INT_2POW
     335             : #  define SIZEOF_INT_2POW       2
     336             : #endif
     337             : 
     338             : /* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
     339             : #if (!defined(PIC) && !defined(NO_TLS))
     340             : #  define NO_TLS
     341             : #endif
     342             : 
     343             : /*
     344             :  * Size and alignment of memory chunks that are allocated by the OS's virtual
     345             :  * memory system.
     346             :  */
     347             : #define CHUNK_2POW_DEFAULT      20
     348             : /* Maximum number of dirty pages per arena. */
     349             : #define DIRTY_MAX_DEFAULT       (1U << 8)
     350             : 
     351             : /*
     352             :  * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
     353             :  * so over-estimates are okay (up to a point), but under-estimates will
     354             :  * negatively affect performance.
     355             :  */
     356             : #define CACHELINE_2POW          6
     357             : #define CACHELINE               ((size_t)(1U << CACHELINE_2POW))
     358             : 
     359             : /*
     360             :  * Smallest size class to support.  On Windows the smallest allocation size
     361             :  * must be 8 bytes on 32-bit, 16 bytes on 64-bit.  On Linux and Mac, even
     362             :  * malloc(1) must reserve a word's worth of memory (see Mozilla bug 691003).
     363             :  */
     364             : #ifdef MOZ_MEMORY_WINDOWS
     365             : #define TINY_MIN_2POW           (sizeof(void*) == 8 ? 4 : 3)
     366             : #else
     367             : #define TINY_MIN_2POW           (sizeof(void*) == 8 ? 3 : 2)
     368             : #endif
     369             : 
     370             : /*
     371             :  * Maximum size class that is a multiple of the quantum, but not (necessarily)
     372             :  * a power of 2.  Above this size, allocations are rounded up to the nearest
     373             :  * power of 2.
     374             :  */
     375             : #define SMALL_MAX_2POW_DEFAULT  9
     376             : #define SMALL_MAX_DEFAULT       (1U << SMALL_MAX_2POW_DEFAULT)
     377             : 
     378             : /*
     379             :  * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
     380             :  * as small as possible such that this setting is still honored, without
     381             :  * violating other constraints.  The goal is to make runs as small as possible
     382             :  * without exceeding a per run external fragmentation threshold.
     383             :  *
     384             :  * We use binary fixed point math for overhead computations, where the binary
     385             :  * point is implicitly RUN_BFP bits to the left.
     386             :  *
     387             :  * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
     388             :  * honored for some/all object sizes, since there is one bit of header overhead
     389             :  * per object (plus a constant).  This constraint is relaxed (ignored) for runs
     390             :  * that are so small that the per-region overhead is greater than:
     391             :  *
      392             :  *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
     393             :  */
     394             : #define RUN_BFP                 12
     395             : /*                                    \/   Implicit binary fixed point. */
     396             : #define RUN_MAX_OVRHD           0x0000003dU
     397             : #define RUN_MAX_OVRHD_RELAX     0x00001800U
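/*
 * Worked example of the fixed-point threshold above: with RUN_BFP == 12,
 * RUN_MAX_OVRHD corresponds to an overhead ratio of 0x3d / 2^12 == 61/4096,
 * i.e. roughly 1.5%.  So, for instance, a run whose header consumes 60
 * bytes satisfies the constraint once the run reaches one 4 kB page:
 * 60/4096 ~= 1.46% <= 61/4096.
 */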
     398             : 
     399             : /******************************************************************************/
     400             : 
     401             : /* MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive. */
     402             : #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
     403             : #error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
     404             : #endif
     405             : 
     406             : /*
     407             :  * Mutexes based on spinlocks.  We can't use normal pthread spinlocks in all
     408             :  * places, because they require malloc()ed memory, which causes bootstrapping
     409             :  * issues in some cases.
     410             :  */
     411             : #if defined(MOZ_MEMORY_WINDOWS)
     412             : #define malloc_mutex_t CRITICAL_SECTION
     413             : #define malloc_spinlock_t CRITICAL_SECTION
     414             : #elif defined(MOZ_MEMORY_DARWIN)
     415             : typedef struct {
     416             :         OSSpinLock      lock;
     417             : } malloc_mutex_t;
     418             : typedef struct {
     419             :         OSSpinLock      lock;
     420             : } malloc_spinlock_t;
     421             : #else
     422             : typedef pthread_mutex_t malloc_mutex_t;
     423             : typedef pthread_mutex_t malloc_spinlock_t;
     424             : #endif
     425             : 
     426             : /* Set to true once the allocator has been initialized. */
     427             : static bool malloc_initialized = false;
     428             : 
     429             : #if defined(MOZ_MEMORY_WINDOWS)
     430             : /* No init lock for Windows. */
     431             : #elif defined(MOZ_MEMORY_DARWIN)
     432             : static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
     433             : #elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
     434             : static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
     435             : #else
     436             : static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
     437             : #endif
     438             : 
     439             : /******************************************************************************/
     440             : /*
     441             :  * Statistics data structures.
     442             :  */
     443             : 
     444             : typedef struct malloc_bin_stats_s malloc_bin_stats_t;
     445             : struct malloc_bin_stats_s {
     446             :         /* Current number of runs in this bin. */
     447             :         unsigned long   curruns;
     448             : };
     449             : 
     450             : typedef struct arena_stats_s arena_stats_t;
     451             : struct arena_stats_s {
     452             :         /* Number of bytes currently mapped. */
     453             :         size_t          mapped;
     454             : 
     455             :         /* Current number of committed pages. */
     456             :         size_t          committed;
     457             : 
     458             :         /* Per-size-category statistics. */
     459             :         size_t          allocated_small;
     460             : 
     461             :         size_t          allocated_large;
     462             : };
     463             : 
     464             : /******************************************************************************/
     465             : /*
     466             :  * Extent data structures.
     467             :  */
     468             : 
     469             : enum ChunkType {
     470             :   UNKNOWN_CHUNK,
     471             :   ZEROED_CHUNK,   // chunk only contains zeroes
     472             :   ARENA_CHUNK,    // used to back arena runs created by arena_run_alloc
     473             :   HUGE_CHUNK,     // used to back huge allocations (e.g. huge_malloc)
     474             :   RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle
     475             : };
     476             : 
     477             : /* Tree of extents. */
     478             : typedef struct extent_node_s extent_node_t;
     479             : struct extent_node_s {
     480             :         /* Linkage for the size/address-ordered tree. */
     481             :         rb_node(extent_node_t) link_szad;
     482             : 
     483             :         /* Linkage for the address-ordered tree. */
     484             :         rb_node(extent_node_t) link_ad;
     485             : 
     486             :         /* Pointer to the extent that this tree node is responsible for. */
     487             :         void    *addr;
     488             : 
     489             :         /* Total region size. */
     490             :         size_t  size;
     491             : 
      492             :         /* The type of this chunk; used by the chunk recycling code. */
     493             :         ChunkType chunk_type;
     494             : };
     495             : typedef rb_tree(extent_node_t) extent_tree_t;
     496             : 
     497             : /******************************************************************************/
     498             : /*
     499             :  * Radix tree data structures.
     500             :  */
     501             : 
     502             : /*
     503             :  * Size of each radix tree node (must be a power of 2).  This impacts tree
     504             :  * depth.
     505             :  */
     506             : #if (SIZEOF_PTR == 4)
     507             : #  define MALLOC_RTREE_NODESIZE (1U << 14)
     508             : #else
     509             : #  define MALLOC_RTREE_NODESIZE CACHELINE
     510             : #endif
     511             : 
     512             : typedef struct malloc_rtree_s malloc_rtree_t;
     513             : struct malloc_rtree_s {
     514             :         malloc_spinlock_t       lock;
     515             :         void                    **root;
     516             :         unsigned                height;
     517             :         unsigned                level2bits[1]; /* Dynamically sized. */
     518             : };
     519             : 
     520             : /******************************************************************************/
     521             : /*
     522             :  * Arena data structures.
     523             :  */
     524             : 
     525             : typedef struct arena_s arena_t;
     526             : typedef struct arena_bin_s arena_bin_t;
     527             : 
     528             : /* Each element of the chunk map corresponds to one page within the chunk. */
     529             : typedef struct arena_chunk_map_s arena_chunk_map_t;
     530             : struct arena_chunk_map_s {
     531             :         /*
     532             :          * Linkage for run trees.  There are two disjoint uses:
     533             :          *
     534             :          * 1) arena_t's runs_avail tree.
     535             :          * 2) arena_run_t conceptually uses this linkage for in-use non-full
     536             :          *    runs, rather than directly embedding linkage.
     537             :          */
     538             :         rb_node(arena_chunk_map_t)      link;
     539             : 
     540             :         /*
     541             :          * Run address (or size) and various flags are stored together.  The bit
     542             :          * layout looks like (assuming 32-bit system):
     543             :          *
     544             :          *   ???????? ???????? ????---- -mckdzla
     545             :          *
     546             :          * ? : Unallocated: Run address for first/last pages, unset for internal
     547             :          *                  pages.
     548             :          *     Small: Run address.
     549             :          *     Large: Run size for first page, unset for trailing pages.
     550             :          * - : Unused.
     551             :          * m : MADV_FREE/MADV_DONTNEED'ed?
     552             :          * c : decommitted?
     553             :          * k : key?
     554             :          * d : dirty?
     555             :          * z : zeroed?
     556             :          * l : large?
     557             :          * a : allocated?
     558             :          *
     559             :          * Following are example bit patterns for the three types of runs.
     560             :          *
     561             :          * r : run address
     562             :          * s : run size
     563             :          * x : don't care
     564             :          * - : 0
     565             :          * [cdzla] : bit set
     566             :          *
     567             :          *   Unallocated:
     568             :          *     ssssssss ssssssss ssss---- --c-----
     569             :          *     xxxxxxxx xxxxxxxx xxxx---- ----d---
     570             :          *     ssssssss ssssssss ssss---- -----z--
     571             :          *
     572             :          *   Small:
     573             :          *     rrrrrrrr rrrrrrrr rrrr---- -------a
     574             :          *     rrrrrrrr rrrrrrrr rrrr---- -------a
     575             :          *     rrrrrrrr rrrrrrrr rrrr---- -------a
     576             :          *
     577             :          *   Large:
     578             :          *     ssssssss ssssssss ssss---- ------la
     579             :          *     -------- -------- -------- ------la
     580             :          *     -------- -------- -------- ------la
     581             :          */
     582             :         size_t                          bits;
     583             : 
     584             : /* Note that CHUNK_MAP_DECOMMITTED's meaning varies depending on whether
     585             :  * MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are defined.
     586             :  *
     587             :  * If MALLOC_DECOMMIT is defined, a page which is CHUNK_MAP_DECOMMITTED must be
     588             :  * re-committed with pages_commit() before it may be touched.  If
     589             :  * MALLOC_DECOMMIT is defined, MALLOC_DOUBLE_PURGE may not be defined.
     590             :  *
     591             :  * If neither MALLOC_DECOMMIT nor MALLOC_DOUBLE_PURGE is defined, pages which
     592             :  * are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
     593             :  * CHUNK_MAP_MADVISED.
     594             :  *
     595             :  * Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
     596             :  * defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
     597             :  * When it's finally freed with jemalloc_purge_freed_pages, the page is marked
     598             :  * as CHUNK_MAP_DECOMMITTED.
     599             :  */
     600             : #define CHUNK_MAP_MADVISED      ((size_t)0x40U)
     601             : #define CHUNK_MAP_DECOMMITTED   ((size_t)0x20U)
     602             : #define CHUNK_MAP_MADVISED_OR_DECOMMITTED (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
     603             : #define CHUNK_MAP_KEY           ((size_t)0x10U)
     604             : #define CHUNK_MAP_DIRTY         ((size_t)0x08U)
     605             : #define CHUNK_MAP_ZEROED        ((size_t)0x04U)
     606             : #define CHUNK_MAP_LARGE         ((size_t)0x02U)
     607             : #define CHUNK_MAP_ALLOCATED     ((size_t)0x01U)
     608             : };
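/*
 * Worked example, assuming 4 kB pages: the first page of a 12 kB large run
 * has bits == 0x00003003, i.e. run size 0x3000 (12 kB) together with
 * CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; the two trailing pages carry just
 * 0x00000003 (flags only, size unset), matching the "Large" pattern above.
 */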
     609             : typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
     610             : typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
     611             : 
     612             : /* Arena chunk header. */
     613             : typedef struct arena_chunk_s arena_chunk_t;
     614             : struct arena_chunk_s {
     615             :         /* Arena that owns the chunk. */
     616             :         arena_t         *arena;
     617             : 
     618             :         /* Linkage for the arena's chunks_dirty tree. */
     619             :         rb_node(arena_chunk_t) link_dirty;
     620             : 
     621             : #ifdef MALLOC_DOUBLE_PURGE
     622             :         /* If we're double-purging, we maintain a linked list of chunks which
     623             :          * have pages which have been madvise(MADV_FREE)'d but not explicitly
     624             :          * purged.
     625             :          *
     626             :          * We're currently lazy and don't remove a chunk from this list when
     627             :          * all its madvised pages are recommitted. */
     628             :         LinkedList      chunks_madvised_elem;
     629             : #endif
     630             : 
     631             :         /* Number of dirty pages. */
     632             :         size_t          ndirty;
     633             : 
     634             :         /* Map of pages within chunk that keeps track of free/large/small. */
     635             :         arena_chunk_map_t map[1]; /* Dynamically sized. */
     636             : };
     637             : typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
     638             : 
     639             : typedef struct arena_run_s arena_run_t;
     640             : struct arena_run_s {
     641             : #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
     642             :         uint32_t        magic;
     643             : #  define ARENA_RUN_MAGIC 0x384adf93
     644             : #endif
     645             : 
     646             :         /* Bin this run is associated with. */
     647             :         arena_bin_t     *bin;
     648             : 
     649             :         /* Index of first element that might have a free region. */
     650             :         unsigned        regs_minelm;
     651             : 
     652             :         /* Number of free regions in run. */
     653             :         unsigned        nfree;
     654             : 
     655             :         /* Bitmask of in-use regions (0: in use, 1: free). */
     656             :         unsigned        regs_mask[1]; /* Dynamically sized. */
     657             : };
     658             : 
     659             : struct arena_bin_s {
     660             :         /*
     661             :          * Current run being used to service allocations of this bin's size
     662             :          * class.
     663             :          */
     664             :         arena_run_t     *runcur;
     665             : 
     666             :         /*
     667             :          * Tree of non-full runs.  This tree is used when looking for an
     668             :          * existing run when runcur is no longer usable.  We choose the
     669             :          * non-full run that is lowest in memory; this policy tends to keep
     670             :          * objects packed well, and it can also help reduce the number of
     671             :          * almost-empty chunks.
     672             :          */
     673             :         arena_run_tree_t runs;
     674             : 
     675             :         /* Size of regions in a run for this bin's size class. */
     676             :         size_t          reg_size;
     677             : 
     678             :         /* Total size of a run for this bin's size class. */
     679             :         size_t          run_size;
     680             : 
     681             :         /* Total number of regions in a run for this bin's size class. */
     682             :         uint32_t        nregs;
     683             : 
     684             :         /* Number of elements in a run's regs_mask for this bin's size class. */
     685             :         uint32_t        regs_mask_nelms;
     686             : 
     687             :         /* Offset of first region in a run for this bin's size class. */
     688             :         uint32_t        reg0_offset;
     689             : 
     690             :         /* Bin statistics. */
     691             :         malloc_bin_stats_t stats;
     692             : };
     693             : 
     694             : struct arena_s {
     695             : #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
     696             :         uint32_t                magic;
     697             : #  define ARENA_MAGIC 0x947d3d24
     698             : #endif
     699             : 
     700             :         /* All operations on this arena require that lock be locked. */
     701             :         malloc_spinlock_t       lock;
     702             : 
     703             :         arena_stats_t           stats;
     704             : 
     705             :         /* Tree of dirty-page-containing chunks this arena manages. */
     706             :         arena_chunk_tree_t      chunks_dirty;
     707             : 
     708             : #ifdef MALLOC_DOUBLE_PURGE
     709             :         /* Head of a linked list of MADV_FREE'd-page-containing chunks this
     710             :          * arena manages. */
     711             :         LinkedList              chunks_madvised;
     712             : #endif
     713             : 
     714             :         /*
     715             :          * In order to avoid rapid chunk allocation/deallocation when an arena
     716             :          * oscillates right on the cusp of needing a new chunk, cache the most
     717             :          * recently freed chunk.  The spare is left in the arena's chunk trees
     718             :          * until it is deleted.
     719             :          *
     720             :          * There is one spare chunk per arena, rather than one spare total, in
     721             :          * order to avoid interactions between multiple threads that could make
     722             :          * a single spare inadequate.
     723             :          */
     724             :         arena_chunk_t           *spare;
     725             : 
     726             :         /*
     727             :          * Current count of pages within unused runs that are potentially
     728             :          * dirty, and for which madvise(... MADV_FREE) has not been called.  By
     729             :          * tracking this, we can institute a limit on how much dirty unused
     730             :          * memory is mapped for each arena.
     731             :          */
     732             :         size_t                  ndirty;
     733             : 
     734             :         /*
     735             :          * Size/address-ordered tree of this arena's available runs.  This tree
     736             :          * is used for first-best-fit run allocation.
     737             :          */
     738             :         arena_avail_tree_t      runs_avail;
     739             : 
     740             :         /*
     741             :          * bins is used to store rings of free regions of the following sizes,
     742             :          * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
     743             :          *
     744             :          *   bins[i] | size |
     745             :          *   --------+------+
     746             :          *        0  |    2 |
     747             :          *        1  |    4 |
     748             :          *        2  |    8 |
     749             :          *   --------+------+
     750             :          *        3  |   16 |
     751             :          *        4  |   32 |
     752             :          *        5  |   48 |
     753             :          *        6  |   64 |
     754             :          *           :      :
     755             :          *           :      :
     756             :          *       33  |  496 |
     757             :          *       34  |  512 |
     758             :          *   --------+------+
     759             :          *       35  | 1024 |
     760             :          *       36  | 2048 |
     761             :          *   --------+------+
     762             :          */
     763             :         arena_bin_t             bins[1]; /* Dynamically sized. */
     764             : };
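/*
 * Worked example for the bins table above: a malloc(100) request is
 * quantum-spaced, rounds up to 112 and lands in bins[3 + 112/16 - 1], i.e.
 * bins[9]; a malloc(1000) request is sub-page, rounds up to 1024 and lands
 * in bins[35].
 */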
     765             : 
     766             : /******************************************************************************/
     767             : /*
     768             :  * Data.
     769             :  */
     770             : 
     771             : /*
      772             :  * When MALLOC_STATIC_SIZES is defined, most of the parameters
      773             :  * controlling malloc behavior are compile-time constants, chosen for
      774             :  * best performance, and cannot be altered at runtime.
     775             :  */
     776             : #if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && !defined(__aarch64__)
     777             : #define MALLOC_STATIC_SIZES 1
     778             : #endif
     779             : 
     780             : #ifdef MALLOC_STATIC_SIZES
     781             : 
     782             : /*
     783             :  * VM page size. It must divide the runtime CPU page size or the code
     784             :  * will abort.
     785             :  * Platform specific page size conditions copied from js/public/HeapAPI.h
     786             :  */
     787             : #if (defined(SOLARIS) || defined(__FreeBSD__)) && \
     788             :     (defined(__sparc) || defined(__sparcv9) || defined(__ia64))
     789             : #define pagesize_2pow                   ((size_t) 13)
     790             : #elif defined(__powerpc64__)
     791             : #define pagesize_2pow                   ((size_t) 16)
     792             : #else
     793             : #define pagesize_2pow                   ((size_t) 12)
     794             : #endif
     795             : #define pagesize                        ((size_t) 1 << pagesize_2pow)
     796             : #define pagesize_mask                   (pagesize - 1)
     797             : 
     798             : /* Various quantum-related settings. */
     799             : 
     800             : #define QUANTUM_DEFAULT                 ((size_t) 1 << QUANTUM_2POW_MIN)
     801             : static const size_t     quantum =       QUANTUM_DEFAULT;
     802             : static const size_t     quantum_mask =  QUANTUM_DEFAULT - 1;
     803             : 
     804             : /* Various bin-related settings. */
     805             : 
     806             : static const size_t     small_min =     (QUANTUM_DEFAULT >> 1) + 1;
     807             : static const size_t     small_max =     (size_t) SMALL_MAX_DEFAULT;
     808             : 
     809             : /* Max size class for bins. */
     810             : static const size_t     bin_maxclass =  pagesize >> 1;
     811             : 
     812             :  /* Number of (2^n)-spaced tiny bins. */
     813             : static const unsigned   ntbins =        (unsigned)
     814             :                                         (QUANTUM_2POW_MIN - TINY_MIN_2POW);
     815             : 
     816             :  /* Number of quantum-spaced bins. */
     817             : static const unsigned   nqbins =        (unsigned)
     818             :                                         (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN);
     819             : 
     820             : /* Number of (2^n)-spaced sub-page bins. */
     821             : static const unsigned   nsbins =        (unsigned)
     822             :                                         (pagesize_2pow -
     823             :                                          SMALL_MAX_2POW_DEFAULT - 1);
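/*
 * With the defaults above, these work out to (for a 64-bit Linux build:
 * 4 kB pages, 16-byte quantum, TINY_MIN_2POW == 3):
 *
 *   ntbins = 4 - 3      = 1
 *   nqbins = 512 >> 4   = 32
 *   nsbins = 12 - 9 - 1 = 2
 */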
     824             : 
     825             : #else /* !MALLOC_STATIC_SIZES */
     826             : 
     827             : /* VM page size. */
     828             : static size_t           pagesize;
     829             : static size_t           pagesize_mask;
     830             : static size_t           pagesize_2pow;
     831             : 
     832             : /* Various bin-related settings. */
     833             : static size_t           bin_maxclass; /* Max size class for bins. */
     834             : static unsigned         ntbins; /* Number of (2^n)-spaced tiny bins. */
     835             : static unsigned         nqbins; /* Number of quantum-spaced bins. */
     836             : static unsigned         nsbins; /* Number of (2^n)-spaced sub-page bins. */
     837             : static size_t           small_min;
     838             : static size_t           small_max;
     839             : 
     840             : /* Various quantum-related settings. */
     841             : static size_t           quantum;
     842             : static size_t           quantum_mask; /* (quantum - 1). */
     843             : 
     844             : #endif
     845             : 
     846             : /* Various chunk-related settings. */
     847             : 
     848             : /*
     849             :  * Compute the header size such that it is large enough to contain the page map
     850             :  * and enough nodes for the worst case: one node per non-header page plus one
     851             :  * extra for situations where we briefly have one more node allocated than we
     852             :  * will need.
     853             :  */
     854             : #define calculate_arena_header_size()                                   \
     855             :         (sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1))
     856             : 
     857             : #define calculate_arena_header_pages()                                  \
     858             :         ((calculate_arena_header_size() >> pagesize_2pow) +               \
     859             :          ((calculate_arena_header_size() & pagesize_mask) ? 1 : 0))
     860             : 
     861             : /* Max size class for arenas. */
     862             : #define calculate_arena_maxclass()                                      \
     863             :         (chunksize - (arena_chunk_header_npages << pagesize_2pow))
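/*
 * For example, with the static defaults (1 MiB chunks, 4 kB pages) we get
 * chunk_npages == 256; on the 32-bit layout assumed by the size-class table
 * at the top of this file the computed header fits in a single page, so
 * arena_maxclass == 1 MiB - 4 kB == 1020 kB.  Anything larger is handled as
 * a huge allocation.
 */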
     864             : 
     865             : /*
     866             :  * Recycle at most 128 chunks. With 1 MiB chunks, this means we retain at most
     867             :  * 6.25% of the process address space on a 32-bit OS for later use.
     868             :  */
     869             : #define CHUNK_RECYCLE_LIMIT 128
     870             : 
     871             : #ifdef MALLOC_STATIC_SIZES
     872             : #define CHUNKSIZE_DEFAULT               ((size_t) 1 << CHUNK_2POW_DEFAULT)
     873             : static const size_t     chunksize =     CHUNKSIZE_DEFAULT;
     874             : static const size_t     chunksize_mask =CHUNKSIZE_DEFAULT - 1;
     875             : static const size_t     chunk_npages =  CHUNKSIZE_DEFAULT >> pagesize_2pow;
     876             : #define arena_chunk_header_npages       calculate_arena_header_pages()
     877             : #define arena_maxclass                  calculate_arena_maxclass()
     878             : static const size_t     recycle_limit = CHUNK_RECYCLE_LIMIT * CHUNKSIZE_DEFAULT;
     879             : #else
     880             : static size_t           chunksize;
     881             : static size_t           chunksize_mask; /* (chunksize - 1). */
     882             : static size_t           chunk_npages;
     883             : static size_t           arena_chunk_header_npages;
     884             : static size_t           arena_maxclass; /* Max size class for arenas. */
     885             : static size_t           recycle_limit;
     886             : #endif
     887             : 
     888             : /* The current amount of recycled bytes, updated atomically. */
     889             : static size_t recycled_size;
     890             : 
     891             : /********/
     892             : /*
     893             :  * Chunks.
     894             :  */
     895             : 
     896             : static malloc_rtree_t *chunk_rtree;
     897             : 
     898             : /* Protects chunk-related data structures. */
     899             : static malloc_mutex_t   chunks_mtx;
     900             : 
     901             : /*
     902             :  * Trees of chunks that were previously allocated (trees differ only in node
     903             :  * ordering).  These are used when allocating chunks, in an attempt to re-use
     904             :  * address space.  Depending on function, different tree orderings are needed,
     905             :  * which is why there are two trees with the same contents.
     906             :  */
     907             : static extent_tree_t    chunks_szad_mmap;
     908             : static extent_tree_t    chunks_ad_mmap;
     909             : 
     910             : /* Protects huge allocation-related data structures. */
     911             : static malloc_mutex_t   huge_mtx;
     912             : 
     913             : /* Tree of chunks that are stand-alone huge allocations. */
     914             : static extent_tree_t    huge;
     915             : 
     916             : /* Huge allocation statistics. */
     917             : static uint64_t         huge_nmalloc;
     918             : static uint64_t         huge_ndalloc;
     919             : static size_t           huge_allocated;
     920             : static size_t           huge_mapped;
     921             : 
     922             : /****************************/
     923             : /*
     924             :  * base (internal allocation).
     925             :  */
     926             : 
     927             : /*
     928             :  * Current pages that are being used for internal memory allocations.  These
     929             :  * pages are carved up in cacheline-size quanta, so that there is no chance of
     930             :  * false cache line sharing.
     931             :  */
     932             : static void             *base_pages;
     933             : static void             *base_next_addr;
     934             : static void             *base_next_decommitted;
     935             : static void             *base_past_addr; /* Addr immediately past base_pages. */
     936             : static extent_node_t    *base_nodes;
     937             : static malloc_mutex_t   base_mtx;
     938             : static size_t           base_mapped;
     939             : static size_t           base_committed;
     940             : 
     941             : /********/
     942             : /*
     943             :  * Arenas.
     944             :  */
     945             : 
     946             : /*
     947             :  * Arenas that are used to service external requests.  Not all elements of the
     948             :  * arenas array are necessarily used; arenas are created lazily as needed.
     949             :  */
     950             : static arena_t          **arenas;
     951             : static unsigned         narenas;
     952             : static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
     953             : 
     954             : #ifndef NO_TLS
     955             : /*
     956             :  * Map of pthread_self() --> arenas[???], used for selecting an arena to use
     957             :  * for allocations.
     958             :  */
     959             : #if !defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN)
     960             : static __thread arena_t *arenas_map;
     961             : #endif
     962             : #endif
     963             : 
     964             : /*******************************/
     965             : /*
     966             :  * Runtime configuration options.
     967             :  */
     968             : const uint8_t kAllocJunk = 0xe4;
     969             : const uint8_t kAllocPoison = 0xe5;
     970             : 
     971             : #ifdef MOZ_DEBUG
     972             : static bool     opt_junk = true;
     973             : static bool     opt_zero = false;
     974             : #else
     975             : static const bool       opt_junk = false;
     976             : static const bool       opt_zero = false;
     977             : #endif
     978             : 
     979             : static size_t   opt_dirty_max = DIRTY_MAX_DEFAULT;
     980             : #ifdef MALLOC_STATIC_SIZES
     981             : #define opt_quantum_2pow        QUANTUM_2POW_MIN
     982             : #define opt_small_max_2pow      SMALL_MAX_2POW_DEFAULT
     983             : #define opt_chunk_2pow          CHUNK_2POW_DEFAULT
     984             : #else
     985             : static size_t   opt_quantum_2pow = QUANTUM_2POW_MIN;
     986             : static size_t   opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
     987             : static size_t   opt_chunk_2pow = CHUNK_2POW_DEFAULT;
     988             : #endif
     989             : 
     990             : /******************************************************************************/
     991             : /*
     992             :  * Begin forward declarations.
     993             :  */
     994             : 
     995             : static void     *chunk_alloc(size_t size, size_t alignment, bool base, bool *zeroed=nullptr);
     996             : static void     chunk_dealloc(void *chunk, size_t size, ChunkType chunk_type);
     997             : static void     chunk_ensure_zero(void* ptr, size_t size, bool zeroed);
     998             : static arena_t  *arenas_extend();
     999             : static void     *huge_malloc(size_t size, bool zero);
    1000             : static void     *huge_palloc(size_t size, size_t alignment, bool zero);
    1001             : static void     *huge_ralloc(void *ptr, size_t size, size_t oldsize);
    1002             : static void     huge_dalloc(void *ptr);
    1003             : #ifdef MOZ_MEMORY_WINDOWS
    1004             : extern "C"
    1005             : #else
    1006             : static
    1007             : #endif
    1008             : bool            malloc_init_hard(void);
    1009             : 
    1010             : #ifdef MOZ_MEMORY_DARWIN
    1011             : #define FORK_HOOK extern "C"
    1012             : #else
    1013             : #define FORK_HOOK static
    1014             : #endif
    1015             : FORK_HOOK void _malloc_prefork(void);
    1016             : FORK_HOOK void _malloc_postfork_parent(void);
    1017             : FORK_HOOK void _malloc_postfork_child(void);
    1018             : 
    1019             : /*
    1020             :  * End forward declarations.
    1021             :  */
    1022             : /******************************************************************************/
    1023             : 
    1024             : static inline size_t
    1025             : load_acquire_z(size_t *p)
    1026             : {
    1027           3 :         volatile size_t result = *p;
    1028             : #  ifdef MOZ_MEMORY_WINDOWS
    1029             :         /*
    1030             :          * We use InterlockedExchange with a dummy value to insert a memory
    1031             :          * barrier. This has been confirmed to generate the right instruction
    1032             :          * and is also used by MinGW.
    1033             :          */
    1034             :         volatile long dummy = 0;
    1035             :         InterlockedExchange(&dummy, 1);
    1036             : #  else
    1037           3 :         __sync_synchronize();
    1038             : #  endif
    1039           3 :         return result;
    1040             : }
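
The volatile load followed by a full barrier above is at least as strong as an acquire load. For readers more used to C++11 atomics, a minimal equivalent sketch (illustrative only; this file rolls its own barrier rather than depending on <atomic>):

#include <atomic>
#include <cstddef>

// Acquire-ordered load of a size_t counter: later reads/writes cannot be
// reordered before this load, matching the intent of load_acquire_z().
static inline size_t
load_acquire_z_cxx11(const std::atomic<size_t>* p)
{
        return p->load(std::memory_order_acquire);
}
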
    1041             : 
    1042             : static void
    1043           0 : _malloc_message(const char *p)
    1044             : {
    1045             : #if !defined(MOZ_MEMORY_WINDOWS)
    1046             : #define _write  write
    1047             : #endif
    1048             :   // Pretend to check _write() errors to suppress gcc warnings about
    1049             :   // warn_unused_result annotations in some versions of glibc headers.
    1050           0 :   if (_write(STDERR_FILENO, p, (unsigned int) strlen(p)) < 0)
    1051             :     return;
    1052             : }
    1053             : 
    1054             : template <typename... Args>
    1055             : static void
    1056           0 : _malloc_message(const char *p, Args... args)
    1057             : {
    1058           0 :   _malloc_message(p);
    1059           0 :   _malloc_message(args...);
    1060           0 : }
    1061             : 
    1062             : #include "mozilla/Assertions.h"
    1063             : #include "mozilla/Attributes.h"
    1064             : #include "mozilla/TaggedAnonymousMemory.h"
    1065             : // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
    1066             : // instead of the one defined here; use only MozTagAnonymousMemory().
    1067             : 
    1068             : #ifdef MOZ_MEMORY_ANDROID
    1069             : // Android's pthread.h does not declare pthread_atfork() until SDK 21.
    1070             : extern "C" MOZ_EXPORT
    1071             : int pthread_atfork(void (*)(void), void (*)(void), void(*)(void));
    1072             : #endif
    1073             : 
    1074             : /******************************************************************************/
    1075             : /*
    1076             :  * Begin mutex.  We can't use normal pthread mutexes in all places, because
    1077             :  * they require malloc()ed memory, which causes bootstrapping issues in some
    1078             :  * cases.
    1079             :  */
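
Concretely, the constraint is that every lock used here must be creatable and lockable without calling back into malloc(). A minimal POSIX-only sketch of such a bootstrap-safe lock (hypothetical names, not the allocator's own types):

#include <pthread.h>

// Static storage plus a compile-time initializer: taking this lock never
// allocates, so it is safe to use while the allocator is bootstrapping.
static pthread_mutex_t gBootstrapLock = PTHREAD_MUTEX_INITIALIZER;

static void
with_bootstrap_lock(void (*fn)(void))
{
        pthread_mutex_lock(&gBootstrapLock);
        fn();
        pthread_mutex_unlock(&gBootstrapLock);
}
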
    1080             : 
    1081             : static bool
    1082          18 : malloc_mutex_init(malloc_mutex_t *mutex)
    1083             : {
    1084             : #if defined(MOZ_MEMORY_WINDOWS)
    1085             :         if (!InitializeCriticalSectionAndSpinCount(mutex, _CRT_SPINCOUNT))
    1086             :                 return (true);
    1087             : #elif defined(MOZ_MEMORY_DARWIN)
    1088             :         mutex->lock = OS_SPINLOCK_INIT;
    1089             : #elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
    1090             :         pthread_mutexattr_t attr;
    1091          18 :         if (pthread_mutexattr_init(&attr) != 0)
    1092             :                 return (true);
    1093          18 :         pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
    1094          18 :         if (pthread_mutex_init(mutex, &attr) != 0) {
    1095           0 :                 pthread_mutexattr_destroy(&attr);
    1096           0 :                 return (true);
    1097             :         }
    1098          18 :         pthread_mutexattr_destroy(&attr);
    1099             : #else
    1100             :         if (pthread_mutex_init(mutex, nullptr) != 0)
    1101             :                 return (true);
    1102             : #endif
    1103          18 :         return (false);
    1104             : }
    1105             : 
    1106             : static inline void
    1107             : malloc_mutex_lock(malloc_mutex_t *mutex)
    1108             : {
    1109             : 
    1110             : #if defined(MOZ_MEMORY_WINDOWS)
    1111             :         EnterCriticalSection(mutex);
    1112             : #elif defined(MOZ_MEMORY_DARWIN)
    1113             :         OSSpinLockLock(&mutex->lock);
    1114             : #else
    1115     1955540 :         pthread_mutex_lock(mutex);
    1116             : #endif
    1117             : }
    1118             : 
    1119             : static inline void
    1120             : malloc_mutex_unlock(malloc_mutex_t *mutex)
    1121             : {
    1122             : 
    1123             : #if defined(MOZ_MEMORY_WINDOWS)
    1124             :         LeaveCriticalSection(mutex);
    1125             : #elif defined(MOZ_MEMORY_DARWIN)
    1126             :         OSSpinLockUnlock(&mutex->lock);
    1127             : #else
    1128     1956094 :         pthread_mutex_unlock(mutex);
    1129             : #endif
    1130             : }
    1131             : 
    1132             : #if (defined(__GNUC__))
    1133             : __attribute__((unused))
     1134             : #endif
    1135             : static bool
    1136             : malloc_spin_init(malloc_spinlock_t *lock)
    1137             : {
    1138             : #if defined(MOZ_MEMORY_WINDOWS)
    1139             :         if (!InitializeCriticalSectionAndSpinCount(lock, _CRT_SPINCOUNT))
    1140             :                         return (true);
    1141             : #elif defined(MOZ_MEMORY_DARWIN)
    1142             :         lock->lock = OS_SPINLOCK_INIT;
    1143             : #elif defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
    1144             :         pthread_mutexattr_t attr;
    1145             :         if (pthread_mutexattr_init(&attr) != 0)
    1146             :                 return (true);
    1147             :         pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
    1148             :         if (pthread_mutex_init(lock, &attr) != 0) {
    1149             :                 pthread_mutexattr_destroy(&attr);
    1150             :                 return (true);
    1151             :         }
    1152             :         pthread_mutexattr_destroy(&attr);
    1153             : #else
    1154             :         if (pthread_mutex_init(lock, nullptr) != 0)
    1155             :                 return (true);
    1156             : #endif
    1157             :         return (false);
    1158             : }
    1159             : 
    1160             : static inline void
    1161             : malloc_spin_lock(malloc_spinlock_t *lock)
    1162             : {
    1163             : 
    1164             : #if defined(MOZ_MEMORY_WINDOWS)
    1165             :         EnterCriticalSection(lock);
    1166             : #elif defined(MOZ_MEMORY_DARWIN)
    1167             :         OSSpinLockLock(&lock->lock);
    1168             : #else
    1169             :         pthread_mutex_lock(lock);
    1170             : #endif
    1171             : }
    1172             : 
    1173             : static inline void
    1174             : malloc_spin_unlock(malloc_spinlock_t *lock)
    1175             : {
    1176             : #if defined(MOZ_MEMORY_WINDOWS)
    1177             :         LeaveCriticalSection(lock);
    1178             : #elif defined(MOZ_MEMORY_DARWIN)
    1179             :         OSSpinLockUnlock(&lock->lock);
    1180             : #else
    1181             :         pthread_mutex_unlock(lock);
    1182             : #endif
    1183             : }
    1184             : 
    1185             : /*
    1186             :  * End mutex.
    1187             :  */
    1188             : /******************************************************************************/
    1189             : /*
    1190             :  * Begin spin lock.  Spin locks here are actually adaptive mutexes that block
    1191             :  * after a period of spinning, because unbounded spinning would allow for
    1192             :  * priority inversion.
    1193             :  */
    1194             : 
    1195             : #if !defined(MOZ_MEMORY_DARWIN)
    1196             : #  define       malloc_spin_init        malloc_mutex_init
    1197             : #  define       malloc_spin_lock        malloc_mutex_lock
    1198             : #  define       malloc_spin_unlock      malloc_mutex_unlock
    1199             : #endif
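
A minimal sketch of the spin-then-block idea in isolation, as a hypothetical class (POSIX-only, illustrative; on the platforms covered by the aliases above the allocator simply reuses the adaptive mutexes rather than anything like this):

#include <atomic>
#include <sched.h>

// Spin briefly on the assumption that critical sections are short, then
// yield the CPU so a preempted lock holder can run; unbounded spinning is
// what opens the door to priority inversion.
class SpinThenYieldLock {
        std::atomic<bool> mLocked{false};
public:
        void lock()
        {
                unsigned spins = 0;
                while (mLocked.exchange(true, std::memory_order_acquire)) {
                        if (++spins > 100)
                                sched_yield();
                }
        }
        void unlock()
        {
                mLocked.store(false, std::memory_order_release);
        }
};
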
    1200             : 
    1201             : /*
    1202             :  * End spin lock.
    1203             :  */
    1204             : /******************************************************************************/
    1205             : /*
    1206             :  * Begin Utility functions/macros.
    1207             :  */
    1208             : 
    1209             : /* Return the chunk address for allocation address a. */
    1210             : #define CHUNK_ADDR2BASE(a)                                              \
    1211             :         ((void *)((uintptr_t)(a) & ~chunksize_mask))
    1212             : 
    1213             : /* Return the chunk offset of address a. */
    1214             : #define CHUNK_ADDR2OFFSET(a)                                            \
    1215             :         ((size_t)((uintptr_t)(a) & chunksize_mask))
    1216             : 
    1217             : /* Return the smallest chunk multiple that is >= s. */
    1218             : #define CHUNK_CEILING(s)                                                \
    1219             :         (((s) + chunksize_mask) & ~chunksize_mask)
    1220             : 
    1221             : /* Return the smallest cacheline multiple that is >= s. */
    1222             : #define CACHELINE_CEILING(s)                                            \
    1223             :         (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
    1224             : 
    1225             : /* Return the smallest quantum multiple that is >= a. */
    1226             : #define QUANTUM_CEILING(a)                                              \
    1227             :         (((a) + quantum_mask) & ~quantum_mask)
    1228             : 
    1229             : /* Return the smallest pagesize multiple that is >= s. */
    1230             : #define PAGE_CEILING(s)                                                 \
    1231             :         (((s) + pagesize_mask) & ~pagesize_mask)
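
All of the macros above use the same mask-and-round idiom: add (alignment - 1), then clear the low bits. A small worked example with hypothetical sizes (4 KiB pages, 1 MiB chunks; the real pagesize/chunksize masks are defined elsewhere in this file):

#include <cassert>
#include <cstdint>

static void
ceiling_macro_examples()
{
        const uintptr_t kPageMask = 4096 - 1;           // hypothetical 4 KiB pages
        const uintptr_t kChunkMask = (1 << 20) - 1;     // hypothetical 1 MiB chunks

        // PAGE_CEILING-style rounding: 4097 rounds up to the next page...
        assert(((4097 + kPageMask) & ~kPageMask) == 8192);
        // ...and an already page-aligned value is unchanged.
        assert(((8192 + kPageMask) & ~kPageMask) == 8192);
        // CHUNK_ADDR2BASE-style masking recovers the chunk start of an address,
        // CHUNK_ADDR2OFFSET-style masking the offset within that chunk.
        uintptr_t addr = 0x2345678;
        assert((addr & ~kChunkMask) == 0x2300000);
        assert((addr & kChunkMask) == 0x45678);
}
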
    1232             : 
    1233             : /* Compute the smallest power of 2 that is >= x. */
    1234             : static inline size_t
    1235      186077 : pow2_ceil(size_t x)
    1236             : {
    1237             : 
    1238      186080 :         x--;
    1239      186080 :         x |= x >> 1;
    1240      186080 :         x |= x >> 2;
    1241      186080 :         x |= x >> 4;
    1242      186080 :         x |= x >> 8;
    1243      186080 :         x |= x >> 16;
    1244             : #if (SIZEOF_PTR == 8)
    1245      186080 :         x |= x >> 32;
    1246             : #endif
    1247      186080 :         x++;
    1248      186077 :         return (x);
    1249             : }
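
A few sanity values for the bit-smearing above, assuming the pow2_ceil() just defined is in scope: powers of two map to themselves, everything else rounds up to the next power of two.

#include <cassert>

static void
pow2_ceil_examples()
{
        assert(pow2_ceil(1) == 1);
        assert(pow2_ceil(17) == 32);        // 17 smears to 31, then +1
        assert(pow2_ceil(4096) == 4096);    // already a power of two
        assert(pow2_ceil(4097) == 8192);
}
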
    1250             : 
    1251             : static inline const char *
    1252             : _getprogname(void)
    1253             : {
    1254             : 
    1255             :         return ("<jemalloc>");
    1256             : }
    1257             : 
    1258             : /******************************************************************************/
    1259             : 
    1260             : static inline void
    1261             : pages_decommit(void *addr, size_t size)
    1262             : {
    1263             : 
    1264             : #ifdef MOZ_MEMORY_WINDOWS
    1265             :         /*
    1266             :         * The region starting at addr may have been allocated in multiple calls
    1267             :         * to VirtualAlloc and recycled, so decommitting the entire region in one
    1268             :         * go may not be valid. However, since we allocate at least a chunk at a
    1269             :         * time, we may touch any region in chunksized increments.
    1270             :         */
    1271             :         size_t pages_size = std::min(size, chunksize -
    1272             :                 CHUNK_ADDR2OFFSET((uintptr_t)addr));
    1273             :         while (size > 0) {
    1274             :                 if (!VirtualFree(addr, pages_size, MEM_DECOMMIT))
    1275             :                         moz_abort();
    1276             :                 addr = (void *)((uintptr_t)addr + pages_size);
    1277             :                 size -= pages_size;
    1278             :                 pages_size = std::min(size, chunksize);
    1279             :         }
    1280             : #else
    1281             :         if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
    1282             :             0) == MAP_FAILED)
    1283             :                 moz_abort();
    1284             :         MozTagAnonymousMemory(addr, size, "jemalloc-decommitted");
    1285             : #endif
    1286             : }
    1287             : 
    1288             : static inline void
    1289             : pages_commit(void *addr, size_t size)
    1290             : {
    1291             : 
    1292             : #  ifdef MOZ_MEMORY_WINDOWS
    1293             :         /*
    1294             :         * The region starting at addr may have been allocated in multiple calls
    1295             :         * to VirtualAlloc and recycled, so committing the entire region in one
    1296             :         * go may not be valid. However, since we allocate at least a chunk at a
    1297             :         * time, we may touch any region in chunksized increments.
    1298             :         */
    1299             :         size_t pages_size = std::min(size, chunksize -
    1300             :                 CHUNK_ADDR2OFFSET((uintptr_t)addr));
    1301             :         while (size > 0) {
    1302             :                 if (!VirtualAlloc(addr, pages_size, MEM_COMMIT, PAGE_READWRITE))
    1303             :                         moz_abort();
    1304             :                 addr = (void *)((uintptr_t)addr + pages_size);
    1305             :                 size -= pages_size;
    1306             :                 pages_size = std::min(size, chunksize);
    1307             :         }
    1308             : #  else
    1309             :         if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
    1310             :             MAP_ANON, -1, 0) == MAP_FAILED)
    1311             :                 moz_abort();
    1312             :         MozTagAnonymousMemory(addr, size, "jemalloc");
    1313             : #  endif
    1314             : }
    1315             : 
    1316             : static bool
    1317           3 : base_pages_alloc(size_t minsize)
    1318             : {
    1319             :         size_t csize;
    1320             :         size_t pminsize;
    1321             : 
    1322           3 :         MOZ_ASSERT(minsize != 0);
    1323           3 :         csize = CHUNK_CEILING(minsize);
    1324           3 :         base_pages = chunk_alloc(csize, chunksize, true);
    1325           3 :         if (!base_pages)
    1326             :                 return (true);
    1327           3 :         base_next_addr = base_pages;
    1328           3 :         base_past_addr = (void *)((uintptr_t)base_pages + csize);
    1329             :         /*
    1330             :          * Leave enough pages for minsize committed, since otherwise they would
    1331             :          * have to be immediately recommitted.
    1332             :          */
    1333           3 :         pminsize = PAGE_CEILING(minsize);
    1334           3 :         base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
    1335             : #  if defined(MALLOC_DECOMMIT)
    1336             :         if (pminsize < csize)
    1337             :                 pages_decommit(base_next_decommitted, csize - pminsize);
    1338             : #  endif
    1339           3 :         base_mapped += csize;
    1340           3 :         base_committed += pminsize;
    1341             : 
    1342           3 :         return (false);
    1343             : }
    1344             : 
    1345             : static void *
    1346         128 : base_alloc(size_t size)
    1347             : {
    1348             :         void *ret;
    1349             :         size_t csize;
    1350             : 
    1351             :         /* Round size up to nearest multiple of the cacheline size. */
    1352         128 :         csize = CACHELINE_CEILING(size);
    1353             : 
    1354         128 :         malloc_mutex_lock(&base_mtx);
    1355             :         /* Make sure there's enough space for the allocation. */
    1356         128 :         if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
    1357           3 :                 if (base_pages_alloc(csize)) {
    1358           0 :                         malloc_mutex_unlock(&base_mtx);
    1359           0 :                         return nullptr;
    1360             :                 }
    1361             :         }
    1362             :         /* Allocate. */
    1363         128 :         ret = base_next_addr;
    1364         128 :         base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
    1365             :         /* Make sure enough pages are committed for the new allocation. */
    1366         128 :         if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
    1367             :                 void *pbase_next_addr =
    1368           3 :                     (void *)(PAGE_CEILING((uintptr_t)base_next_addr));
    1369             : 
    1370             : #  ifdef MALLOC_DECOMMIT
    1371             :                 pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr -
    1372             :                     (uintptr_t)base_next_decommitted);
    1373             : #  endif
     1374           3 :                 base_committed += (uintptr_t)pbase_next_addr -
     1375             :                     (uintptr_t)base_next_decommitted;
     1376             :                 base_next_decommitted = pbase_next_addr;
    1377             :         }
    1378         128 :         malloc_mutex_unlock(&base_mtx);
    1379             : 
    1380         128 :         return (ret);
    1381             : }
    1382             : 
    1383             : static void *
    1384         115 : base_calloc(size_t number, size_t size)
    1385             : {
    1386             :         void *ret;
    1387             : 
    1388         115 :         ret = base_alloc(number * size);
     1389         230 :         if (ret) memset(ret, 0, number * size);
    1390             : 
    1391         115 :         return (ret);
    1392             : }
    1393             : 
    1394             : static extent_node_t *
    1395           9 : base_node_alloc(void)
    1396             : {
    1397             :         extent_node_t *ret;
    1398             : 
    1399           9 :         malloc_mutex_lock(&base_mtx);
    1400           9 :         if (base_nodes) {
    1401           2 :                 ret = base_nodes;
    1402           2 :                 base_nodes = *(extent_node_t **)ret;
    1403             :                 malloc_mutex_unlock(&base_mtx);
    1404             :         } else {
    1405           7 :                 malloc_mutex_unlock(&base_mtx);
    1406           7 :                 ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
    1407             :         }
    1408             : 
    1409           9 :         return (ret);
    1410             : }
    1411             : 
    1412             : static void
    1413           2 : base_node_dealloc(extent_node_t *node)
    1414             : {
    1415             : 
    1416           2 :         malloc_mutex_lock(&base_mtx);
    1417           2 :         *(extent_node_t **)node = base_nodes;
    1418           2 :         base_nodes = node;
    1419           2 :         malloc_mutex_unlock(&base_mtx);
    1420           2 : }
    1421             : 
    1422             : /*
    1423             :  * End Utility functions/macros.
    1424             :  */
    1425             : /******************************************************************************/
    1426             : /*
    1427             :  * Begin extent tree code.
    1428             :  */
    1429             : 
    1430             : static inline int
    1431          57 : extent_szad_comp(extent_node_t *a, extent_node_t *b)
    1432             : {
    1433             :         int ret;
    1434          57 :         size_t a_size = a->size;
    1435          57 :         size_t b_size = b->size;
    1436             : 
    1437          57 :         ret = (a_size > b_size) - (a_size < b_size);
    1438          57 :         if (ret == 0) {
    1439          13 :                 uintptr_t a_addr = (uintptr_t)a->addr;
    1440          13 :                 uintptr_t b_addr = (uintptr_t)b->addr;
    1441             : 
    1442          13 :                 ret = (a_addr > b_addr) - (a_addr < b_addr);
    1443             :         }
    1444             : 
    1445          57 :         return (ret);
    1446             : }
    1447             : 
    1448             : /* Wrap red-black tree macros in functions. */
    1449         139 : rb_wrap(static, extent_tree_szad_, extent_tree_t, extent_node_t,
    1450             :     link_szad, extent_szad_comp)
    1451             : 
    1452             : static inline int
    1453             : extent_ad_comp(extent_node_t *a, extent_node_t *b)
    1454             : {
    1455          68 :         uintptr_t a_addr = (uintptr_t)a->addr;
    1456          68 :         uintptr_t b_addr = (uintptr_t)b->addr;
    1457             : 
    1458          68 :         return ((a_addr > b_addr) - (a_addr < b_addr));
    1459             : }
    1460             : 
    1461             : /* Wrap red-black tree macros in functions. */
    1462         121 : rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
    1463             :     extent_ad_comp)
    1464             : 
    1465             : /*
    1466             :  * End extent tree code.
    1467             :  */
    1468             : /******************************************************************************/
    1469             : /*
    1470             :  * Begin chunk management functions.
    1471             :  */
    1472             : 
    1473             : #ifdef MOZ_MEMORY_WINDOWS
    1474             : 
    1475             : static void *
    1476             : pages_map(void *addr, size_t size)
    1477             : {
    1478             :         void *ret = nullptr;
    1479             :         ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
    1480             :             PAGE_READWRITE);
    1481             :         return (ret);
    1482             : }
    1483             : 
    1484             : static void
    1485             : pages_unmap(void *addr, size_t size)
    1486             : {
    1487             :         if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    1488             :                 _malloc_message(_getprogname(),
    1489             :                     ": (malloc) Error in VirtualFree()\n");
    1490             :         }
    1491             : }
    1492             : #else
    1493             : 
    1494             : static void *
    1495         137 : pages_map(void *addr, size_t size)
    1496             : {
    1497             :         void *ret;
    1498             : #if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
    1499             :         /*
    1500             :          * The JS engine assumes that all allocated pointers have their high 17 bits clear,
    1501             :          * which ia64's mmap doesn't support directly. However, we can emulate it by passing
    1502             :          * mmap an "addr" parameter with those bits clear. The mmap will return that address,
    1503             :          * or the nearest available memory above that address, providing a near-guarantee
    1504             :          * that those bits are clear. If they are not, we return nullptr below to indicate
    1505             :          * out-of-memory.
    1506             :          *
    1507             :          * The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
    1508             :          * address space.
    1509             :          *
    1510             :          * See Bug 589735 for more information.
    1511             :          */
    1512             :         bool check_placement = true;
    1513             :         if (!addr) {
    1514             :                 addr = (void*)0x0000070000000000;
    1515             :                 check_placement = false;
    1516             :         }
    1517             : #endif
    1518             : 
    1519             : #if defined(__sparc__) && defined(__arch64__) && defined(__linux__)
    1520             :     const uintptr_t start = 0x0000070000000000ULL;
    1521             :     const uintptr_t end   = 0x0000800000000000ULL;
    1522             : 
    1523             :     /* Copied from js/src/gc/Memory.cpp and adapted for this source */
    1524             : 
    1525             :     uintptr_t hint;
    1526             :     void* region = MAP_FAILED;
    1527             :     for (hint = start; region == MAP_FAILED && hint + size <= end; hint += chunksize) {
    1528             :            region = mmap((void*)hint, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    1529             :            if (region != MAP_FAILED) {
    1530             :                    if (((size_t) region + (size - 1)) & 0xffff800000000000) {
    1531             :                            if (munmap(region, size)) {
    1532             :                                    MOZ_ASSERT(errno == ENOMEM);
    1533             :                            }
    1534             :                            region = MAP_FAILED;
    1535             :                    }
    1536             :            }
    1537             :     }
    1538             :     ret = region;
    1539             : #else
    1540             : 
    1541             :         /*
    1542             :          * We don't use MAP_FIXED here, because it can cause the *replacement*
    1543             :          * of existing mappings, and we only want to create new mappings.
    1544             :          */
    1545             :         ret = mmap(addr, size, PROT_READ | PROT_WRITE,
    1546         137 :                 MAP_PRIVATE | MAP_ANON, -1, 0);
    1547         137 :         MOZ_ASSERT(ret);
    1548             : #endif
    1549         137 :         if (ret == MAP_FAILED) {
    1550             :                 ret = nullptr;
    1551             :         }
    1552             : #if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
    1553             :         /*
    1554             :          * If the allocated memory doesn't have its upper 17 bits clear, consider it
    1555             :          * as out of memory.
    1556             :         */
    1557             :         else if ((long long)ret & 0xffff800000000000) {
    1558             :                 munmap(ret, size);
    1559             :                 ret = nullptr;
    1560             :         }
    1561             :         /* If the caller requested a specific memory location, verify that's what mmap returned. */
    1562             :         else if (check_placement && ret != addr) {
    1563             : #else
    1564         137 :         else if (addr && ret != addr) {
    1565             : #endif
    1566             :                 /*
    1567             :                  * We succeeded in mapping memory, but not in the right place.
    1568             :                  */
    1569           0 :                 if (munmap(ret, size) == -1) {
    1570             :                         char buf[STRERROR_BUF];
    1571             : 
    1572           0 :                         if (strerror_r(errno, buf, sizeof(buf)) == 0) {
    1573             :                                 _malloc_message(_getprogname(),
    1574           0 :                                         ": (malloc) Error in munmap(): ", buf, "\n");
    1575             :                         }
    1576             :                 }
    1577             :                 ret = nullptr;
    1578             :         }
    1579             :         if (ret) {
    1580             :                 MozTagAnonymousMemory(ret, size, "jemalloc");
    1581             :         }
    1582             : 
    1583             : #if defined(__ia64__) || (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
    1584             :         MOZ_ASSERT(!ret || (!check_placement && ret)
    1585             :             || (check_placement && ret == addr));
    1586             : #else
    1587         137 :         MOZ_ASSERT(!ret || (!addr && ret != addr)
    1588             :             || (addr && ret == addr));
    1589             : #endif
    1590         137 :         return (ret);
    1591             : }
    1592             : 
    1593             : static void
    1594          94 : pages_unmap(void *addr, size_t size)
    1595             : {
    1596             : 
    1597          94 :         if (munmap(addr, size) == -1) {
    1598             :                 char buf[STRERROR_BUF];
    1599             : 
    1600           0 :                 if (strerror_r(errno, buf, sizeof(buf)) == 0) {
    1601             :                         _malloc_message(_getprogname(),
    1602           0 :                                 ": (malloc) Error in munmap(): ", buf, "\n");
    1603             :                 }
    1604             :         }
    1605          94 : }
    1606             : #endif
    1607             : 
    1608             : #ifdef MOZ_MEMORY_DARWIN
    1609             : #define VM_COPY_MIN (pagesize << 5)
    1610             : static inline void
    1611             : pages_copy(void *dest, const void *src, size_t n)
    1612             : {
    1613             : 
    1614             :         MOZ_ASSERT((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
    1615             :         MOZ_ASSERT(n >= VM_COPY_MIN);
    1616             :         MOZ_ASSERT((void *)((uintptr_t)src & ~pagesize_mask) == src);
    1617             : 
    1618             :         vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
    1619             :             (vm_address_t)dest);
    1620             : }
    1621             : #endif
    1622             : 
    1623             : static inline malloc_rtree_t *
    1624           3 : malloc_rtree_new(unsigned bits)
    1625             : {
    1626             :         malloc_rtree_t *ret;
    1627             :         unsigned bits_per_level, height, i;
    1628             : 
    1629           3 :         bits_per_level = ffs(pow2_ceil((MALLOC_RTREE_NODESIZE /
    1630             :             sizeof(void *)))) - 1;
    1631           3 :         height = bits / bits_per_level;
    1632           3 :         if (height * bits_per_level != bits)
    1633           3 :                 height++;
    1634           3 :         MOZ_DIAGNOSTIC_ASSERT(height * bits_per_level >= bits);
    1635             : 
    1636           3 :         ret = (malloc_rtree_t*)base_calloc(1, sizeof(malloc_rtree_t) +
    1637           6 :             (sizeof(unsigned) * (height - 1)));
    1638           3 :         if (!ret)
    1639             :                 return nullptr;
    1640             : 
    1641           3 :         malloc_spin_init(&ret->lock);
    1642           3 :         ret->height = height;
    1643           3 :         if (bits_per_level * height > bits)
    1644           3 :                 ret->level2bits[0] = bits % bits_per_level;
    1645             :         else
    1646           0 :                 ret->level2bits[0] = bits_per_level;
    1647          87 :         for (i = 1; i < height; i++)
    1648          42 :                 ret->level2bits[i] = bits_per_level;
    1649             : 
    1650           3 :         ret->root = (void**)base_calloc(1, sizeof(void *) << ret->level2bits[0]);
    1651           3 :         if (!ret->root) {
    1652             :                 /*
    1653             :                  * We leak the rtree here, since there's no generic base
    1654             :                  * deallocation.
    1655             :                  */
    1656             :                 return nullptr;
    1657             :         }
    1658             : 
    1659           3 :         return (ret);
    1660             : }
    1661             : 
    1662             : #define MALLOC_RTREE_GET_GENERATE(f)                                    \
    1663             : /* The least significant bits of the key are ignored. */                \
    1664             : static inline void *                                                    \
    1665             : f(malloc_rtree_t *rtree, uintptr_t key)                                 \
    1666             : {                                                                       \
    1667             :         void *ret;                                                      \
    1668             :         uintptr_t subkey;                                               \
    1669             :         unsigned i, lshift, height, bits;                               \
    1670             :         void **node, **child;                                           \
    1671             :                                                                         \
    1672             :         MALLOC_RTREE_LOCK(&rtree->lock);                         \
    1673             :         for (i = lshift = 0, height = rtree->height, node = rtree->root;\
    1674             :             i < height - 1;                                          \
    1675             :             i++, lshift += bits, node = child) {                        \
    1676             :                 bits = rtree->level2bits[i];                         \
    1677             :                 subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);       \
    1678             :                 child = (void**)node[subkey];                           \
    1679             :                 if (!child) {                                           \
    1680             :                         MALLOC_RTREE_UNLOCK(&rtree->lock);               \
    1681             :                         return nullptr;                                 \
    1682             :                 }                                                       \
    1683             :         }                                                               \
    1684             :                                                                         \
    1685             :         /*                                                              \
    1686             :          * node is a leaf, so it contains values rather than node       \
    1687             :          * pointers.                                                    \
    1688             :          */                                                             \
    1689             :         bits = rtree->level2bits[i];                                 \
    1690             :         subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);               \
    1691             :         ret = node[subkey];                                             \
    1692             :         MALLOC_RTREE_UNLOCK(&rtree->lock);                               \
    1693             :                                                                         \
    1694             :         MALLOC_RTREE_GET_VALIDATE                                       \
    1695             :         return (ret);                                                   \
    1696             : }
    1697             : 
    1698             : #ifdef MOZ_DEBUG
    1699             : #  define MALLOC_RTREE_LOCK(l)          malloc_spin_lock(l)
    1700             : #  define MALLOC_RTREE_UNLOCK(l)        malloc_spin_unlock(l)
    1701             : #  define MALLOC_RTREE_GET_VALIDATE
    1702             : MALLOC_RTREE_GET_GENERATE(malloc_rtree_get_locked)
    1703             : #  undef MALLOC_RTREE_LOCK
    1704             : #  undef MALLOC_RTREE_UNLOCK
    1705             : #  undef MALLOC_RTREE_GET_VALIDATE
    1706             : #endif
    1707             : 
    1708             : #define MALLOC_RTREE_LOCK(l)
    1709             : #define MALLOC_RTREE_UNLOCK(l)
    1710             : #ifdef MOZ_DEBUG
    1711             :    /*
    1712             :     * Suppose that it were possible for a jemalloc-allocated chunk to be
    1713             :     * munmap()ped, followed by a different allocator in another thread re-using
    1714             :     * overlapping virtual memory, all without invalidating the cached rtree
    1715             :     * value.  The result would be a false positive (the rtree would claim that
    1716             :     * jemalloc owns memory that it had actually discarded).  I don't think this
    1717             :     * scenario is possible, but the following assertion is a prudent sanity
    1718             :     * check.
    1719             :     */
    1720             : #  define MALLOC_RTREE_GET_VALIDATE                                     \
    1721             :         MOZ_ASSERT(malloc_rtree_get_locked(rtree, key) == ret);
    1722             : #else
    1723             : #  define MALLOC_RTREE_GET_VALIDATE
    1724             : #endif
    1725       21981 : MALLOC_RTREE_GET_GENERATE(malloc_rtree_get)
    1726             : #undef MALLOC_RTREE_LOCK
    1727             : #undef MALLOC_RTREE_UNLOCK
    1728             : #undef MALLOC_RTREE_GET_VALIDATE
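
The generated getters walk the tree by peeling rtree->level2bits[i] bits off the top of the key at each level. A minimal illustration of that subkey extraction, with hypothetical level widths (8 then 10 bits) rather than the widths malloc_rtree_new() actually computes:

#include <cassert>
#include <cstdint>

static void
rtree_subkey_example()
{
        const unsigned kPtrBits = sizeof(void*) * 8;      // SIZEOF_PTR << 3
        uintptr_t key = (uintptr_t)1 << (kPtrBits - 1);   // only the top key bit set
        unsigned lshift = 0;

        unsigned bits = 8;                                // hypothetical level 0 width
        uintptr_t subkey0 = (key << lshift) >> (kPtrBits - bits);
        lshift += bits;

        bits = 10;                                        // hypothetical level 1 width
        uintptr_t subkey1 = (key << lshift) >> (kPtrBits - bits);

        // The top key bit selects the high slot at level 0 and is consumed
        // there; it no longer contributes to the level 1 subkey.
        assert(subkey0 == 0x80);
        assert(subkey1 == 0);
}
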
    1729             : 
    1730             : static inline bool
    1731         114 : malloc_rtree_set(malloc_rtree_t *rtree, uintptr_t key, void *val)
    1732             : {
    1733             :         uintptr_t subkey;
    1734             :         unsigned i, lshift, height, bits;
    1735             :         void **node, **child;
    1736             : 
    1737         228 :         malloc_spin_lock(&rtree->lock);
    1738        3306 :         for (i = lshift = 0, height = rtree->height, node = rtree->root;
    1739        1710 :             i < height - 1;
    1740        1596 :             i++, lshift += bits, node = child) {
    1741        1596 :                 bits = rtree->level2bits[i];
    1742        1596 :                 subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
    1743        1596 :                 child = (void**)node[subkey];
    1744        1596 :                 if (!child) {
    1745         109 :                         child = (void**)base_calloc(1, sizeof(void *) <<
    1746         218 :                             rtree->level2bits[i+1]);
    1747         109 :                         if (!child) {
    1748           0 :                                 malloc_spin_unlock(&rtree->lock);
    1749           0 :                                 return (true);
    1750             :                         }
    1751         109 :                         node[subkey] = child;
    1752             :                 }
    1753             :         }
    1754             : 
    1755             :         /* node is a leaf, so it contains values rather than node pointers. */
    1756         114 :         bits = rtree->level2bits[i];
    1757         114 :         subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits);
    1758         114 :         node[subkey] = val;
    1759         228 :         malloc_spin_unlock(&rtree->lock);
    1760             : 
    1761         114 :         return (false);
    1762             : }
    1763             : 
    1764             : /* pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
    1765             :  * from upstream jemalloc 3.4.1 to fix Mozilla bug 956501. */
    1766             : 
    1767             : /* Return the offset between a and the nearest aligned address at or below a. */
    1768             : #define        ALIGNMENT_ADDR2OFFSET(a, alignment)                                \
    1769             :         ((size_t)((uintptr_t)(a) & (alignment - 1)))
    1770             : 
    1771             : /* Return the smallest alignment multiple that is >= s. */
    1772             : #define        ALIGNMENT_CEILING(s, alignment)                                        \
    1773             :         (((s) + (alignment - 1)) & (~(alignment - 1)))
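
Two worked values for these macros, using a hypothetical 1 MiB alignment:

#include <cassert>
#include <cstdint>

static void
alignment_macro_examples()
{
        const uintptr_t kAlign = 1 << 20;  // hypothetical 1 MiB alignment

        // ALIGNMENT_ADDR2OFFSET: distance above the aligned address at or below a.
        assert(((uintptr_t)0x2345678 & (kAlign - 1)) == 0x45678);
        // ALIGNMENT_CEILING: round up to the alignment; aligned inputs are unchanged.
        assert(((0x2345678 + (kAlign - 1)) & ~(kAlign - 1)) == 0x2400000);
        assert(((0x2300000 + (kAlign - 1)) & ~(kAlign - 1)) == 0x2300000);
}
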
    1774             : 
    1775             : static void *
    1776          34 : pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
    1777             : {
    1778          34 :         void *ret = (void *)((uintptr_t)addr + leadsize);
    1779             : 
    1780          34 :         MOZ_ASSERT(alloc_size >= leadsize + size);
    1781             : #ifdef MOZ_MEMORY_WINDOWS
    1782             :         {
    1783             :                 void *new_addr;
    1784             : 
    1785             :                 pages_unmap(addr, alloc_size);
    1786             :                 new_addr = pages_map(ret, size);
    1787             :                 if (new_addr == ret)
    1788             :                         return (ret);
    1789             :                 if (new_addr)
    1790             :                         pages_unmap(new_addr, size);
    1791             :                 return nullptr;
    1792             :         }
    1793             : #else
    1794             :         {
    1795          34 :                 size_t trailsize = alloc_size - leadsize - size;
    1796             : 
    1797          34 :                 if (leadsize != 0)
    1798          29 :                         pages_unmap(addr, leadsize);
    1799          34 :                 if (trailsize != 0)
    1800          31 :                         pages_unmap((void *)((uintptr_t)ret + size), trailsize);
    1801          34 :                 return (ret);
    1802             :         }
    1803             : #endif
    1804             : }
    1805             : 
    1806             : static void *
    1807          34 : chunk_alloc_mmap_slow(size_t size, size_t alignment)
    1808             : {
    1809             :         void *ret, *pages;
    1810             :         size_t alloc_size, leadsize;
    1811             : 
    1812          34 :         alloc_size = size + alignment - pagesize;
    1813             :         /* Beware size_t wrap-around. */
    1814          34 :         if (alloc_size < size)
    1815             :                 return nullptr;
    1816             :         do {
    1817          34 :                 pages = pages_map(nullptr, alloc_size);
    1818          34 :                 if (!pages)
    1819             :                         return nullptr;
    1820          34 :                 leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
    1821             :                         (uintptr_t)pages;
    1822          34 :                 ret = pages_trim(pages, alloc_size, leadsize, size);
    1823          34 :         } while (!ret);
    1824             : 
    1825             :         MOZ_ASSERT(ret);
    1826             :         return (ret);
    1827             : }
    1828             : 
    1829             : static void *
    1830         103 : chunk_alloc_mmap(size_t size, size_t alignment)
    1831             : {
    1832             :         void *ret;
    1833             :         size_t offset;
    1834             : 
    1835             :         /*
    1836             :          * Ideally, there would be a way to specify alignment to mmap() (like
    1837             :          * NetBSD has), but in the absence of such a feature, we have to work
    1838             :          * hard to efficiently create aligned mappings. The reliable, but
    1839             :          * slow method is to create a mapping that is over-sized, then trim the
    1840             :          * excess. However, that always results in one or two calls to
    1841             :          * pages_unmap().
    1842             :          *
    1843             :          * Optimistically try mapping precisely the right amount before falling
    1844             :          * back to the slow method, with the expectation that the optimistic
    1845             :          * approach works most of the time.
    1846             :          */
    1847             : 
    1848         103 :         ret = pages_map(nullptr, size);
    1849         103 :         if (!ret)
    1850             :                 return nullptr;
    1851         103 :         offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    1852         103 :         if (offset != 0) {
    1853          34 :                 pages_unmap(ret, size);
    1854          34 :                 return (chunk_alloc_mmap_slow(size, alignment));
    1855             :         }
    1856             : 
    1857             :         MOZ_ASSERT(ret);
    1858             :         return (ret);
    1859             : }
    1860             : 
    1861             : /* Purge and release the pages in the chunk of length `length` at `addr` to
    1862             :  * the OS.
    1863             :  * Returns whether the pages are guaranteed to be full of zeroes when the
    1864             :  * function returns.
    1865             :  * The force_zero argument explicitly requests that the memory is guaranteed
    1866             :  * to be full of zeroes when the function returns.
    1867             :  */
    1868             : static bool
    1869           3 : pages_purge(void *addr, size_t length, bool force_zero)
    1870             : {
    1871             : #ifdef MALLOC_DECOMMIT
    1872             :         pages_decommit(addr, length);
    1873             :         return true;
    1874             : #else
    1875             : #  ifndef MOZ_MEMORY_LINUX
    1876             :         if (force_zero)
    1877             :                 memset(addr, 0, length);
    1878             : #  endif
    1879             : #  ifdef MOZ_MEMORY_WINDOWS
    1880             :         /*
    1881             :         * The region starting at addr may have been allocated in multiple calls
    1882             :         * to VirtualAlloc and recycled, so resetting the entire region in one
    1883             :         * go may not be valid. However, since we allocate at least a chunk at a
    1884             :         * time, we may touch any region in chunksized increments.
    1885             :         */
    1886             :         size_t pages_size = std::min(length, chunksize -
    1887             :                 CHUNK_ADDR2OFFSET((uintptr_t)addr));
    1888             :         while (length > 0) {
    1889             :                 VirtualAlloc(addr, pages_size, MEM_RESET, PAGE_READWRITE);
    1890             :                 addr = (void *)((uintptr_t)addr + pages_size);
    1891             :                 length -= pages_size;
    1892             :                 pages_size = std::min(length, chunksize);
    1893             :         }
    1894             :         return force_zero;
    1895             : #  else
    1896             : #    ifdef MOZ_MEMORY_LINUX
    1897             : #      define JEMALLOC_MADV_PURGE MADV_DONTNEED
    1898             : #      define JEMALLOC_MADV_ZEROS true
    1899             : #    else /* FreeBSD and Darwin. */
    1900             : #      define JEMALLOC_MADV_PURGE MADV_FREE
    1901             : #      define JEMALLOC_MADV_ZEROS force_zero
    1902             : #    endif
    1903           3 :         int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
    1904           3 :         return JEMALLOC_MADV_ZEROS && err == 0;
    1905             : #    undef JEMALLOC_MADV_PURGE
    1906             : #    undef JEMALLOC_MADV_ZEROS
    1907             : #  endif
    1908             : #endif
    1909             : }
    1910             : 
    1911             : static void *
    1912         114 : chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    1913             :     size_t alignment, bool base, bool *zeroed)
    1914             : {
    1915             :         void *ret;
    1916             :         extent_node_t *node;
    1917             :         extent_node_t key;
    1918             :         size_t alloc_size, leadsize, trailsize;
    1919             :         ChunkType chunk_type;
    1920             : 
    1921         114 :         if (base) {
    1922             :                 /*
    1923             :                  * This function may need to call base_node_{,de}alloc(), but
    1924             :                  * the current chunk allocation request is on behalf of the
    1925             :                  * base allocator.  Avoid deadlock (and if that weren't an
    1926             :                  * issue, potential for infinite recursion) by returning nullptr.
    1927             :                  */
    1928             :                 return nullptr;
    1929             :         }
    1930             : 
    1931         111 :         alloc_size = size + alignment - chunksize;
    1932             :         /* Beware size_t wrap-around. */
    1933         111 :         if (alloc_size < size)
    1934             :                 return nullptr;
    1935         111 :         key.addr = nullptr;
    1936         111 :         key.size = alloc_size;
    1937         111 :         malloc_mutex_lock(&chunks_mtx);
    1938         111 :         node = extent_tree_szad_nsearch(chunks_szad, &key);
    1939         111 :         if (!node) {
    1940         100 :                 malloc_mutex_unlock(&chunks_mtx);
    1941         100 :                 return nullptr;
    1942             :         }
    1943          11 :         leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
    1944             :             (uintptr_t)node->addr;
    1945          11 :         MOZ_ASSERT(node->size >= leadsize + size);
    1946          11 :         trailsize = node->size - leadsize - size;
    1947          11 :         ret = (void *)((uintptr_t)node->addr + leadsize);
    1948          11 :         chunk_type = node->chunk_type;
    1949          11 :         if (zeroed) {
    1950          11 :                 *zeroed = (chunk_type == ZEROED_CHUNK);
    1951             :         }
    1952             :         /* Remove node from the tree. */
    1953          11 :         extent_tree_szad_remove(chunks_szad, node);
    1954          11 :         extent_tree_ad_remove(chunks_ad, node);
    1955          11 :         if (leadsize != 0) {
    1956             :                 /* Insert the leading space as a smaller chunk. */
    1957           0 :                 node->size = leadsize;
    1958           0 :                 extent_tree_szad_insert(chunks_szad, node);
    1959           0 :                 extent_tree_ad_insert(chunks_ad, node);
    1960           0 :                 node = nullptr;
    1961             :         }
    1962          11 :         if (trailsize != 0) {
    1963             :                 /* Insert the trailing space as a smaller chunk. */
    1964          11 :                 if (!node) {
    1965             :                         /*
    1966             :                          * An additional node is required, but
    1967             :                          * base_node_alloc() can cause a new base chunk to be
    1968             :                          * allocated.  Drop chunks_mtx in order to avoid
    1969             :                          * deadlock, and if node allocation fails, deallocate
    1970             :                          * the result before returning an error.
    1971             :                          */
    1972           0 :                         malloc_mutex_unlock(&chunks_mtx);
    1973           0 :                         node = base_node_alloc();
    1974           0 :                         if (!node) {
    1975           0 :                                 chunk_dealloc(ret, size, chunk_type);
    1976           0 :                                 return nullptr;
    1977             :                         }
    1978             :                         malloc_mutex_lock(&chunks_mtx);
    1979             :                 }
    1980          11 :                 node->addr = (void *)((uintptr_t)(ret) + size);
    1981          11 :                 node->size = trailsize;
    1982          11 :                 node->chunk_type = chunk_type;
    1983          11 :                 extent_tree_szad_insert(chunks_szad, node);
    1984          11 :                 extent_tree_ad_insert(chunks_ad, node);
    1985          11 :                 node = nullptr;
    1986             :         }
    1987             : 
    1988          11 :         recycled_size -= size;
    1989             : 
    1990          11 :         malloc_mutex_unlock(&chunks_mtx);
    1991             : 
    1992          11 :         if (node)
    1993           0 :                 base_node_dealloc(node);
    1994             : #ifdef MALLOC_DECOMMIT
    1995             :         pages_commit(ret, size);
    1996             :         // pages_commit is guaranteed to zero the chunk.
    1997             :         if (zeroed) {
    1998             :                 *zeroed = true;
    1999             :         }
    2000             : #endif
    2001             :         return (ret);
    2002             : }
    2003             : 
    2004             : #ifdef MOZ_MEMORY_WINDOWS
    2005             : /*
    2006             :  * On Windows, calls to VirtualAlloc and VirtualFree must be matched, making it
    2007             :  * awkward to recycle allocations of varying sizes. Therefore we only allow
    2008             :  * recycling when the size equals the chunksize, unless deallocation is entirely
    2009             :  * disabled.
    2010             :  */
    2011             : #define CAN_RECYCLE(size) (size == chunksize)
    2012             : #else
    2013             : #define CAN_RECYCLE(size) true
    2014             : #endif
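                     : /*
                     :  * Illustrative note: with these definitions, chunk_alloc() and
                     :  * chunk_dalloc_mmap() below only consult the recycle trees for requests of
                     :  * exactly chunksize on Windows, while any chunk-aligned size is eligible on
                     :  * other platforms.
                     :  */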
    2015             : 
    2016             : /* Allocates `size` bytes of system memory aligned to `alignment`.
    2017             :  * `base` indicates whether the memory will be used for the base allocator
    2018             :  * (e.g. base_alloc).
    2019             :  * `zeroed` is an out parameter reporting whether the allocated memory is
    2020             :  * guaranteed to be full of zeroes. It may be nullptr when the caller doesn't
    2021             :  * care about the result.
    2022             :  */
    2023             : static void *
    2024         114 : chunk_alloc(size_t size, size_t alignment, bool base, bool *zeroed)
    2025             : {
    2026             :         void *ret;
    2027             : 
    2028         114 :         MOZ_ASSERT(size != 0);
    2029         114 :         MOZ_ASSERT((size & chunksize_mask) == 0);
    2030         114 :         MOZ_ASSERT(alignment != 0);
    2031         114 :         MOZ_ASSERT((alignment & chunksize_mask) == 0);
    2032             : 
    2033             :         if (CAN_RECYCLE(size)) {
    2034         114 :                 ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap,
    2035         114 :                         size, alignment, base, zeroed);
    2036         114 :                 if (ret)
    2037             :                         goto RETURN;
    2038             :         }
    2039         103 :         ret = chunk_alloc_mmap(size, alignment);
    2040         103 :         if (zeroed)
    2041         100 :                 *zeroed = true;
    2042         103 :         if (ret) {
    2043             :                 goto RETURN;
    2044             :         }
    2045             : 
    2046             :         /* All strategies for allocation failed. */
    2047           0 :         ret = nullptr;
    2048             : RETURN:
    2049             : 
    2050         114 :         if (ret && base == false) {
    2051         111 :                 if (malloc_rtree_set(chunk_rtree, (uintptr_t)ret, ret)) {
    2052           0 :                         chunk_dealloc(ret, size, UNKNOWN_CHUNK);
    2053           0 :                         return nullptr;
    2054             :                 }
    2055             :         }
    2056             : 
    2057         114 :         MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
    2058             :         return (ret);
    2059             : }
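                     : /*
                     :  * Typical call, as seen in arena_run_alloc() further down (a sketch, not an
                     :  * additional API):
                     :  *
                     :  *     bool zeroed;
                     :  *     void *chunk = chunk_alloc(chunksize, chunksize, false, &zeroed);
                     :  *
                     :  * The caller then uses `zeroed` to decide whether the memory still needs to
                     :  * be cleared (cf. chunk_ensure_zero() below).
                     :  */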
    2060             : 
    2061             : static void
    2062           1 : chunk_ensure_zero(void* ptr, size_t size, bool zeroed)
    2063             : {
    2064           1 :         if (zeroed == false)
    2065             :                 memset(ptr, 0, size);
    2066             : #ifdef MOZ_DEBUG
    2067             :         else {
    2068             :                 size_t i;
    2069             :                 size_t *p = (size_t *)(uintptr_t)ptr;
    2070             : 
    2071             :                 for (i = 0; i < size / sizeof(size_t); i++)
    2072             :                         MOZ_ASSERT(p[i] == 0);
    2073             :         }
    2074             : #endif
    2075           1 : }
    2076             : 
    2077             : static void
    2078           3 : chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    2079             :     size_t size, ChunkType chunk_type)
    2080             : {
    2081             :         extent_node_t *xnode, *node, *prev, *xprev, key;
    2082             : 
    2083           3 :         if (chunk_type != ZEROED_CHUNK) {
    2084           3 :                 if (pages_purge(chunk, size, chunk_type == HUGE_CHUNK)) {
    2085           3 :                         chunk_type = ZEROED_CHUNK;
    2086             :                 }
    2087             :         }
    2088             : 
    2089             :         /*
    2090             :          * Allocate a node before acquiring chunks_mtx even though it might not
    2091             :          * be needed, because base_node_alloc() may cause a new base chunk to
    2092             :          * be allocated, which could cause deadlock if chunks_mtx were already
    2093             :          * held.
    2094             :          */
    2095           3 :         xnode = base_node_alloc();
    2096             :         /* Use xprev to implement conditional deferred deallocation of prev. */
    2097           3 :         xprev = nullptr;
    2098             : 
    2099           3 :         malloc_mutex_lock(&chunks_mtx);
    2100           3 :         key.addr = (void *)((uintptr_t)chunk + size);
    2101           3 :         node = extent_tree_ad_nsearch(chunks_ad, &key);
    2102             :         /* Try to coalesce forward. */
    2103           3 :         if (node && node->addr == key.addr) {
    2104             :                 /*
    2105             :                  * Coalesce chunk with the following address range.  This does
    2106             :                  * not change the position within chunks_ad, so only
    2107             :                  * remove/insert from/into chunks_szad.
    2108             :                  */
    2109           0 :                 extent_tree_szad_remove(chunks_szad, node);
    2110           0 :                 node->addr = chunk;
    2111           0 :                 node->size += size;
    2112           0 :                 if (node->chunk_type != chunk_type) {
    2113           0 :                         node->chunk_type = RECYCLED_CHUNK;
    2114             :                 }
    2115           0 :                 extent_tree_szad_insert(chunks_szad, node);
    2116             :         } else {
    2117             :                 /* Coalescing forward failed, so insert a new node. */
    2118           3 :                 if (!xnode) {
    2119             :                         /*
    2120             :                          * base_node_alloc() failed, which is an exceedingly
    2121             :                          * unlikely failure.  Leak chunk; its pages have
    2122             :                          * already been purged, so this is only a virtual
    2123             :                          * memory leak.
    2124             :                          */
    2125             :                         goto label_return;
    2126             :                 }
    2127           3 :                 node = xnode;
    2128           3 :                 xnode = nullptr; /* Prevent deallocation below. */
    2129           3 :                 node->addr = chunk;
    2130           3 :                 node->size = size;
    2131           3 :                 node->chunk_type = chunk_type;
    2132           3 :                 extent_tree_ad_insert(chunks_ad, node);
    2133           3 :                 extent_tree_szad_insert(chunks_szad, node);
    2134             :         }
    2135             : 
    2136             :         /* Try to coalesce backward. */
    2137           3 :         prev = extent_tree_ad_prev(chunks_ad, node);
    2138           3 :         if (prev && (void *)((uintptr_t)prev->addr + prev->size) ==
    2139             :             chunk) {
    2140             :                 /*
    2141             :                  * Coalesce chunk with the previous address range.  This does
    2142             :                  * not change the position within chunks_ad, so only
    2143             :                  * remove/insert node from/into chunks_szad.
    2144             :                  */
    2145           0 :                 extent_tree_szad_remove(chunks_szad, prev);
    2146           0 :                 extent_tree_ad_remove(chunks_ad, prev);
    2147             : 
    2148           0 :                 extent_tree_szad_remove(chunks_szad, node);
    2149           0 :                 node->addr = prev->addr;
    2150           0 :                 node->size += prev->size;
    2151           0 :                 if (node->chunk_type != prev->chunk_type) {
    2152           0 :                         node->chunk_type = RECYCLED_CHUNK;
    2153             :                 }
    2154           0 :                 extent_tree_szad_insert(chunks_szad, node);
    2155             : 
    2156           0 :                 xprev = prev;
    2157             :         }
    2158             : 
    2159           3 :         recycled_size += size;
    2160             : 
    2161             : label_return:
    2162           3 :         malloc_mutex_unlock(&chunks_mtx);
    2163             :         /*
    2164             :          * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
    2165             :          * avoid potential deadlock.
    2166             :          */
    2167           3 :         if (xnode)
    2168           0 :                 base_node_dealloc(xnode);
    2169           3 :         if (xprev)
    2170           0 :                 base_node_dealloc(xprev);
    2171           3 : }
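                     : /*
                     :  * Illustrative sketch (not from the source): if extents [A, A+S) and
                     :  * [A+2S, A+3S) are already recorded and the chunk [A+S, A+2S) is freed, the
                     :  * forward pass above merges it into the node at A+2S (now [A+S, A+3S)), and
                     :  * the backward pass then merges that node with its predecessor at A, leaving
                     :  * a single recorded extent [A, A+3S).
                     :  */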
    2172             : 
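                     : /* Returns true if the chunk should instead be recorded for recycling by the
                     :  * caller (see chunk_dealloc() below), or false once the pages have actually
                     :  * been unmapped. */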
    2173             : static bool
    2174           3 : chunk_dalloc_mmap(void *chunk, size_t size)
    2175             : {
    2176           6 :         if (CAN_RECYCLE(size) && load_acquire_z(&recycled_size) < recycle_limit)
    2177             :                 return true;
    2178             : 
    2179           0 :         pages_unmap(chunk, size);
    2180           0 :         return false;
    2181             : }
    2182             : 
    2183             : #undef CAN_RECYCLE
    2184             : 
    2185             : static void
    2186           3 : chunk_dealloc(void *chunk, size_t size, ChunkType type)
    2187             : {
    2188             : 
    2189           3 :         MOZ_ASSERT(chunk);
    2190           3 :         MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
    2191           3 :         MOZ_ASSERT(size != 0);
    2192           3 :         MOZ_ASSERT((size & chunksize_mask) == 0);
    2193             : 
    2194           3 :         malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, nullptr);
    2195             : 
    2196           3 :         if (chunk_dalloc_mmap(chunk, size))
    2197           3 :                 chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size, type);
    2198           3 : }
    2199             : 
    2200             : /*
    2201             :  * End chunk management functions.
    2202             :  */
    2203             : /******************************************************************************/
    2204             : /*
    2205             :  * Begin arena.
    2206             :  */
    2207             : 
    2208             : static inline arena_t *
    2209         126 : thread_local_arena(bool enabled)
    2210             : {
    2211             : #ifndef NO_TLS
    2212             :         arena_t *arena;
    2213             : 
    2214         126 :         if (enabled) {
    2215             :                 /* The arena will essentially be leaked if this function is
    2216             :                  * called with `false`, but it doesn't matter at the moment,
    2217             :                  * because in practice nothing actually calls this function
    2218             :                  * with `false`, except maybe at shutdown. */
    2219           0 :                 arena = arenas_extend();
    2220             :         } else {
    2221         126 :                 malloc_spin_lock(&arenas_lock);
    2222         126 :                 arena = arenas[0];
    2223             :                 malloc_spin_unlock(&arenas_lock);
    2224             :         }
    2225             : #ifdef MOZ_MEMORY_WINDOWS
    2226             :         TlsSetValue(tlsIndex, arena);
    2227             : #elif defined(MOZ_MEMORY_DARWIN)
    2228             :         pthread_setspecific(tlsIndex, arena);
    2229             : #else
    2230         126 :         arenas_map = arena;
    2231             : #endif
    2232             : 
    2233         126 :         return arena;
    2234             : #else
    2235             :         return arenas[0];
    2236             : #endif
    2237             : }
    2238             : 
    2239             : MOZ_JEMALLOC_API void
    2240           0 : jemalloc_thread_local_arena_impl(bool enabled)
    2241             : {
    2242           0 :         thread_local_arena(enabled);
    2243           0 : }
    2244             : 
    2245             : /*
    2246             :  * Choose an arena based on a per-thread value.
    2247             :  */
    2248             : static inline arena_t *
    2249     1201378 : choose_arena(void)
    2250             : {
    2251             :         arena_t *ret;
    2252             : 
    2253             :         /*
    2254             :          * We can only use TLS if this is a PIC library, since for the static
    2255             :          * library version, libc's malloc is used by TLS allocation, which
    2256             :          * introduces a bootstrapping issue.
    2257             :          */
    2258             : #ifndef NO_TLS
    2259             : 
    2260             : #  ifdef MOZ_MEMORY_WINDOWS
    2261             :         ret = (arena_t*)TlsGetValue(tlsIndex);
    2262             : #  elif defined(MOZ_MEMORY_DARWIN)
    2263             :         ret = (arena_t*)pthread_getspecific(tlsIndex);
    2264             : #  else
    2265     1201378 :         ret = arenas_map;
    2266             : #  endif
    2267             : 
    2268     1201378 :         if (!ret) {
    2269         126 :                 ret = thread_local_arena(false);
    2270             :         }
    2271             : #else
    2272             :         ret = arenas[0];
    2273             : #endif
    2274     1201384 :         MOZ_DIAGNOSTIC_ASSERT(ret);
    2275     1201384 :         return (ret);
    2276             : }
    2277             : 
    2278             : static inline int
    2279        9732 : arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
    2280             : {
    2281        9732 :         uintptr_t a_chunk = (uintptr_t)a;
    2282        9732 :         uintptr_t b_chunk = (uintptr_t)b;
    2283             : 
    2284        9732 :         MOZ_ASSERT(a);
    2285        9732 :         MOZ_ASSERT(b);
    2286             : 
    2287        9732 :         return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
    2288             : }
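                     : /*
                     :  * Note (illustrative): the (a > b) - (a < b) idiom used by these comparators
                     :  * yields -1, 0 or 1 without branches or overflow; e.g. for addresses 0x1000
                     :  * and 0x2000 it evaluates to 0 - 1 == -1, which is what the rb_wrap()ed
                     :  * red-black tree routines expect.
                     :  */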
    2289             : 
    2290             : /* Wrap red-black tree macros in functions. */
    2291        9172 : rb_wrap(static, arena_chunk_tree_dirty_, arena_chunk_tree_t,
    2292             :     arena_chunk_t, link_dirty, arena_chunk_comp)
    2293             : 
    2294             : static inline int
    2295      136216 : arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
    2296             : {
    2297      136216 :         uintptr_t a_mapelm = (uintptr_t)a;
    2298      136216 :         uintptr_t b_mapelm = (uintptr_t)b;
    2299             : 
    2300      136216 :         MOZ_ASSERT(a);
    2301      136216 :         MOZ_ASSERT(b);
    2302             : 
    2303      136216 :         return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
    2304             : }
    2305             : 
    2306             : /* Wrap red-black tree macros in functions. */
    2307      127223 : rb_wrap(static, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link,
    2308             :     arena_run_comp)
    2309             : 
    2310             : static inline int
    2311      204531 : arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
    2312             : {
    2313             :         int ret;
    2314      204531 :         size_t a_size = a->bits & ~pagesize_mask;
    2315      204531 :         size_t b_size = b->bits & ~pagesize_mask;
    2316             : 
    2317      204531 :         ret = (a_size > b_size) - (a_size < b_size);
    2318      204531 :         if (ret == 0) {
    2319             :                 uintptr_t a_mapelm, b_mapelm;
    2320             : 
    2321       75890 :                 if ((a->bits & CHUNK_MAP_KEY) == 0)
    2322       63391 :                         a_mapelm = (uintptr_t)a;
    2323             :                 else {
    2324             :                         /*
    2325             :                          * Treat keys as though they are lower than anything
    2326             :                          * else.
    2327             :                          */
    2328             :                         a_mapelm = 0;
    2329             :                 }
    2330       75890 :                 b_mapelm = (uintptr_t)b;
    2331             : 
    2332       75890 :                 ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
    2333             :         }
    2334             : 
    2335      204531 :         return (ret);
    2336             : }
    2337             : 
    2338             : /* Wrap red-black tree macros in functions. */
    2339      106334 : rb_wrap(static, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, link,
    2340             :     arena_avail_comp)
    2341             : 
    2342             : static inline void *
    2343     1183244 : arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
    2344             : {
    2345             :         void *ret;
    2346             :         unsigned i, mask, bit, regind;
    2347             : 
    2348     1183244 :         MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
    2349     1183244 :         MOZ_ASSERT(run->regs_minelm < bin->regs_mask_nelms);
    2350             : 
    2351             :         /*
    2352             :          * Move the first check outside the loop, so that run->regs_minelm can
    2353             :          * be updated unconditionally, without the possibility of updating it
    2354             :          * multiple times.
    2355             :          */
    2356     1183244 :         i = run->regs_minelm;
    2357     1183244 :         mask = run->regs_mask[i];
    2358     1183244 :         if (mask != 0) {
    2359             :                 /* Usable allocation found. */
    2360     1149825 :                 bit = ffs((int)mask) - 1;
    2361             : 
    2362     1149825 :                 regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
    2363     1149825 :                 MOZ_ASSERT(regind < bin->nregs);
    2364     2299650 :                 ret = (void *)(((uintptr_t)run) + bin->reg0_offset
    2365     1149825 :                     + (bin->reg_size * regind));
    2366             : 
    2367             :                 /* Clear bit. */
    2368     1149825 :                 mask ^= (1U << bit);
    2369     1149825 :                 run->regs_mask[i] = mask;
    2370             : 
    2371     1149825 :                 return (ret);
    2372             :         }
    2373             : 
    2374       40076 :         for (i++; i < bin->regs_mask_nelms; i++) {
    2375       40076 :                 mask = run->regs_mask[i];
    2376       40076 :                 if (mask != 0) {
    2377             :                         /* Usable allocation found. */
    2378       33419 :                         bit = ffs((int)mask) - 1;
    2379             : 
    2380       33419 :                         regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
    2381       33419 :                         MOZ_ASSERT(regind < bin->nregs);
    2382       66838 :                         ret = (void *)(((uintptr_t)run) + bin->reg0_offset
    2383       33419 :                             + (bin->reg_size * regind));
    2384             : 
    2385             :                         /* Clear bit. */
    2386       33419 :                         mask ^= (1U << bit);
    2387       33419 :                         run->regs_mask[i] = mask;
    2388             : 
    2389             :                         /*
    2390             :                          * Make a note that nothing before this element
    2391             :                          * contains a free region.
    2392             :                          */
    2393       33419 :                         run->regs_minelm = i; /* Low payoff: + (mask == 0); */
    2394             : 
    2395       33419 :                         return (ret);
    2396             :                 }
    2397             :         }
    2398             :         /* Not reached. */
    2399           0 :         MOZ_DIAGNOSTIC_ASSERT(0);
    2400             :         return nullptr;
    2401             : }
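                     : /*
                     :  * Worked example (illustrative, assuming 32-bit mask words, i.e.
                     :  * SIZEOF_INT_2POW == 2): with run->regs_minelm == 1 and
                     :  * run->regs_mask[1] == 0x8, ffs() returns 4, so bit == 3 and
                     :  * regind == (1 << 5) + 3 == 35.  The region handed out is
                     :  * run + reg0_offset + 35 * reg_size, and bit 3 of mask word 1 is cleared to
                     :  * mark it allocated.
                     :  */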
    2402             : 
    2403             : static inline void
    2404      739103 : arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
    2405             : {
    2406             :         /*
    2407             :          * To divide by a number D that is not a power of two we multiply
    2408             :          * by (2^21 / D) and then right shift by 21 positions.
    2409             :          *
    2410             :          *   X / D
    2411             :          *
    2412             :          * becomes
    2413             :          *
    2414             :          *   (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
    2415             :          */
    2416             : #define SIZE_INV_SHIFT 21
    2417             : #define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
    2418             :         static const unsigned size_invs[] = {
    2419             :             SIZE_INV(3),
    2420             :             SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
    2421             :             SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
    2422             :             SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
    2423             :             SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
    2424             :             SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
    2425             :             SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
    2426             :             SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
    2427             : #if (QUANTUM_2POW_MIN < 4)
    2428             :             ,
    2429             :             SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
    2430             :             SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
    2431             :             SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
    2432             :             SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
    2433             :             SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
    2434             :             SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
    2435             :             SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
    2436             :             SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
    2437             : #endif
    2438             :         };
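                     :         /*
                     :          * Worked example (illustrative, assuming QUANTUM_2POW_MIN == 4):
                     :          * for D == 48, size_invs[(48 >> 4) - 3] == SIZE_INV(3)
                     :          * == (2^21 / 48) + 1 == 43691, so X == 96 gives
                     :          * (96 * 43691) >> 21 == 4194336 >> 21 == 2, matching 96 / 48.
                     :          * The "+ 1" rounds the reciprocal up so the truncated product
                     :          * does not fall short for the sizes handled here.
                     :          */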
    2439             :         unsigned diff, regind, elm, bit;
    2440             : 
    2441      739103 :         MOZ_ASSERT(run->magic == ARENA_RUN_MAGIC);
    2442             :         MOZ_ASSERT(((sizeof(size_invs)) / sizeof(unsigned)) + 3
    2443             :             >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
    2444             : 
    2445             :         /*
    2446             :          * Avoid doing division with a variable divisor if possible.  Using
    2447             :          * actual division here can reduce allocator throughput by over 20%!
    2448             :          */
    2449      739103 :         diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
    2450      739103 :         if ((size & (size - 1)) == 0) {
    2451             :                 /*
    2452             :                  * log2_table allows fast division of a power of two in the
    2453             :                  * [1..128] range.
    2454             :                  *
    2455             :                  * (x / divisor) becomes (x >> log2_table[divisor - 1]).
    2456             :                  */
    2457             :                 static const unsigned char log2_table[] = {
    2458             :                     0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
    2459             :                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
    2460             :                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    2461             :                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
    2462             :                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    2463             :                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    2464             :                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    2465             :                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
    2466             :                 };
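                     :                 /*
                     :                  * Worked example (illustrative): size == 64 gives
                     :                  * log2_table[63] == 6, so regind == diff >> 6;
                     :                  * size == 4096 takes the second branch with
                     :                  * log2_table[(4096 >> 8) - 1] == log2_table[15] == 4,
                     :                  * so regind == diff >> (8 + 4).
                     :                  */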
    2467             : 
    2468      598127 :                 if (size <= 128)
    2469      529720 :                         regind = (diff >> log2_table[size - 1]);
    2470       68407 :                 else if (size <= 32768)
    2471       68407 :                         regind = diff >> (8 + log2_table[(size >> 8) - 1]);
    2472             :                 else {
    2473             :                         /*
    2474             :                          * The run size is too large for us to use the lookup
    2475             :                          * table.  Use real division.
    2476             :                          */
    2477           0 :                         regind = diff / size;
    2478             :                 }
    2479      140976 :         } else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
    2480             :             << QUANTUM_2POW_MIN) + 2) {
    2481      140614 :                 regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
    2482      140614 :                 regind >>= SIZE_INV_SHIFT;
    2483             :         } else {
    2484             :                 /*
    2485             :                  * size_invs isn't large enough to handle this size class, so
    2486             :                  * calculate regind using actual division.  This only happens
    2487             :                  * if the user increases small_max via the 'S' runtime
    2488             :                  * configuration option.
    2489             :                  */
    2490         362 :                 regind = diff / size;
    2491             :         }
    2492      739103 :         MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
    2493      739103 :         MOZ_DIAGNOSTIC_ASSERT(regind < bin->nregs);
    2494             : 
    2495      739103 :         elm = regind >> (SIZEOF_INT_2POW + 3);
    2496      739103 :         if (elm < run->regs_minelm)
    2497       24485 :                 run->regs_minelm = elm;
    2498      739103 :         bit = regind - (elm << (SIZEOF_INT_2POW + 3));
    2499      739103 :         MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
    2500      739103 :         run->regs_mask[elm] |= (1U << bit);
    2501             : #undef SIZE_INV
    2502             : #undef SIZE_INV_SHIFT
    2503      739103 : }
    2504             : 
    2505             : static void
    2506       28297 : arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    2507             :     bool zero)
    2508             : {
    2509             :         arena_chunk_t *chunk;
    2510             :         size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
    2511             : 
    2512       28297 :         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    2513       28297 :         old_ndirty = chunk->ndirty;
    2514       56594 :         run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
    2515       28297 :             >> pagesize_2pow);
    2516       28297 :         total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >>
    2517             :             pagesize_2pow;
    2518       28297 :         need_pages = (size >> pagesize_2pow);
    2519       28297 :         MOZ_ASSERT(need_pages > 0);
    2520       28297 :         MOZ_ASSERT(need_pages <= total_pages);
    2521       28297 :         rem_pages = total_pages - need_pages;
    2522             : 
    2523       28297 :         arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]);
    2524             : 
    2525             :         /* Keep track of trailing unused pages for later use. */
    2526       28297 :         if (rem_pages > 0) {
    2527       42388 :                 chunk->map[run_ind+need_pages].bits = (rem_pages <<
    2528       21194 :                     pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
    2529             :                     pagesize_mask);
    2530       42388 :                 chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
    2531       21194 :                     pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
    2532             :                     pagesize_mask);
    2533       21194 :                 arena_avail_tree_insert(&arena->runs_avail,
    2534       21194 :                     &chunk->map[run_ind+need_pages]);
    2535             :         }
    2536             : 
    2537      189385 :         for (i = 0; i < need_pages; i++) {
    2538             :                 /*
    2539             :                  * Commit decommitted pages if necessary.  If a decommitted
    2540             :                  * page is encountered, commit all needed adjacent decommitted
    2541             :                  * pages in one operation, in order to reduce system call
    2542             :                  * overhead.
    2543             :                  */
    2544       80544 :                 if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
    2545             :                         size_t j;
    2546             : 
    2547             :                         /*
    2548             :                          * Advance i+j to just past the index of the last page
    2549             :                          * to commit.  Clear CHUNK_MAP_DECOMMITTED and
    2550             :                          * CHUNK_MAP_MADVISED along the way.
    2551             :                          */
    2552       96065 :                         for (j = 0; i + j < need_pages && (chunk->map[run_ind +
    2553       29238 :                             i + j].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED); j++) {
    2554             :                                 /* DECOMMITTED and MADVISED are mutually exclusive. */
    2555       29226 :                                 MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
    2556             :                                              chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
    2557             : 
    2558       29226 :                                 chunk->map[run_ind + i + j].bits &=
    2559       29226 :                                     ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
    2560             :                         }
    2561             : 
    2562             : #  ifdef MALLOC_DECOMMIT
    2563             :                         pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
    2564             :                             << pagesize_2pow)), (j << pagesize_2pow));
    2565             : #  endif
    2566             : 
    2567        8375 :                         arena->stats.committed += j;
    2568             : 
    2569             : #  ifndef MALLOC_DECOMMIT
    2570             :                 }
    2571             : #  else
    2572             :                 } else /* No need to zero since commit zeros. */
    2573             : #  endif
    2574             : 
    2575             :                 /* Zero if necessary. */
    2576       80544 :                 if (zero) {
    2577        7424 :                         if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED)
    2578             :                             == 0) {
    2579        4150 :                                 memset((void *)((uintptr_t)chunk + ((run_ind
    2580        4150 :                                     + i) << pagesize_2pow)), 0, pagesize);
    2581             :                                 /* CHUNK_MAP_ZEROED is cleared below. */
    2582             :                         }
    2583             :                 }
    2584             : 
    2585             :                 /* Update dirty page accounting. */
    2586       80544 :                 if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
    2587       51318 :                         chunk->ndirty--;
    2588       51318 :                         arena->ndirty--;
    2589             :                         /* CHUNK_MAP_DIRTY is cleared below. */
    2590             :                 }
    2591             : 
    2592             :                 /* Initialize the chunk map. */
    2593       80544 :                 if (large) {
    2594       57992 :                         chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE
    2595             :                             | CHUNK_MAP_ALLOCATED;
    2596             :                 } else {
    2597       22552 :                         chunk->map[run_ind + i].bits = (size_t)run
    2598       22552 :                             | CHUNK_MAP_ALLOCATED;
    2599             :                 }
    2600             :         }
    2601             : 
    2602             :         /*
    2603             :          * Set the run size only in the first element for large runs.  This is
    2604             :          * primarily a debugging aid, since the lack of size info for trailing
    2605             :          * pages only matters if the application tries to operate on an
    2606             :          * interior pointer.
    2607             :          */
    2608       28297 :         if (large)
    2609       18753 :                 chunk->map[run_ind].bits |= size;
    2610             : 
    2611       28297 :         if (chunk->ndirty == 0 && old_ndirty > 0)
    2612        4176 :                 arena_chunk_tree_dirty_remove(&arena->chunks_dirty, chunk);
    2613       28297 : }
    2614             : 
    2615             : static void
    2616         105 : arena_chunk_init(arena_t *arena, arena_chunk_t *chunk, bool zeroed)
    2617             : {
    2618             :         size_t i;
    2619             :         /* WARNING: The following relies on !zeroed meaning "used to be an arena
    2620             :          * chunk".
    2621             :          * When the chunk we're initializing as an arena chunk is zeroed, we
    2622             :          * mark all runs as decommitted and zeroed.
    2623             :          * When it is not, which we can assume means it's a recycled arena chunk,
    2624             :          * all it can contain is an arena chunk header (which we're overwriting),
    2625             :          * and zeroed or poisoned memory (because a recycled arena chunk will
    2626             :          * have been emptied before being recycled). In that case, we can get
    2627             :          * away with reusing the chunk as-is, marking all runs as madvised.
    2628             :          */
    2629         105 :         size_t flags = zeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED
    2630         105 :                               : CHUNK_MAP_MADVISED;
    2631             : 
    2632         105 :         arena->stats.mapped += chunksize;
    2633             : 
    2634         105 :         chunk->arena = arena;
    2635             : 
    2636             :         /*
    2637             :          * Claim that no pages are in use, since the header is merely overhead.
    2638             :          */
    2639         105 :         chunk->ndirty = 0;
    2640             : 
    2641             :         /* Initialize the map to contain one maximal free untouched run. */
    2642             : #ifdef MALLOC_DECOMMIT
    2643             :         arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
    2644             :                            (arena_chunk_header_npages << pagesize_2pow));
    2645             : #endif
    2646             : 
    2647         315 :         for (i = 0; i < arena_chunk_header_npages; i++)
    2648         210 :                 chunk->map[i].bits = 0;
    2649         105 :         chunk->map[i].bits = arena_maxclass | flags;
    2650       26565 :         for (i++; i < chunk_npages-1; i++) {
    2651       26460 :                 chunk->map[i].bits = flags;
    2652             :         }
    2653         105 :         chunk->map[chunk_npages-1].bits = arena_maxclass | flags;
    2654             : 
    2655             : #ifdef MALLOC_DECOMMIT
    2656             :         /*
    2657             :          * Start out decommitted, in order to force a closer correspondence
    2658             :          * between dirty pages and committed untouched pages.
    2659             :          */
    2660             :         pages_decommit(run, arena_maxclass);
    2661             : #endif
    2662         105 :         arena->stats.committed += arena_chunk_header_npages;
    2663             : 
    2664             :         /* Insert the run into the runs_avail tree. */
    2665         105 :         arena_avail_tree_insert(&arena->runs_avail,
    2666         105 :             &chunk->map[arena_chunk_header_npages]);
    2667             : 
    2668             : #ifdef MALLOC_DOUBLE_PURGE
    2669             :         LinkedList_Init(&chunk->chunks_madvised_elem);
    2670             : #endif
    2671         105 : }
    2672             : 
    2673             : static void
    2674          97 : arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
    2675             : {
    2676             : 
    2677          97 :         if (arena->spare) {
    2678           1 :                 if (arena->spare->ndirty > 0) {
    2679           1 :                         arena_chunk_tree_dirty_remove(
    2680           2 :                             &chunk->arena->chunks_dirty, arena->spare);
    2681           1 :                         arena->ndirty -= arena->spare->ndirty;
    2682           1 :                         arena->stats.committed -= arena->spare->ndirty;
    2683             :                 }
    2684             : 
    2685             : #ifdef MALLOC_DOUBLE_PURGE
    2686             :                 /* This is safe to do even if arena->spare is not in the list. */
    2687             :                 LinkedList_Remove(&arena->spare->chunks_madvised_elem);
    2688             : #endif
    2689             : 
    2690           1 :                 chunk_dealloc((void *)arena->spare, chunksize, ARENA_CHUNK);
    2691           1 :                 arena->stats.mapped -= chunksize;
    2692           1 :                 arena->stats.committed -= arena_chunk_header_npages;
    2693             :         }
    2694             : 
    2695             :         /*
    2696             :          * Remove run from runs_avail, so that the arena does not use it.
    2697             :          * Dirty page flushing only uses the chunks_dirty tree, so leaving this
    2698             :          * chunk in the chunks_* trees is sufficient for that purpose.
    2699             :          */
    2700          97 :         arena_avail_tree_remove(&arena->runs_avail,
    2701          97 :             &chunk->map[arena_chunk_header_npages]);
    2702             : 
    2703          97 :         arena->spare = chunk;
    2704          97 : }
    2705             : 
    2706             : static arena_run_t *
    2707       28100 : arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
    2708             :     bool zero)
    2709             : {
    2710             :         arena_run_t *run;
    2711             :         arena_chunk_map_t *mapelm, key;
    2712             : 
    2713       28100 :         MOZ_ASSERT(size <= arena_maxclass);
    2714       28100 :         MOZ_ASSERT((size & pagesize_mask) == 0);
    2715             : 
    2716             :         /* Search the arena's chunks for the lowest best fit. */
    2717       28100 :         key.bits = size | CHUNK_MAP_KEY;
    2718       28100 :         mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
    2719       28100 :         if (mapelm) {
    2720             :                 arena_chunk_t *chunk =
    2721       27899 :                     (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
    2722       27899 :                 size_t pageind = ((uintptr_t)mapelm -
    2723             :                     (uintptr_t)chunk->map) /
    2724       27899 :                     sizeof(arena_chunk_map_t);
    2725             : 
    2726       27899 :                 run = (arena_run_t *)((uintptr_t)chunk + (pageind
    2727       27899 :                     << pagesize_2pow));
    2728       27899 :                 arena_run_split(arena, run, size, large, zero);
    2729       27899 :                 return (run);
    2730             :         }
    2731             : 
    2732         201 :         if (arena->spare) {
    2733             :                 /* Use the spare. */
    2734          96 :                 arena_chunk_t *chunk = arena->spare;
    2735          96 :                 arena->spare = nullptr;
    2736          96 :                 run = (arena_run_t *)((uintptr_t)chunk +
    2737             :                     (arena_chunk_header_npages << pagesize_2pow));
    2738             :                 /* Insert the run into the runs_avail tree. */
    2739          96 :                 arena_avail_tree_insert(&arena->runs_avail,
    2740          96 :                     &chunk->map[arena_chunk_header_npages]);
    2741          96 :                 arena_run_split(arena, run, size, large, zero);
    2742          96 :                 return (run);
    2743             :         }
    2744             : 
    2745             :         /*
    2746             :          * No usable runs.  Create a new chunk from which to allocate
    2747             :          * the run.
    2748             :          */
    2749             :         {
    2750             :                 bool zeroed;
    2751             :                 arena_chunk_t *chunk = (arena_chunk_t *)
    2752         105 :                     chunk_alloc(chunksize, chunksize, false, &zeroed);
    2753         105 :                 if (!chunk)
    2754           0 :                         return nullptr;
    2755             : 
    2756         105 :                 arena_chunk_init(arena, chunk, zeroed);
    2757         105 :                 run = (arena_run_t *)((uintptr_t)chunk +
    2758             :                     (arena_chunk_header_npages << pagesize_2pow));
    2759             :         }
    2760             :         /* Update page map. */
    2761         105 :         arena_run_split(arena, run, size, large, zero);
    2762         105 :         return (run);
    2763             : }
    2764             : 
    2765             : static void
    2766          26 : arena_purge(arena_t *arena, bool all)
    2767             : {
    2768             :         arena_chunk_t *chunk;
    2769             :         size_t i, npages;
    2770             :         /* If all is set purge all dirty pages. */
    2771             :         /* If all is set, purge all dirty pages. */
    2772             : #ifdef MOZ_DEBUG
    2773             :         size_t ndirty = 0;
    2774             :         rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
    2775             :             chunk) {
    2776             :                 ndirty += chunk->ndirty;
    2777             :         } rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
    2778             :         MOZ_ASSERT(ndirty == arena->ndirty);
    2779             : #endif
    2780          26 :         MOZ_DIAGNOSTIC_ASSERT(all || (arena->ndirty > opt_dirty_max));
    2781             : 
    2782             :         /*
    2783             :          * Iterate downward through chunks until enough dirty memory has been
    2784             :          * purged.  Terminate as soon as possible in order to minimize the
    2785             :          * number of system calls, even if a chunk has only been partially
    2786             :          * purged.
    2787             :          */
    2788         305 :         while (arena->ndirty > (dirty_max >> 1)) {
    2789             : #ifdef MALLOC_DOUBLE_PURGE
    2790             :                 bool madvised = false;
    2791             : #endif
    2792         279 :                 chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
    2793         279 :                 MOZ_DIAGNOSTIC_ASSERT(chunk);
    2794             : 
    2795       79575 :                 for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
    2796       39674 :                         MOZ_DIAGNOSTIC_ASSERT(i >= arena_chunk_header_npages);
    2797             : 
    2798       39674 :                         if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
    2799             : #ifdef MALLOC_DECOMMIT
    2800             :                                 const size_t free_operation = CHUNK_MAP_DECOMMITTED;
    2801             : #else
    2802         829 :                                 const size_t free_operation = CHUNK_MAP_MADVISED;
    2803             : #endif
    2804         829 :                                 MOZ_ASSERT((chunk->map[i].bits &
    2805             :                                             CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
    2806         829 :                                 chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
    2807             :                                 /* Find adjacent dirty run(s). */
    2808        4388 :                                 for (npages = 1;
    2809        8764 :                                      i > arena_chunk_header_npages &&
    2810        4376 :                                        (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
    2811             :                                      npages++) {
    2812        3559 :                                         i--;
    2813        3559 :                                         MOZ_ASSERT((chunk->map[i].bits &
    2814             :                                                     CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
    2815        3559 :                                         chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
    2816             :                                 }
    2817         829 :                                 chunk->ndirty -= npages;
    2818         829 :                                 arena->ndirty -= npages;
    2819             : 
    2820             : #ifdef MALLOC_DECOMMIT
    2821             :                                 pages_decommit((void *)((uintptr_t)
    2822             :                                     chunk + (i << pagesize_2pow)),
    2823             :                                     (npages << pagesize_2pow));
    2824             : #endif
    2825         829 :                                 arena->stats.committed -= npages;
    2826             : 
    2827             : #ifndef MALLOC_DECOMMIT
    2828         829 :                                 madvise((void *)((uintptr_t)chunk + (i <<
    2829             :                                     pagesize_2pow)), (npages << pagesize_2pow),
    2830         829 :                                     MADV_FREE);
    2831             : #  ifdef MALLOC_DOUBLE_PURGE
    2832             :                                 madvised = true;
    2833             : #  endif
    2834             : #endif
    2835         829 :                                 if (arena->ndirty <= (dirty_max >> 1))
    2836             :                                         break;
    2837             :                         }
    2838             :                 }
    2839             : 
    2840         279 :                 if (chunk->ndirty == 0) {
    2841             :                         arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
    2842         263 :                             chunk);
    2843             :                 }
    2844             : #ifdef MALLOC_DOUBLE_PURGE
    2845             :                 if (madvised) {
    2846             :                         /* The chunk might already be in the list, but this
    2847             :                          * makes sure it's at the front. */
    2848             :                         LinkedList_Remove(&chunk->chunks_madvised_elem);
    2849             :                         LinkedList_InsertHead(&arena->chunks_madvised, &chunk->chunks_madvised_elem);
    2850             :                 }
    2851             : #endif
    2852             :         }
    2853          26 : }
    2854             : 
    2855             : static void
    2856       17882 : arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
    2857             : {
    2858             :         arena_chunk_t *chunk;
    2859             :         size_t size, run_ind, run_pages;
    2860             : 
    2861       17882 :         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    2862       17882 :         run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
    2863             :             >> pagesize_2pow);
    2864       17882 :         MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
    2865       17882 :         MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
    2866       17882 :         if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
    2867       13763 :                 size = chunk->map[run_ind].bits & ~pagesize_mask;
    2868             :         else
    2869        4119 :                 size = run->bin->run_size;
    2870       17882 :         run_pages = (size >> pagesize_2pow);
    2871             : 
    2872             :         /* Mark pages as unallocated in the chunk map. */
    2873       17882 :         if (dirty) {
    2874             :                 size_t i;
    2875             : 
    2876      129688 :                 for (i = 0; i < run_pages; i++) {
    2877       55903 :                         MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
    2878             :                             == 0);
    2879       55903 :                         chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
    2880             :                 }
    2881             : 
    2882       17882 :                 if (chunk->ndirty == 0) {
    2883        4450 :                         arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
    2884        4450 :                             chunk);
    2885             :                 }
    2886       17882 :                 chunk->ndirty += run_pages;
    2887       17882 :                 arena->ndirty += run_pages;
    2888             :         } else {
    2889             :                 size_t i;
    2890             : 
    2891           0 :                 for (i = 0; i < run_pages; i++) {
    2892           0 :                         chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
    2893           0 :                             CHUNK_MAP_ALLOCATED);
    2894             :                 }
    2895             :         }
    2896       17882 :         chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
    2897             :             pagesize_mask);
    2898       35764 :         chunk->map[run_ind+run_pages-1].bits = size |
    2899       17882 :             (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
    2900             : 
    2901             :         /* Try to coalesce forward. */
    2902       35471 :         if (run_ind + run_pages < chunk_npages &&
    2903       17589 :             (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
    2904             :                 size_t nrun_size = chunk->map[run_ind+run_pages].bits &
    2905        7898 :                     ~pagesize_mask;
    2906             : 
    2907             :                 /*
    2908             :                  * Remove successor from runs_avail; the coalesced run is
    2909             :                  * inserted later.
    2910             :                  */
    2911        7898 :                 arena_avail_tree_remove(&arena->runs_avail,
    2912        7898 :                     &chunk->map[run_ind+run_pages]);
    2913             : 
    2914        7898 :                 size += nrun_size;
    2915        7898 :                 run_pages = size >> pagesize_2pow;
    2916             : 
    2917        7898 :                 MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
    2918             :                     == nrun_size);
    2919        7898 :                 chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
    2920             :                     pagesize_mask);
    2921        7898 :                 chunk->map[run_ind+run_pages-1].bits = size |
    2922        7898 :                     (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
    2923             :         }
    2924             : 
    2925             :         /* Try to coalesce backward. */
    2926       17882 :         if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
    2927             :             CHUNK_MAP_ALLOCATED) == 0) {
    2928        2662 :                 size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
    2929             : 
    2930        2662 :                 run_ind -= prun_size >> pagesize_2pow;
    2931             : 
    2932             :                 /*
    2933             :                  * Remove predecessor from runs_avail; the coalesced run is
    2934             :                  * inserted later.
    2935             :                  */
    2936        2662 :                 arena_avail_tree_remove(&arena->runs_avail,
    2937        2662 :                     &chunk->map[run_ind]);
    2938             : 
    2939        2662 :                 size += prun_size;
    2940        2662 :                 run_pages = size >> pagesize_2pow;
    2941             : 
    2942        2662 :                 MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
    2943             :                     prun_size);
    2944        2662 :                 chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
    2945             :                     pagesize_mask);
    2946        5324 :                 chunk->map[run_ind+run_pages-1].bits = size |
    2947        2662 :                     (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
    2948             :         }
    2949             : 
    2950             :         /* Insert into runs_avail, now that coalescing is complete. */
    2951       17882 :         arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
    2952             : 
    2953             :         /* Deallocate chunk if it is now completely unused. */
    2954       17882 :         if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
    2955             :             CHUNK_MAP_ALLOCATED)) == arena_maxclass)
    2956          97 :                 arena_chunk_dealloc(arena, chunk);
    2957             : 
    2958             :         /* Enforce opt_dirty_max. */
    2959       17882 :         if (arena->ndirty > opt_dirty_max)
    2960          26 :                 arena_purge(arena, false);
    2961       17882 : }
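                     : /*
                     :  * Note on the map encoding above (illustrative): a free run's size is stored
                     :  * in both its first and last page map entries, so a neighbour being freed can
                     :  * find the full extent of an adjacent free run from either end.  E.g. freeing
                     :  * pages [8..11) next to a free run covering [11..15) reads the neighbour's
                     :  * size from map[11], removes it from runs_avail, and re-inserts the coalesced
                     :  * run [8..15) with its size recorded in map[8] and map[14].
                     :  */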
    2962             : 
    2963             : static void
    2964           0 : arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    2965             :     size_t oldsize, size_t newsize)
    2966             : {
    2967           0 :         size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
    2968           0 :         size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
    2969             : 
    2970           0 :         MOZ_ASSERT(oldsize > newsize);
    2971             : 
    2972             :         /*
    2973             :          * Update the chunk map so that arena_run_dalloc() can treat the
    2974             :          * leading run as separately allocated.
    2975             :          */
    2976           0 :         chunk->map[pageind].bits = (oldsize - newsize) | CHUNK_MAP_LARGE |
    2977             :             CHUNK_MAP_ALLOCATED;
    2978           0 :         chunk->map[pageind+head_npages].bits = newsize | CHUNK_MAP_LARGE |
    2979             :             CHUNK_MAP_ALLOCATED;
    2980             : 
    2981           0 :         arena_run_dalloc(arena, run, false);
    2982           0 : }
    2983             : 
    2984             : static void
    2985           3 : arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    2986             :     size_t oldsize, size_t newsize, bool dirty)
    2987             : {
    2988           3 :         size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
    2989           3 :         size_t npages = newsize >> pagesize_2pow;
    2990             : 
    2991           3 :         MOZ_ASSERT(oldsize > newsize);
    2992             : 
    2993             :         /*
    2994             :          * Update the chunk map so that arena_run_dalloc() can treat the
    2995             :          * trailing run as separately allocated.
    2996             :          */
    2997           3 :         chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE |
    2998             :             CHUNK_MAP_ALLOCATED;
    2999           6 :         chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE
    3000           3 :             | CHUNK_MAP_ALLOCATED;
    3001             : 
    3002           3 :         arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
    3003           3 :             dirty);
    3004           3 : }
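
arena_run_trim_head() and arena_run_trim_tail() never move memory; they only rewrite two map slots so that the unwanted half of the run looks like an independently allocated run, which arena_run_dalloc() can then free normally. A sketch of the tail case on a simplified map, assuming flags collapse into a single bool (MapSlot and trim_tail are illustrative names, not mozjemalloc's):

        #include <cstddef>
        #include <vector>

        struct MapSlot {
                size_t run_bytes;   // size recorded in the run's first page slot
                bool   allocated;
        };

        // Keep the first `newsize` bytes of the run headed at `pageind` and
        // relabel the remainder as a separate allocated run, ready to be freed.
        static void trim_tail(std::vector<MapSlot> &map, size_t pageind,
            size_t oldsize, size_t newsize, size_t page_size)
        {
                size_t keep_pages = newsize / page_size;   // newsize is page-aligned
                map[pageind]              = { newsize, true };            // shrunken head
                map[pageind + keep_pages] = { oldsize - newsize, true };  // new tail header
                // The caller then frees the run starting at page pageind + keep_pages.
        }
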
    3005             : 
    3006             : static arena_run_t *
    3007       37671 : arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
    3008             : {
    3009             :         arena_chunk_map_t *mapelm;
    3010             :         arena_run_t *run;
    3011             :         unsigned i, remainder;
    3012             : 
    3013             :         /* Look for a usable run. */
    3014       37671 :         mapelm = arena_run_tree_first(&bin->runs);
    3015       37671 :         if (mapelm) {
    3016             :                 /* run is guaranteed to have available space. */
    3017       28127 :                 arena_run_tree_remove(&bin->runs, mapelm);
    3018       28127 :                 run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
    3019       28127 :                 return (run);
    3020             :         }
    3021             :         /* No existing runs have any space available. */
    3022             : 
    3023             :         /* Allocate a new run. */
    3024        9544 :         run = arena_run_alloc(arena, bin, bin->run_size, false, false);
    3025        9544 :         if (!run)
    3026             :                 return nullptr;
    3027             :         /*
    3028             :          * Don't initialize if a race in arena_run_alloc() allowed an existing
    3029             :          * run to become usable.
    3030             :          */
    3031        9544 :         if (run == bin->runcur)
    3032             :                 return (run);
    3033             : 
    3034             :         /* Initialize run internals. */
    3035        9544 :         run->bin = bin;
    3036             : 
    3037       27327 :         for (i = 0; i < bin->regs_mask_nelms - 1; i++)
    3038       17783 :                 run->regs_mask[i] = UINT_MAX;
    3039        9544 :         remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
    3040        9544 :         if (remainder == 0)
    3041           0 :                 run->regs_mask[i] = UINT_MAX;
    3042             :         else {
    3043             :                 /* The last element has spare bits that need to be unset. */
    3044        9544 :                 run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
    3045        9544 :                     - remainder));
    3046             :         }
    3047             : 
    3048        9544 :         run->regs_minelm = 0;
    3049             : 
    3050        9544 :         run->nfree = bin->nregs;
    3051             : #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
    3052        9544 :         run->magic = ARENA_RUN_MAGIC;
    3053             : #endif
    3054             : 
    3055        9544 :         bin->stats.curruns++;
    3056        9544 :         return (run);
    3057             : }
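
The mask initialization at the end of arena_bin_nonfull_run_get() amounts to "mark every region free, but keep the unused high bits of the last word clear". A self-contained sketch of the same computation, assuming a 32-bit unsigned (so 1U << (SIZEOF_INT_2POW + 3) is 32); init_regs_mask is an illustrative name:

        #include <climits>
        #include <vector>

        static std::vector<unsigned> init_regs_mask(unsigned nregs)
        {
                const unsigned bits = sizeof(unsigned) * CHAR_BIT;  // 1U << (SIZEOF_INT_2POW + 3)
                unsigned nelms = (nregs + bits - 1) / bits;
                std::vector<unsigned> mask(nelms, UINT_MAX);        // every region starts free

                unsigned remainder = nregs % bits;                  // bits used in the last word
                if (remainder != 0)                                 // clear the spare high bits
                        mask[nelms - 1] = UINT_MAX >> (bits - remainder);
                return mask;
        }
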
    3058             : 
    3059             : /* bin->runcur must have space available before this function is called. */
    3060             : static inline void *
    3061     1183244 : arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
    3062             : {
    3063             :         void *ret;
    3064             : 
    3065     1183244 :         MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
    3066     1183244 :         MOZ_DIAGNOSTIC_ASSERT(run->nfree > 0);
    3067             : 
    3068     1183244 :         ret = arena_run_reg_alloc(run, bin);
    3069     1183244 :         MOZ_DIAGNOSTIC_ASSERT(ret);
    3070     1183244 :         run->nfree--;
    3071             : 
    3072     1183244 :         return (ret);
    3073             : }
    3074             : 
    3075             : /* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
    3076             : static void *
    3077       37671 : arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
    3078             : {
    3079             : 
    3080       37671 :         bin->runcur = arena_bin_nonfull_run_get(arena, bin);
    3081       37671 :         if (!bin->runcur)
    3082             :                 return nullptr;
    3083       37671 :         MOZ_DIAGNOSTIC_ASSERT(bin->runcur->magic == ARENA_RUN_MAGIC);
    3084       37671 :         MOZ_DIAGNOSTIC_ASSERT(bin->runcur->nfree > 0);
    3085             : 
    3086       37671 :         return (arena_bin_malloc_easy(arena, bin, bin->runcur));
    3087             : }
    3088             : 
    3089             : /*
    3090             :  * Calculate bin->run_size such that it meets the following constraints:
    3091             :  *
    3092             :  *   *) bin->run_size >= min_run_size
    3093             :  *   *) bin->run_size <= arena_maxclass
    3094             :  *   *) bin->run_size <= RUN_MAX_SMALL
    3095             :  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
    3096             :  *
    3097             :  * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
    3098             :  * also calculated here, since these settings are all interdependent.
    3099             :  */
    3100             : static size_t
    3101         105 : arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
    3102             : {
    3103             :         size_t try_run_size, good_run_size;
    3104             :         unsigned good_nregs, good_mask_nelms, good_reg0_offset;
    3105             :         unsigned try_nregs, try_mask_nelms, try_reg0_offset;
    3106             : 
    3107         105 :         MOZ_ASSERT(min_run_size >= pagesize);
    3108         105 :         MOZ_ASSERT(min_run_size <= arena_maxclass);
    3109             : 
    3110             :         /*
    3111             :          * Calculate known-valid settings before entering the run_size
    3112             :          * expansion loop, so that the first part of the loop always copies
    3113             :          * valid settings.
    3114             :          *
    3115             :          * The do..while loop iteratively reduces the number of regions until
    3116             :          * the run header and the regions no longer overlap.  A closed formula
    3117             :          * would be quite messy, since there is an interdependency between the
    3118             :          * header's mask length and the number of regions.
    3119             :          */
    3120         105 :         try_run_size = min_run_size;
    3121         210 :         try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size)
    3122         105 :             + 1; /* Counter-act try_nregs-- in loop. */
    3123             :         do {
    3124         144 :                 try_nregs--;
    3125         288 :                 try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
    3126         144 :                     ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
    3127         144 :                 try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
    3128         144 :         } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
    3129         144 :             > try_reg0_offset);
    3130             : 
    3131             :         /* run_size expansion loop. */
    3132             :         do {
    3133             :                 /*
    3134             :                  * Copy valid settings before trying more aggressive settings.
    3135             :                  */
    3136         201 :                 good_run_size = try_run_size;
    3137         201 :                 good_nregs = try_nregs;
    3138         201 :                 good_mask_nelms = try_mask_nelms;
    3139         201 :                 good_reg0_offset = try_reg0_offset;
    3140             : 
    3141             :                 /* Try more aggressive settings. */
    3142         201 :                 try_run_size += pagesize;
    3143         402 :                 try_nregs = ((try_run_size - sizeof(arena_run_t)) /
    3144         201 :                     bin->reg_size) + 1; /* Counter-act try_nregs-- in loop. */
    3145             :                 do {
    3146         282 :                         try_nregs--;
    3147         564 :                         try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
    3148         282 :                             ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ?
    3149             :                             1 : 0);
    3150         282 :                         try_reg0_offset = try_run_size - (try_nregs *
    3151             :                             bin->reg_size);
    3152             :                 } while (sizeof(arena_run_t) + (sizeof(unsigned) *
    3153         282 :                     (try_mask_nelms - 1)) > try_reg0_offset);
    3154             :         } while (try_run_size <= arena_maxclass
    3155         201 :             && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
    3156         399 :             && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
    3157             : 
    3158         105 :         MOZ_ASSERT(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
    3159             :             <= good_reg0_offset);
    3160         105 :         MOZ_ASSERT((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
    3161             : 
    3162             :         /* Copy final settings. */
    3163         105 :         bin->run_size = good_run_size;
    3164         105 :         bin->nregs = good_nregs;
    3165         105 :         bin->regs_mask_nelms = good_mask_nelms;
    3166         105 :         bin->reg0_offset = good_reg0_offset;
    3167             : 
    3168         105 :         return (good_run_size);
    3169             : }
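
The inner do..while above converges on the largest region count whose variable-length run header still fits in front of region 0. A standalone sketch of that fixed point, with the base header size passed in explicitly; run_layout and layout_run are illustrative names, the caller is assumed to pass something like sizeof(arena_run_t) (which already includes one mask word, hence the "- 1"), and reg_size is assumed to be much smaller than run_size, as it is for the small bins:

        #include <cstddef>

        struct run_layout {
                unsigned nregs;         // regions per run
                unsigned mask_nelms;    // words in the free-region bitmask
                size_t   reg0_offset;   // where region 0 starts
        };

        static run_layout layout_run(size_t run_size, size_t reg_size,
            size_t header_base /* e.g. sizeof(arena_run_t) */)
        {
                const unsigned bits = sizeof(unsigned) * 8;
                run_layout l;
                // Counter-act the first decrement, exactly as the code above does.
                l.nregs = (unsigned)((run_size - header_base) / reg_size) + 1;
                do {
                        l.nregs--;
                        l.mask_nelms = (l.nregs + bits - 1) / bits;
                        l.reg0_offset = run_size - l.nregs * reg_size;
                } while (header_base + sizeof(unsigned) * (l.mask_nelms - 1)
                    > l.reg0_offset);
                return l;
        }

The outer expansion loop then re-runs this with ever larger run sizes until the per-run header overhead is acceptable or arena_maxclass is reached.
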
    3170             : 
    3171             : static inline void *
    3172     1182944 : arena_malloc_small(arena_t *arena, size_t size, bool zero)
    3173             : {
    3174             :         void *ret;
    3175             :         arena_bin_t *bin;
    3176             :         arena_run_t *run;
    3177             : 
    3178     1182944 :         if (size < small_min) {
    3179             :                 /* Tiny. */
    3180       99774 :                 size = pow2_ceil(size);
    3181       99666 :                 bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
    3182             :                     1)))];
    3183             :                 /*
    3184             :                  * Bin calculation is always correct, but we may need
    3185             :                  * to fix size for the purposes of assertions and/or
    3186             :                  * stats accuracy.
    3187             :                  */
    3188       99666 :                 if (size < (1U << TINY_MIN_2POW))
    3189       35911 :                         size = (1U << TINY_MIN_2POW);
    3190     1083170 :         } else if (size <= small_max) {
    3191             :                 /* Quantum-spaced. */
    3192     1031772 :                 size = QUANTUM_CEILING(size);
    3193     1031772 :                 bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
    3194             :                     - 1];
    3195             :         } else {
    3196             :                 /* Sub-page. */
    3197       51398 :                 size = pow2_ceil(size);
    3198       51398 :                 bin = &arena->bins[ntbins + nqbins
    3199       51398 :                     + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
    3200             :         }
    3201     1182836 :         MOZ_DIAGNOSTIC_ASSERT(size == bin->reg_size);
    3202             : 
    3203     2366080 :         malloc_spin_lock(&arena->lock);
    3204     1183244 :         if ((run = bin->runcur) && run->nfree > 0)
    3205     1145573 :                 ret = arena_bin_malloc_easy(arena, bin, run);
    3206             :         else
    3207       37671 :                 ret = arena_bin_malloc_hard(arena, bin);
    3208             : 
    3209     1183244 :         if (!ret) {
    3210           0 :                 malloc_spin_unlock(&arena->lock);
    3211           0 :                 return nullptr;
    3212             :         }
    3213             : 
    3214     1183244 :         arena->stats.allocated_small += size;
    3215     2366485 :         malloc_spin_unlock(&arena->lock);
    3216             : 
    3217     1183241 :         if (zero == false) {
    3218             :                 if (opt_junk)
    3219             :                         memset(ret, kAllocJunk, size);
    3220             :                 else if (opt_zero)
    3221             :                         memset(ret, 0, size);
    3222             :         } else
    3223             :                 memset(ret, 0, size);
    3224             : 
    3225             :         return (ret);
    3226             : }
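
The three branches above apply three different rounding rules before the bin index is computed: tiny classes round up to a power of two, quantum-spaced classes round up to a multiple of the quantum, and sub-page classes round up to a power of two again. A rough standalone sketch of just the rounding; small_size_class and pow2_ceil_sketch are illustrative names, and the 16-byte quantum and 16/512 thresholds stand in for small_min/small_max, which depend on the real build configuration:

        #include <cstddef>

        static size_t pow2_ceil_sketch(size_t x)
        {
                size_t p = 1;
                while (p < x)
                        p <<= 1;
                return p;
        }

        static size_t small_size_class(size_t size)          // size != 0 assumed
        {
                const size_t quantum   = 16;   // assumed: 1 << opt_quantum_2pow
                const size_t small_min = 16;   // assumed tiny/quantum threshold
                const size_t small_max = 512;  // assumed largest quantum-spaced class
                if (size < small_min)
                        return pow2_ceil_sketch(size);                   /* tiny */
                if (size <= small_max)
                        return (size + quantum - 1) & ~(quantum - 1);    /* quantum-spaced */
                return pow2_ceil_sketch(size);                           /* sub-page */
        }
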
    3227             : 
    3228             : static void *
    3229       18556 : arena_malloc_large(arena_t *arena, size_t size, bool zero)
    3230             : {
    3231             :         void *ret;
    3232             : 
    3233             :         /* Large allocation. */
    3234       18556 :         size = PAGE_CEILING(size);
    3235       37112 :         malloc_spin_lock(&arena->lock);
    3236       18556 :         ret = (void *)arena_run_alloc(arena, nullptr, size, true, zero);
    3237       18556 :         if (!ret) {
    3238           0 :                 malloc_spin_unlock(&arena->lock);
    3239           0 :                 return nullptr;
    3240             :         }
    3241       18556 :         arena->stats.allocated_large += size;
    3242       37112 :         malloc_spin_unlock(&arena->lock);
    3243             : 
    3244             :         if (zero == false) {
    3245             :                 if (opt_junk)
    3246             :                         memset(ret, kAllocJunk, size);
    3247             :                 else if (opt_zero)
    3248             :                         memset(ret, 0, size);
    3249             :         }
    3250             : 
    3251       18556 :         return (ret);
    3252             : }
    3253             : 
    3254             : static inline void *
    3255     1201394 : arena_malloc(arena_t *arena, size_t size, bool zero)
    3256             : {
    3257             : 
    3258     1201394 :         MOZ_ASSERT(arena);
    3259     1201394 :         MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
    3260     1201394 :         MOZ_ASSERT(size != 0);
    3261     1201394 :         MOZ_ASSERT(QUANTUM_CEILING(size) <= arena_maxclass);
    3262             : 
    3263     1201394 :         if (size <= bin_maxclass) {
    3264     1182838 :                 return (arena_malloc_small(arena, size, zero));
    3265             :         } else
    3266       18556 :                 return (arena_malloc_large(arena, size, zero));
    3267             : }
    3268             : 
    3269             : static inline void *
    3270      992011 : imalloc(size_t size)
    3271             : {
    3272             : 
    3273      992011 :         MOZ_ASSERT(size != 0);
    3274             : 
    3275      992011 :         if (size <= arena_maxclass)
    3276      992006 :                 return (arena_malloc(choose_arena(), size, false));
    3277             :         else
    3278           5 :                 return (huge_malloc(size, false));
    3279             : }
    3280             : 
    3281             : static inline void *
    3282      119474 : icalloc(size_t size)
    3283             : {
    3284             : 
    3285      119474 :         if (size <= arena_maxclass)
    3286      119473 :                 return (arena_malloc(choose_arena(), size, true));
    3287             :         else
    3288           1 :                 return (huge_malloc(size, true));
    3289             : }
    3290             : 
    3291             : /* Only handles large allocations that require more than page alignment. */
    3292             : static void *
    3293           0 : arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
    3294             : {
    3295             :         void *ret;
    3296             :         size_t offset;
    3297             :         arena_chunk_t *chunk;
    3298             : 
    3299           0 :         MOZ_ASSERT((size & pagesize_mask) == 0);
    3300           0 :         MOZ_ASSERT((alignment & pagesize_mask) == 0);
    3301             : 
    3302           0 :         malloc_spin_lock(&arena->lock);
    3303           0 :         ret = (void *)arena_run_alloc(arena, nullptr, alloc_size, true, false);
    3304           0 :         if (!ret) {
    3305           0 :                 malloc_spin_unlock(&arena->lock);
    3306           0 :                 return nullptr;
    3307             :         }
    3308             : 
    3309           0 :         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
    3310             : 
    3311           0 :         offset = (uintptr_t)ret & (alignment - 1);
    3312           0 :         MOZ_ASSERT((offset & pagesize_mask) == 0);
    3313           0 :         MOZ_ASSERT(offset < alloc_size);
    3314           0 :         if (offset == 0)
    3315           0 :                 arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, alloc_size, size, false);
    3316             :         else {
    3317             :                 size_t leadsize, trailsize;
    3318             : 
    3319           0 :                 leadsize = alignment - offset;
    3320           0 :                 if (leadsize > 0) {
    3321           0 :                         arena_run_trim_head(arena, chunk, (arena_run_t*)ret, alloc_size,
    3322           0 :                             alloc_size - leadsize);
    3323           0 :                         ret = (void *)((uintptr_t)ret + leadsize);
    3324             :                 }
    3325             : 
    3326           0 :                 trailsize = alloc_size - leadsize - size;
    3327           0 :                 if (trailsize != 0) {
    3328             :                         /* Trim trailing space. */
    3329           0 :                         MOZ_ASSERT(trailsize < alloc_size);
    3330             :                         arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
    3331           0 :                             size, false);
    3332             :                 }
    3333             :         }
    3334             : 
    3335           0 :         arena->stats.allocated_large += size;
    3336           0 :         malloc_spin_unlock(&arena->lock);
    3337             : 
    3338             :         if (opt_junk)
    3339             :                 memset(ret, kAllocJunk, size);
    3340             :         else if (opt_zero)
    3341             :                 memset(ret, 0, size);
    3342           0 :         return (ret);
    3343             : }
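
The carving step in arena_palloc() is pure arithmetic: trim leadsize bytes off the front of the over-sized run to reach the next aligned address, keep size bytes, and trim the remainder off the tail. A small worked example, with assumed example values for the run address, alignment, and page size:

        #include <cstdint>
        #include <cstdio>

        int main()
        {
                const uintptr_t run_start  = 0x7f0000003000;  // assumed example run address
                const size_t    page_size  = 4096;            // assumed
                const size_t    size       = 4 * page_size;   // requested, page-multiple
                const size_t    alignment  = 0x4000;          // 16 KiB
                const size_t    alloc_size = size + alignment - page_size;  // over-sized run

                size_t    offset    = run_start & (alignment - 1);
                size_t    leadsize  = offset ? alignment - offset : 0;
                uintptr_t ret       = run_start + leadsize;   // aligned result
                size_t    trailsize = alloc_size - leadsize - size;

                printf("leadsize=%zu aligned=%#llx trailsize=%zu\n",
                    leadsize, (unsigned long long)ret, trailsize);
                return 0;
        }

Here leadsize is 4096, the result lands on a 16 KiB boundary, and 8192 trailing bytes go back to the arena via arena_run_trim_tail().
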
    3344             : 
    3345             : static inline void *
    3346         174 : ipalloc(size_t alignment, size_t size)
    3347             : {
    3348             :         void *ret;
    3349             :         size_t ceil_size;
    3350             : 
    3351             :         /*
    3352             :          * Round size up to the nearest multiple of alignment.
    3353             :          *
    3354             :          * This done, we can take advantage of the fact that for each small
    3355             :          * size class, every object is aligned at the smallest power of two
    3356             :          * that is non-zero in the base two representation of the size.  For
    3357             :          * example:
    3358             :          *
    3359             :          *   Size |   Base 2 | Minimum alignment
    3360             :          *   -----+----------+------------------
    3361             :          *     96 |  1100000 |  32
    3362             :          *    144 | 10100000 |  32
    3363             :          *    192 | 11000000 |  64
    3364             :          *
    3365             :          * Depending on runtime settings, it is possible that arena_malloc()
    3366             :          * will further round up to a power of two, but that never causes
    3367             :          * correctness issues.
    3368             :          */
    3369         174 :         ceil_size = ALIGNMENT_CEILING(size, alignment);
    3370             :         /*
    3371             :          * (ceil_size < size) protects against the combination of maximal
    3372             :          * alignment and size greater than maximal alignment.
    3373             :          */
    3374         174 :         if (ceil_size < size) {
    3375             :                 /* size_t overflow. */
    3376             :                 return nullptr;
    3377             :         }
    3378             : 
    3379         178 :         if (ceil_size <= pagesize || (alignment <= pagesize
    3380           4 :             && ceil_size <= arena_maxclass))
    3381         174 :                 ret = arena_malloc(choose_arena(), ceil_size, false);
    3382             :         else {
    3383             :                 size_t run_size;
    3384             : 
    3385             :                 /*
    3386             :                  * We can't achieve sub-page alignment, so round up alignment
    3387             :                  * permanently; it makes later calculations simpler.
    3388             :                  */
    3389           0 :                 alignment = PAGE_CEILING(alignment);
    3390           0 :                 ceil_size = PAGE_CEILING(size);
    3391             :                 /*
    3392             :                  * (ceil_size < size) protects against very large sizes within
    3393             :                  * pagesize of SIZE_T_MAX.
    3394             :                  *
    3395             :                  * (ceil_size + alignment < ceil_size) protects against the
    3396             :                  * combination of maximal alignment and ceil_size large enough
    3397             :                  * to cause overflow.  This is similar to the first overflow
    3398             :                  * check above, but it needs to be repeated due to the new
    3399             :                  * ceil_size value, which may now be *equal* to maximal
    3400             :                  * alignment, whereas before we only detected overflow if the
    3401             :                  * original size was *greater* than maximal alignment.
    3402             :                  */
    3403           0 :                 if (ceil_size < size || ceil_size + alignment < ceil_size) {
    3404             :                         /* size_t overflow. */
    3405             :                         return nullptr;
    3406             :                 }
    3407             : 
    3408             :                 /*
    3409             :                  * Calculate the size of the over-size run that arena_palloc()
    3410             :                  * would need to allocate in order to guarantee the alignment.
    3411             :                  */
    3412           0 :                 if (ceil_size >= alignment)
    3413           0 :                         run_size = ceil_size + alignment - pagesize;
    3414             :                 else {
    3415             :                         /*
    3416             :                          * It is possible that (alignment << 1) will cause
    3417             :                          * overflow, but it doesn't matter because we also
    3418             :                          * subtract pagesize, which in the case of overflow
    3419             :                          * leaves us with a very large run_size.  That causes
    3420             :                          * the first conditional below to fail, which means
    3421             :                          * that the bogus run_size value never gets used for
    3422             :                          * anything important.
    3423             :                          */
    3424           0 :                         run_size = (alignment << 1) - pagesize;
    3425             :                 }
    3426             : 
    3427           0 :                 if (run_size <= arena_maxclass) {
    3428           0 :                         ret = arena_palloc(choose_arena(), alignment, ceil_size,
    3429           0 :                             run_size);
    3430           0 :                 } else if (alignment <= chunksize)
    3431           0 :                         ret = huge_malloc(ceil_size, false);
    3432             :                 else
    3433           0 :                         ret = huge_palloc(ceil_size, alignment, false);
    3434             :         }
    3435             : 
    3436         174 :         MOZ_ASSERT(((uintptr_t)ret & (alignment - 1)) == 0);
    3437             :         return (ret);
    3438             : }
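
For the branch where sub-page alignment is impossible, the interesting part of ipalloc() is the planning arithmetic: page-align both quantities, guard against size_t wrap-around, and size the over-large run so that it is guaranteed to contain a page-aligned region with the requested alignment. A sketch of that step under an assumed 4 KiB page; plan_aligned_run is an illustrative name:

        #include <cstddef>

        static bool plan_aligned_run(size_t size, size_t alignment, size_t *run_size)
        {
                const size_t page_size = 4096;                   // assumed
                const size_t page_mask = page_size - 1;
                // Round both quantities up to whole pages, as the code above does.
                alignment = (alignment + page_mask) & ~page_mask;
                size_t ceil_size = (size + page_mask) & ~page_mask;
                if (ceil_size < size || ceil_size + alignment < ceil_size)
                        return false;                            // size_t overflow
                // An over-size run this large always contains an aligned region
                // of ceil_size bytes starting on a page boundary.
                *run_size = (ceil_size >= alignment)
                    ? ceil_size + alignment - page_size
                    : (alignment << 1) - page_size;
                return true;
        }
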
    3439             : 
    3440             : /* Return the size of the allocation pointed to by ptr. */
    3441             : static size_t
    3442      130221 : arena_salloc(const void *ptr)
    3443             : {
    3444             :         size_t ret;
    3445             :         arena_chunk_t *chunk;
    3446             :         size_t pageind, mapbits;
    3447             : 
    3448      130221 :         MOZ_ASSERT(ptr);
    3449      130221 :         MOZ_ASSERT(CHUNK_ADDR2BASE(ptr) != ptr);
    3450             : 
    3451      130221 :         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    3452      130221 :         pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
    3453      130221 :         mapbits = chunk->map[pageind].bits;
    3454      130221 :         MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
    3455      130221 :         if ((mapbits & CHUNK_MAP_LARGE) == 0) {
    3456      127396 :                 arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
    3457      127396 :                 MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
    3458      127396 :                 ret = run->bin->reg_size;
    3459             :         } else {
    3460        2825 :                 ret = mapbits & ~pagesize_mask;
    3461        2825 :                 MOZ_DIAGNOSTIC_ASSERT(ret != 0);
    3462             :         }
    3463             : 
    3464      130221 :         return (ret);
    3465             : }
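
arena_salloc() leans entirely on address arithmetic: mask off the low bits of the pointer to find the owning chunk, then shift the offset to get the page index into that chunk's map. A sketch of just that arithmetic, with an assumed 1 MiB chunk and 4 KiB page; page_index_in_chunk is an illustrative name:

        #include <cstddef>
        #include <cstdint>

        static size_t page_index_in_chunk(const void *ptr)
        {
                const uintptr_t chunk_size = 1 << 20;   // assumed chunksize
                const unsigned  page_shift = 12;        // assumed pagesize_2pow
                uintptr_t p = (uintptr_t)ptr;
                uintptr_t chunk = p & ~(chunk_size - 1);        // CHUNK_ADDR2BASE
                return (size_t)((p - chunk) >> page_shift);     // pageind
        }
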
    3466             : 
    3467             : /*
    3468             :  * Validate ptr before assuming that it points to an allocation.  Currently,
    3469             :  * the following validation is performed:
    3470             :  *
    3471             :  * + Check that ptr is not nullptr.
    3472             :  *
    3473             :  * + Check that ptr lies within a mapped chunk.
    3474             :  */
    3475             : static inline size_t
    3476       21981 : isalloc_validate(const void *ptr)
    3477             : {
    3478             :         arena_chunk_t *chunk;
    3479             : 
    3480       21981 :         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    3481       21981 :         if (!chunk)
    3482             :                 return (0);
    3483             : 
    3484       21981 :         if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk))
    3485             :                 return (0);
    3486             : 
    3487       21981 :         if (chunk != ptr) {
    3488       21975 :                 MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);
    3489       21975 :                 return (arena_salloc(ptr));
    3490             :         } else {
    3491             :                 size_t ret;
    3492             :                 extent_node_t *node;
    3493             :                 extent_node_t key;
    3494             : 
    3495             :                 /* Chunk. */
    3496           6 :                 key.addr = (void *)chunk;
    3497           6 :                 malloc_mutex_lock(&huge_mtx);
    3498           6 :                 node = extent_tree_ad_search(&huge, &key);
    3499           6 :                 if (node)
    3500           6 :                         ret = node->size;
    3501             :                 else
    3502             :                         ret = 0;
    3503           6 :                 malloc_mutex_unlock(&huge_mtx);
    3504             :                 return (ret);
    3505             :         }
    3506             : }
    3507             : 
    3508             : static inline size_t
    3509      108247 : isalloc(const void *ptr)
    3510             : {
    3511             :         size_t ret;
    3512             :         arena_chunk_t *chunk;
    3513             : 
    3514      108247 :         MOZ_ASSERT(ptr);
    3515             : 
    3516      108247 :         chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    3517      108247 :         if (chunk != ptr) {
    3518             :                 /* Region. */
    3519      108247 :                 MOZ_ASSERT(chunk->arena->magic == ARENA_MAGIC);
    3520             : 
    3521      108247 :                 ret = arena_salloc(ptr);
    3522             :         } else {
    3523             :                 extent_node_t *node, key;
    3524             : 
    3525             :                 /* Chunk (huge allocation). */
    3526             : 
    3527           0 :                 malloc_mutex_lock(&huge_mtx);
    3528             : 
    3529             :                 /* Extract from tree of huge allocations. */
    3530           0 :                 key.addr = const_cast<void*>(ptr);
    3531           0 :                 node = extent_tree_ad_search(&huge, &key);
    3532           0 :                 MOZ_DIAGNOSTIC_ASSERT(node);
    3533             : 
    3534           0 :                 ret = node->size;
    3535             : 
    3536           0 :                 malloc_mutex_unlock(&huge_mtx);
    3537             :         }
    3538             : 
    3539      108247 :         return (ret);
    3540             : }
    3541             : 
    3542             : static inline void
    3543      739103 : arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    3544             :     arena_chunk_map_t *mapelm)
    3545             : {
    3546             :         arena_run_t *run;
    3547             :         arena_bin_t *bin;
    3548             :         size_t size;
    3549             : 
    3550      739103 :         run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
    3551      739103 :         MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
    3552      739103 :         bin = run->bin;
    3553      739103 :         size = bin->reg_size;
    3554             : 
    3555      739103 :         memset(ptr, kAllocPoison, size);
    3556             : 
    3557      739103 :         arena_run_reg_dalloc(run, bin, ptr, size);
    3558      739103 :         run->nfree++;
    3559             : 
    3560      739103 :         if (run->nfree == bin->nregs) {
    3561             :                 /* Deallocate run. */
    3562        4119 :                 if (run == bin->runcur)
    3563        3253 :                         bin->runcur = nullptr;
    3564         866 :                 else if (bin->nregs != 1) {
    3565         866 :                         size_t run_pageind = (((uintptr_t)run -
    3566         866 :                             (uintptr_t)chunk)) >> pagesize_2pow;
    3567             :                         arena_chunk_map_t *run_mapelm =
    3568         866 :                             &chunk->map[run_pageind];
    3569             :                         /*
    3570             :                          * This block's conditional is necessary because if the
    3571             :                          * run only contains one region, then it never gets
    3572             :                          * inserted into the non-full runs tree.
    3573             :                          */
    3574         866 :                         MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
    3575             :                                 run_mapelm);
    3576         866 :                         arena_run_tree_remove(&bin->runs, run_mapelm);
    3577             :                 }
    3578             : #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
    3579        4119 :                 run->magic = 0;
    3580             : #endif
    3581        4119 :                 arena_run_dalloc(arena, run, true);
    3582        4119 :                 bin->stats.curruns--;
    3583      734984 :         } else if (run->nfree == 1 && run != bin->runcur) {
    3584             :                 /*
    3585             :                  * Make sure that bin->runcur always refers to the lowest
    3586             :                  * non-full run, if one exists.
    3587             :                  */
    3588       32523 :                 if (!bin->runcur)
    3589        1330 :                         bin->runcur = run;
    3590       31193 :                 else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
    3591             :                         /* Switch runcur. */
    3592       23067 :                         if (bin->runcur->nfree > 0) {
    3593             :                                 arena_chunk_t *runcur_chunk =
    3594       21668 :                                     (arena_chunk_t*)CHUNK_ADDR2BASE(bin->runcur);
    3595             :                                 size_t runcur_pageind =
    3596       21668 :                                     (((uintptr_t)bin->runcur -
    3597       21668 :                                     (uintptr_t)runcur_chunk)) >> pagesize_2pow;
    3598             :                                 arena_chunk_map_t *runcur_mapelm =
    3599       21668 :                                     &runcur_chunk->map[runcur_pageind];
    3600             : 
    3601             :                                 /* Insert runcur. */
    3602       21668 :                                 MOZ_DIAGNOSTIC_ASSERT(!arena_run_tree_search(&bin->runs,
    3603             :                                     runcur_mapelm));
    3604             :                                 arena_run_tree_insert(&bin->runs,
    3605       21668 :                                     runcur_mapelm);
    3606             :                         }
    3607       23067 :                         bin->runcur = run;
    3608             :                 } else {
    3609        8126 :                         size_t run_pageind = (((uintptr_t)run -
    3610        8126 :                             (uintptr_t)chunk)) >> pagesize_2pow;
    3611             :                         arena_chunk_map_t *run_mapelm =
    3612        8126 :                             &chunk->map[run_pageind];
    3613             : 
    3614        8126 :                         MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
    3615             :                             nullptr);
    3616        8126 :                         arena_run_tree_insert(&bin->runs, run_mapelm);
    3617             :                 }
    3618             :         }
    3619      739103 :         arena->stats.allocated_small -= size;
    3620      739103 : }
    3621             : 
    3622             : static void
    3623       13760 : arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
    3624             : {
    3625       13760 :         size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
    3626       13760 :             pagesize_2pow;
    3627       13760 :         size_t size = chunk->map[pageind].bits & ~pagesize_mask;
    3628             : 
    3629       13760 :         memset(ptr, kAllocPoison, size);
    3630       13760 :         arena->stats.allocated_large -= size;
    3631             : 
    3632       13760 :         arena_run_dalloc(arena, (arena_run_t *)ptr, true);
    3633       13760 : }
    3634             : 
    3635             : static inline void
    3636      752710 : arena_dalloc(void *ptr, size_t offset)
    3637             : {
    3638             :         arena_chunk_t *chunk;
    3639             :         arena_t *arena;
    3640             :         size_t pageind;
    3641             :         arena_chunk_map_t *mapelm;
    3642             : 
    3643      752710 :         MOZ_ASSERT(ptr);
    3644      752710 :         MOZ_ASSERT(offset != 0);
    3645      752710 :         MOZ_ASSERT(CHUNK_ADDR2OFFSET(ptr) == offset);
    3646             : 
    3647      752710 :         chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
    3648      752710 :         arena = chunk->arena;
    3649      752710 :         MOZ_ASSERT(arena);
    3650      752710 :         MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
    3651             : 
    3652     1505573 :         malloc_spin_lock(&arena->lock);
    3653      752863 :         pageind = offset >> pagesize_2pow;
    3654      752863 :         mapelm = &chunk->map[pageind];
    3655      752863 :         MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
    3656      752863 :         if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
    3657             :                 /* Small allocation. */
    3658      739103 :                 arena_dalloc_small(arena, chunk, ptr, mapelm);
    3659             :         } else {
    3660             :                 /* Large allocation. */
    3661       13760 :                 arena_dalloc_large(arena, chunk, ptr);
    3662             :         }
    3663     1505722 :         malloc_spin_unlock(&arena->lock);
    3664      752859 : }
    3665             : 
    3666             : static inline void
    3667       89912 : idalloc(void *ptr)
    3668             : {
    3669             :         size_t offset;
    3670             : 
    3671       89912 :         MOZ_ASSERT(ptr);
    3672             : 
    3673       89912 :         offset = CHUNK_ADDR2OFFSET(ptr);
    3674       89912 :         if (offset != 0)
    3675       89912 :                 arena_dalloc(ptr, offset);
    3676             :         else
    3677           0 :                 huge_dalloc(ptr);
    3678       89912 : }
    3679             : 
    3680             : static void
    3681           3 : arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    3682             :     size_t size, size_t oldsize)
    3683             : {
    3684             : 
    3685           3 :         MOZ_ASSERT(size < oldsize);
    3686             : 
    3687             :         /*
    3688             :          * Shrink the run, and make trailing pages available for other
    3689             :          * allocations.
    3690             :          */
    3691           6 :         malloc_spin_lock(&arena->lock);
    3692             :         arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
    3693           3 :             true);
    3694           3 :         arena->stats.allocated_large -= oldsize - size;
    3695           6 :         malloc_spin_unlock(&arena->lock);
    3696           3 : }
    3697             : 
    3698             : static bool
    3699         906 : arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    3700             :     size_t size, size_t oldsize)
    3701             : {
    3702         906 :         size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
    3703         906 :         size_t npages = oldsize >> pagesize_2pow;
    3704             : 
    3705        1812 :         malloc_spin_lock(&arena->lock);
    3706         906 :         MOZ_DIAGNOSTIC_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
    3707             : 
    3708             :         /* Try to extend the run. */
    3709         906 :         MOZ_ASSERT(size > oldsize);
    3710        1797 :         if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
    3711        1373 :             & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
    3712         241 :             ~pagesize_mask) >= size - oldsize) {
    3713             :                 /*
    3714             :                  * The next run is available and sufficiently large.  Split the
    3715             :                  * following run, then merge the first part with the existing
    3716             :                  * allocation.
    3717             :                  */
    3718         197 :                 arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
    3719         197 :                     ((pageind+npages) << pagesize_2pow)), size - oldsize, true,
    3720         197 :                     false);
    3721             : 
    3722         197 :                 chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
    3723             :                     CHUNK_MAP_ALLOCATED;
    3724         197 :                 chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
    3725             :                     CHUNK_MAP_ALLOCATED;
    3726             : 
    3727         197 :                 arena->stats.allocated_large += size - oldsize;
    3728         394 :                 malloc_spin_unlock(&arena->lock);
    3729         197 :                 return (false);
    3730             :         }
    3731        1418 :         malloc_spin_unlock(&arena->lock);
    3732             : 
    3733         709 :         return (true);
    3734             : }
    3735             : 
    3736             : /*
     3737             :  * Try to resize a large allocation, in order to avoid copying.  This will
     3738             :  * always fail when growing an object whose following run is already in use.
    3739             :  */
    3740             : static bool
    3741        1161 : arena_ralloc_large(void *ptr, size_t size, size_t oldsize)
    3742             : {
    3743             :         size_t psize;
    3744             : 
    3745        1161 :         psize = PAGE_CEILING(size);
    3746        1161 :         if (psize == oldsize) {
    3747             :                 /* Same size class. */
    3748         252 :                 if (size < oldsize) {
    3749         244 :                         memset((void *)((uintptr_t)ptr + size), kAllocPoison, oldsize -
    3750             :                             size);
    3751             :                 }
    3752             :                 return (false);
    3753             :         } else {
    3754             :                 arena_chunk_t *chunk;
    3755             :                 arena_t *arena;
    3756             : 
    3757         909 :                 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    3758         909 :                 arena = chunk->arena;
    3759         909 :                 MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
    3760             : 
    3761         909 :                 if (psize < oldsize) {
     3762             :                         /* Fill before shrinking in order to avoid a race. */

    3763           3 :                         memset((void *)((uintptr_t)ptr + size), kAllocPoison,
    3764           3 :                             oldsize - size);
    3765             :                         arena_ralloc_large_shrink(arena, chunk, ptr, psize,
    3766           3 :                             oldsize);
    3767           3 :                         return (false);
    3768             :                 } else {
    3769             :                         bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
    3770         906 :                             psize, oldsize);
    3771             :                         if (ret == false && opt_zero) {
    3772             :                                 memset((void *)((uintptr_t)ptr + oldsize), 0,
    3773             :                                     size - oldsize);
    3774             :                         }
    3775         906 :                         return (ret);
    3776             :                 }
    3777             :         }
    3778             : }
    3779             : 
    3780             : static void *
    3781      108247 : arena_ralloc(void *ptr, size_t size, size_t oldsize)
    3782             : {
    3783             :         void *ret;
    3784             :         size_t copysize;
    3785             : 
    3786             :         /* Try to avoid moving the allocation. */
    3787      108247 :         if (size < small_min) {
    3788       28389 :                 if (oldsize < small_min &&
    3789       14166 :                     ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
    3790       14166 :                     == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
    3791             :                         goto IN_PLACE; /* Same size class. */
    3792       94024 :         } else if (size <= small_max) {
    3793      151691 :                 if (oldsize >= small_min && oldsize <= small_max &&
    3794       64309 :                     (QUANTUM_CEILING(size) >> opt_quantum_2pow)
    3795       64309 :                     == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
    3796             :                         goto IN_PLACE; /* Same size class. */
    3797        6642 :         } else if (size <= bin_maxclass) {
    3798        6866 :                 if (oldsize > small_max && oldsize <= bin_maxclass &&
    3799        2413 :                     pow2_ceil(size) == pow2_ceil(oldsize))
    3800             :                         goto IN_PLACE; /* Same size class. */
    3801        2189 :         } else if (oldsize > bin_maxclass && oldsize <= arena_maxclass) {
    3802             :                 MOZ_ASSERT(size > bin_maxclass);
    3803        1161 :                 if (arena_ralloc_large(ptr, size, oldsize) == false)
    3804             :                         return (ptr);
    3805             :         }
    3806             : 
    3807             :         /*
    3808             :          * If we get here, then size and oldsize are different enough that we
    3809             :          * need to move the object.  In that case, fall back to allocating new
    3810             :          * space and copying.
    3811             :          */
    3812       89911 :         ret = arena_malloc(choose_arena(), size, false);
    3813       89912 :         if (!ret)
    3814             :                 return nullptr;
    3815             : 
    3816             :         /* Junk/zero-filling were already done by arena_malloc(). */
    3817       89912 :         copysize = (size < oldsize) ? size : oldsize;
    3818             : #ifdef VM_COPY_MIN
    3819             :         if (copysize >= VM_COPY_MIN)
    3820             :                 pages_copy(ret, ptr, copysize);
    3821             :         else
    3822             : #endif
    3823       89912 :                 memcpy(ret, ptr, copysize);
    3824       89912 :         idalloc(ptr);
    3825       89912 :         return (ret);
    3826             : IN_PLACE:
    3827       17884 :         if (size < oldsize)
    3828        2353 :                 memset((void *)((uintptr_t)ptr + size), kAllocPoison, oldsize - size);
    3829             :         else if (opt_zero && size > oldsize)
    3830             :                 memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
    3831             :         return (ptr);
    3832             : }
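
The chain of checks at the top of arena_ralloc() boils down to "does the new size land in the same small size class as the old one?", in which case the allocation can stay in place. A rough self-contained sketch of that test; class_of and realloc_in_place_ok are illustrative names, the 16/512 thresholds are assumed stand-ins for small_min/small_max, and large allocations are ignored here:

        #include <cstddef>

        static size_t class_of(size_t size)
        {
                const size_t quantum = 16, small_min = 16, small_max = 512;  // assumed
                if (size < small_min || size > small_max) {
                        size_t p = 1;                  // tiny / sub-page: power of two
                        while (p < size)
                                p <<= 1;
                        return p;
                }
                return (size + quantum - 1) & ~(quantum - 1);                // quantum-spaced
        }

        static bool realloc_in_place_ok(size_t oldsize, size_t newsize)
        {
                const size_t small_min = 16, small_max = 512;                // assumed
                bool same_range = (oldsize < small_min) == (newsize < small_min) &&
                                  (oldsize > small_max) == (newsize > small_max);
                return same_range && class_of(oldsize) == class_of(newsize);
        }
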
    3833             : 
    3834             : static inline void *
    3835      108246 : iralloc(void *ptr, size_t size)
    3836             : {
    3837             :         size_t oldsize;
    3838             : 
    3839      108246 :         MOZ_ASSERT(ptr);
    3840      108246 :         MOZ_ASSERT(size != 0);
    3841             : 
    3842      108246 :         oldsize = isalloc(ptr);
    3843             : 
    3844      108247 :         if (size <= arena_maxclass)
    3845      108247 :                 return (arena_ralloc(ptr, size, oldsize));
    3846             :         else
    3847           0 :                 return (huge_ralloc(ptr, size, oldsize));
    3848             : }
    3849             : 
    3850             : static bool
    3851           3 : arena_new(arena_t *arena)
    3852             : {
    3853             :         unsigned i;
    3854             :         arena_bin_t *bin;
    3855             :         size_t prev_run_size;
    3856             : 
    3857           3 :         if (malloc_spin_init(&arena->lock))
    3858             :                 return (true);
    3859             : 
    3860           6 :         memset(&arena->stats, 0, sizeof(arena_stats_t));
    3861             : 
    3862             :         /* Initialize chunks. */
    3863           3 :         arena_chunk_tree_dirty_new(&arena->chunks_dirty);
    3864             : #ifdef MALLOC_DOUBLE_PURGE
    3865             :         LinkedList_Init(&arena->chunks_madvised);
    3866             : #endif
    3867           3 :         arena->spare = nullptr;
    3868             : 
    3869           3 :         arena->ndirty = 0;
    3870             : 
    3871           3 :         arena_avail_tree_new(&arena->runs_avail);
    3872             : 
    3873             :         /* Initialize bins. */
    3874           3 :         prev_run_size = pagesize;
    3875             : 
    3876             :         /* (2^n)-spaced tiny bins. */
    3877           6 :         for (i = 0; i < ntbins; i++) {
    3878           3 :                 bin = &arena->bins[i];
    3879           3 :                 bin->runcur = nullptr;
    3880           3 :                 arena_run_tree_new(&bin->runs);
    3881             : 
    3882           3 :                 bin->reg_size = (1ULL << (TINY_MIN_2POW + i));
    3883             : 
    3884           3 :                 prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
    3885             : 
    3886           6 :                 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
    3887             :         }
    3888             : 
    3889             :         /* Quantum-spaced bins. */
    3890         195 :         for (; i < ntbins + nqbins; i++) {
    3891          96 :                 bin = &arena->bins[i];
    3892          96 :                 bin->runcur = nullptr;
    3893          96 :                 arena_run_tree_new(&bin->runs);
    3894             : 
    3895          96 :                 bin->reg_size = quantum * (i - ntbins + 1);
    3896             : 
    3897          96 :                 prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
    3898             : 
    3899         192 :                 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
    3900             :         }
    3901             : 
    3902             :         /* (2^n)-spaced sub-page bins. */
    3903          15 :         for (; i < ntbins + nqbins + nsbins; i++) {
    3904           6 :                 bin = &arena->bins[i];
    3905           6 :                 bin->runcur = nullptr;
    3906           6 :                 arena_run_tree_new(&bin->runs);
    3907             : 
    3908           6 :                 bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
    3909             : 
    3910           6 :                 prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
    3911             : 
    3912          12 :                 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
    3913             :         }
    3914             : 
    3915             : #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
    3916           3 :         arena->magic = ARENA_MAGIC;
    3917             : #endif
    3918             : 
    3919           3 :         return (false);
    3920             : }
    3921             : 
    3922             : static inline arena_t *
    3923           0 : arenas_fallback()
    3924             : {
    3925             :         /* Only reached if there is an OOM error. */
    3926             : 
    3927             :         /*
    3928             :          * OOM here is quite inconvenient to propagate, since dealing with it
    3929             :          * would require a check for failure in the fast path.  Instead, punt
    3930             :          * by using arenas[0].
    3931             :          * In practice, this is an extremely unlikely failure.
    3932             :          */
    3933             :         _malloc_message(_getprogname(),
    3934           0 :             ": (malloc) Error initializing arena\n");
    3935             : 
    3936           0 :         return arenas[0];
    3937             : }
    3938             : 
    3939             : /* Create a new arena and return it. */
    3940             : static arena_t *
    3941           3 : arenas_extend()
    3942             : {
    3943             :         /*
    3944             :          * The list of arenas is first allocated to contain at most 16 elements,
    3945             :          * and when the limit is reached, the list is grown such that it can
    3946             :          * contain 16 more elements.
    3947             :          */
    3948           3 :         const size_t arenas_growth = 16;
    3949             :         arena_t *ret;
    3950             : 
    3951             : 
    3952             :         /* Allocate enough space for trailing bins. */
    3953             :         ret = (arena_t *)base_alloc(sizeof(arena_t)
    3954           3 :             + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
    3955           3 :         if (!ret || arena_new(ret)) {
    3956           0 :                 return arenas_fallback();
    3957             :         }
    3958             : 
    3959           3 :         malloc_spin_lock(&arenas_lock);
    3960             : 
    3961             :         /* Allocate and initialize arenas. */
    3962           3 :         if (narenas % arenas_growth == 0) {
    3963           3 :                 size_t max_arenas = ((narenas + arenas_growth) / arenas_growth) * arenas_growth;
    3964             :                 /*
    3965             :                  * We're unfortunately leaking the previous allocation ;
    3966             :                  * We're unfortunately leaking the previous allocation;
    3967             :                  * the base allocator doesn't know how to free things.
    3968           3 :                 arena_t** new_arenas = (arena_t **)base_alloc(sizeof(arena_t *) * max_arenas);
    3969           3 :                 if (!new_arenas) {
    3970           0 :                         ret = arenas ? arenas_fallback() : nullptr;
    3971           0 :                         malloc_spin_unlock(&arenas_lock);
    3972           0 :                         return (ret);
    3973             :                 }
    3974           6 :                 memcpy(new_arenas, arenas, narenas * sizeof(arena_t *));
    3975             :                 /*
    3976             :                  * Zero the array.  In practice, this should always be pre-zeroed,
    3977             :                  * since it was just mmap()ed, but let's be sure.
    3978             :                  */
    3979           6 :                 memset(new_arenas + narenas, 0, sizeof(arena_t *) * (max_arenas - narenas));
    3980           3 :                 arenas = new_arenas;
    3981             :         }
    3982           3 :         arenas[narenas++] = ret;
    3983             : 
    3984           3 :         malloc_spin_unlock(&arenas_lock);
    3985           3 :         return (ret);
    3986             : }
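
The capacity arithmetic in arenas_extend() above is easy to check in isolation. Below is a minimal standalone sketch (next_arenas_capacity is a made-up helper name, not part of the allocator) showing that the expression rounds the arena count up to the next multiple of the 16-element growth step:

    #include <cassert>
    #include <cstddef>

    /* Same expression as ((narenas + arenas_growth) / arenas_growth) * arenas_growth. */
    static size_t
    next_arenas_capacity(size_t narenas, size_t growth)
    {
            return ((narenas + growth) / growth) * growth;
    }

    int
    main()
    {
            assert(next_arenas_capacity(0, 16) == 16);  /* first arena: allocate 16 slots */
            assert(next_arenas_capacity(16, 16) == 32); /* 17th arena: grow to 32 slots */
            assert(next_arenas_capacity(32, 16) == 48);
            return 0;
    }

Growth only happens when narenas is an exact multiple of the step, so the memcpy() above copies at most narenas pointers each time the array is reallocated.
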
    3987             : 
    3988             : /*
    3989             :  * End arena.
    3990             :  */
    3991             : /******************************************************************************/
    3992             : /*
    3993             :  * Begin general internal functions.
    3994             :  */
    3995             : 
    3996             : static void *
    3997           6 : huge_malloc(size_t size, bool zero)
    3998             : {
    3999           6 :         return huge_palloc(size, chunksize, zero);
    4000             : }
    4001             : 
    4002             : static void *
    4003           6 : huge_palloc(size_t size, size_t alignment, bool zero)
    4004             : {
    4005             :         void *ret;
    4006             :         size_t csize;
    4007             :         size_t psize;
    4008             :         extent_node_t *node;
    4009             :         bool zeroed;
    4010             : 
    4011             :         /* Allocate one or more contiguous chunks for this request. */
    4012             : 
    4013           6 :         csize = CHUNK_CEILING(size);
    4014           6 :         if (csize == 0) {
    4015             :                 /* size is large enough to cause size_t wrap-around. */
    4016             :                 return nullptr;
    4017             :         }
    4018             : 
    4019             :         /* Allocate an extent node with which to track the chunk. */
    4020           6 :         node = base_node_alloc();
    4021           6 :         if (!node)
    4022             :                 return nullptr;
    4023             : 
    4024           6 :         ret = chunk_alloc(csize, alignment, false, &zeroed);
    4025           6 :         if (!ret) {
    4026           0 :                 base_node_dealloc(node);
    4027           0 :                 return nullptr;
    4028             :         }
    4029           6 :         if (zero) {
    4030           1 :                 chunk_ensure_zero(ret, csize, zeroed);
    4031             :         }
    4032             : 
    4033             :         /* Insert node into huge. */
    4034           6 :         node->addr = ret;
    4035           6 :         psize = PAGE_CEILING(size);
    4036           6 :         node->size = psize;
    4037             : 
    4038           6 :         malloc_mutex_lock(&huge_mtx);
    4039           6 :         extent_tree_ad_insert(&huge, node);
    4040           6 :         huge_nmalloc++;
    4041             : 
    4042             :         /* Although we allocated space for csize bytes, we indicate that we've
    4043             :          * allocated only psize bytes.
    4044             :          *
    4045             :          * If DECOMMIT is defined, this is a reasonable thing to do, since
    4046             :          * we'll explicitly decommit the bytes in excess of psize.
    4047             :          *
    4048             :          * If DECOMMIT is not defined, then we're relying on the OS to be lazy
    4049             :          * about how it allocates physical pages to mappings.  If we never
    4050             :          * touch the pages in excess of psize, the OS won't allocate a physical
    4051             :          * page, and we won't use more than psize bytes of physical memory.
    4052             :          *
    4053             :          * A correct program will only touch memory in excess of how much it
    4054             :          * requested if it first calls malloc_usable_size and finds out how
    4055             :          * much space it has to play with.  But because we set node->size =
    4056             :          * psize above, malloc_usable_size will return psize, not csize, and
    4057             :          * the program will (hopefully) never touch bytes in excess of psize.
    4058             :          * Thus those bytes won't take up space in physical memory, and we can
    4059             :          * reasonably claim we never "allocated" them in the first place. */
    4060           6 :         huge_allocated += psize;
    4061           6 :         huge_mapped += csize;
    4062           6 :         malloc_mutex_unlock(&huge_mtx);
    4063             : 
    4064             : #ifdef MALLOC_DECOMMIT
    4065             :         if (csize - psize > 0)
    4066             :                 pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
    4067             : #endif
    4068             : 
    4069             :         if (zero == false) {
    4070             :                 if (opt_junk)
    4071             : #  ifdef MALLOC_DECOMMIT
    4072             :                         memset(ret, kAllocJunk, psize);
    4073             : #  else
    4074             :                         memset(ret, kAllocJunk, csize);
    4075             : #  endif
    4076             :                 else if (opt_zero)
    4077             : #  ifdef MALLOC_DECOMMIT
    4078             :                         memset(ret, 0, psize);
    4079             : #  else
    4080             :                         memset(ret, 0, csize);
    4081             : #  endif
    4082             :         }
    4083             : 
    4084           6 :         return (ret);
    4085             : }
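
To make the psize/csize accounting described in the comment above concrete, here is a small standalone sketch assuming a 4 KiB page and a 1 MiB chunk (the usual static configuration; the real values come from pagesize and chunksize). kPage, kChunk and the two helpers are illustrative only:

    #include <cassert>
    #include <cstddef>

    static const size_t kPage  = 4096;        /* assumed page size */
    static const size_t kChunk = 1024 * 1024; /* assumed chunk size */

    static size_t page_ceiling(size_t s)  { return (s + kPage - 1) & ~(kPage - 1); }
    static size_t chunk_ceiling(size_t s) { return (s + kChunk - 1) & ~(kChunk - 1); }

    int
    main()
    {
            size_t request = 1024 * 1024 + 1;      /* 1 MiB + 1 byte */
            size_t psize = page_ceiling(request);  /* recorded in node->size / huge_allocated */
            size_t csize = chunk_ceiling(request); /* actually mapped / huge_mapped */

            assert(psize == 1024 * 1024 + 4096);   /* 1 MiB plus one page */
            assert(csize == 2 * 1024 * 1024);      /* two whole chunks */
            assert(psize <= csize);                /* the tail is decommitted or never touched */
            return 0;
    }

Since malloc_usable_size() reports psize for such an allocation, a well-behaved caller never touches the csize - psize tail, and those pages never need physical memory.
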
    4086             : 
    4087             : static void *
    4088           0 : huge_ralloc(void *ptr, size_t size, size_t oldsize)
    4089             : {
    4090             :         void *ret;
    4091             :         size_t copysize;
    4092             : 
    4093             :         /* Avoid moving the allocation if the size class would not change. */
    4094             : 
    4095           0 :         if (oldsize > arena_maxclass &&
    4096           0 :             CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
    4097           0 :                 size_t psize = PAGE_CEILING(size);
    4098           0 :                 if (size < oldsize) {
    4099           0 :                         memset((void *)((uintptr_t)ptr + size), kAllocPoison, oldsize
    4100             :                             - size);
    4101             :                 }
    4102             : #ifdef MALLOC_DECOMMIT
    4103             :                 if (psize < oldsize) {
    4104             :                         extent_node_t *node, key;
    4105             : 
    4106             :                         pages_decommit((void *)((uintptr_t)ptr + psize),
    4107             :                             oldsize - psize);
    4108             : 
    4109             :                         /* Update recorded size. */
    4110             :                         malloc_mutex_lock(&huge_mtx);
    4111             :                         key.addr = const_cast<void*>(ptr);
    4112             :                         node = extent_tree_ad_search(&huge, &key);
    4113             :                         MOZ_ASSERT(node);
    4114             :                         MOZ_ASSERT(node->size == oldsize);
    4115             :                         huge_allocated -= oldsize - psize;
    4116             :                         /* No need to change huge_mapped, because we didn't
    4117             :                          * (un)map anything. */
    4118             :                         node->size = psize;
    4119             :                         malloc_mutex_unlock(&huge_mtx);
    4120             :                 } else if (psize > oldsize) {
    4121             :                         pages_commit((void *)((uintptr_t)ptr + oldsize),
    4122             :                             psize - oldsize);
    4123             :                 }
    4124             : #endif
    4125             : 
    4126             :                 /* Although we don't have to commit or decommit anything if
    4127             :                  * DECOMMIT is not defined and the size class didn't change, we
    4128             :                  * do need to update the recorded size if the size increased,
    4129             :                  * so malloc_usable_size doesn't return a value smaller than
    4130             :                  * what was requested via realloc(). */
    4131             : 
    4132           0 :                 if (psize > oldsize) {
    4133             :                         /* Update recorded size. */
    4134             :                         extent_node_t *node, key;
    4135           0 :                         malloc_mutex_lock(&huge_mtx);
    4136           0 :                         key.addr = const_cast<void*>(ptr);
    4137           0 :                         node = extent_tree_ad_search(&huge, &key);
    4138           0 :                         MOZ_ASSERT(node);
    4139           0 :                         MOZ_ASSERT(node->size == oldsize);
    4140           0 :                         huge_allocated += psize - oldsize;
    4141             :                         /* No need to change huge_mapped, because we didn't
    4142             :                          * (un)map anything. */
    4143           0 :                         node->size = psize;
    4144           0 :                         malloc_mutex_unlock(&huge_mtx);
    4145             :                 }
    4146             : 
    4147             :                 if (opt_zero && size > oldsize) {
    4148             :                         memset((void *)((uintptr_t)ptr + oldsize), 0, size
    4149             :                             - oldsize);
    4150             :                 }
    4151             :                 return (ptr);
    4152             :         }
    4153             : 
    4154             :         /*
    4155             :          * If we get here, then size and oldsize are different enough that we
    4156             :          * need to use a different size class.  In that case, fall back to
    4157             :          * allocating new space and copying.
    4158             :          */
    4159           0 :         ret = huge_malloc(size, false);
    4160           0 :         if (!ret)
    4161             :                 return nullptr;
    4162             : 
    4163           0 :         copysize = (size < oldsize) ? size : oldsize;
    4164             : #ifdef VM_COPY_MIN
    4165             :         if (copysize >= VM_COPY_MIN)
    4166             :                 pages_copy(ret, ptr, copysize);
    4167             :         else
    4168             : #endif
    4169           0 :                 memcpy(ret, ptr, copysize);
    4170           0 :         idalloc(ptr);
    4171           0 :         return (ret);
    4172             : }
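
A worked example of the in-place condition at the top of huge_ralloc(), again under an assumed 4 KiB page / 1 MiB chunk configuration (the constants and helpers below are illustrative, not the allocator's):

    #include <cassert>
    #include <cstddef>

    static const size_t kPage  = 4096;
    static const size_t kChunk = 1024 * 1024;

    static size_t page_ceiling(size_t s)  { return (s + kPage - 1) & ~(kPage - 1); }
    static size_t chunk_ceiling(size_t s) { return (s + kChunk - 1) & ~(kChunk - 1); }

    int
    main()
    {
            size_t oldsize = 1536 * 1024; /* 1.5 MiB huge allocation, spans 2 chunks */
            size_t grown   = 1900 * 1024; /* still rounds to the same 2 chunks */
            size_t shrunk  = 512 * 1024;  /* would round to a single chunk */

            /* Same chunk ceiling: realloc stays in place, only the recorded size changes. */
            assert(chunk_ceiling(grown) == chunk_ceiling(oldsize));
            assert(page_ceiling(grown) == 1900 * 1024);

            /* Different chunk ceiling: fall back to huge_malloc() + copy + idalloc(). */
            assert(chunk_ceiling(shrunk) != chunk_ceiling(oldsize));
            return 0;
    }
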
    4173             : 
    4174             : static void
    4175           2 : huge_dalloc(void *ptr)
    4176             : {
    4177             :         extent_node_t *node, key;
    4178             : 
    4179           2 :         malloc_mutex_lock(&huge_mtx);
    4180             : 
    4181             :         /* Extract from tree of huge allocations. */
    4182           2 :         key.addr = ptr;
    4183           2 :         node = extent_tree_ad_search(&huge, &key);
    4184           2 :         MOZ_ASSERT(node);
    4185           2 :         MOZ_ASSERT(node->addr == ptr);
    4186           2 :         extent_tree_ad_remove(&huge, node);
    4187             : 
    4188           2 :         huge_ndalloc++;
    4189           2 :         huge_allocated -= node->size;
    4190           2 :         huge_mapped -= CHUNK_CEILING(node->size);
    4191             : 
    4192           2 :         malloc_mutex_unlock(&huge_mtx);
    4193             : 
    4194             :         /* Unmap chunk. */
    4195           2 :         chunk_dealloc(node->addr, CHUNK_CEILING(node->size), HUGE_CHUNK);
    4196             : 
    4197           2 :         base_node_dealloc(node);
    4198           2 : }
    4199             : 
    4200             : /*
    4201             :  * FreeBSD's pthreads implementation calls malloc(3), so the malloc
    4202             :  * implementation has to take pains to avoid infinite recursion during
    4203             :  * initialization.
    4204             :  */
    4205             : #if (defined(MOZ_MEMORY_WINDOWS) || defined(MOZ_MEMORY_DARWIN))
    4206             : #define malloc_init() false
    4207             : #else
    4208             : static inline bool
    4209     1111540 : malloc_init(void)
    4210             : {
    4211             : 
    4212     1111540 :         if (malloc_initialized == false)
    4213           3 :                 return (malloc_init_hard());
    4214             : 
    4215             :         return (false);
    4216             : }
    4217             : #endif
    4218             : 
    4219             : #if defined(MOZ_MEMORY_DARWIN)
    4220             : extern "C" void register_zone(void);
    4221             : #endif
    4222             : 
    4223             : #if !defined(MOZ_MEMORY_WINDOWS)
    4224             : static
    4225             : #endif
    4226             : bool
    4227           3 : malloc_init_hard(void)
    4228             : {
    4229             :         unsigned i;
    4230             :         const char *opts;
    4231             :         long result;
    4232             : 
    4233             : #ifndef MOZ_MEMORY_WINDOWS
    4234           3 :         malloc_mutex_lock(&init_lock);
    4235             : #endif
    4236             : 
    4237           3 :         if (malloc_initialized) {
    4238             :                 /*
    4239             :                  * Another thread initialized the allocator before this one
    4240             :                  * acquired init_lock.
    4241             :                  */
    4242             : #ifndef MOZ_MEMORY_WINDOWS
    4243           0 :                 malloc_mutex_unlock(&init_lock);
    4244             : #endif
    4245           0 :                 return (false);
    4246             :         }
    4247             : 
    4248             : #ifdef MOZ_MEMORY_WINDOWS
    4249             :         /* get a thread local storage index */
    4250             :         tlsIndex = TlsAlloc();
    4251             : #elif defined(MOZ_MEMORY_DARWIN)
    4252             :         pthread_key_create(&tlsIndex, nullptr);
    4253             : #endif
    4254             : 
    4255             :         /* Get page size. */
    4256             : #ifdef MOZ_MEMORY_WINDOWS
    4257             :         {
    4258             :                 SYSTEM_INFO info;
    4259             : 
    4260             :                 GetSystemInfo(&info);
    4261             :                 result = info.dwPageSize;
    4262             : 
    4263             :         }
    4264             : #else
    4265           3 :         result = sysconf(_SC_PAGESIZE);
    4266           3 :         MOZ_ASSERT(result != -1);
    4267             : #endif
    4268             : 
    4269             :         /* We assume that the page size is a power of 2. */
    4270           3 :         MOZ_ASSERT(((result - 1) & result) == 0);
    4271             : #ifdef MALLOC_STATIC_SIZES
    4272           3 :         if (pagesize % (size_t) result) {
    4273             :                 _malloc_message(_getprogname(),
    4274           0 :                                 "Compile-time page size does not divide the runtime one.\n");
    4275           0 :                 moz_abort();
    4276             :         }
    4277             : #else
    4278             :         pagesize = (size_t) result;
    4279             :         pagesize_mask = (size_t) result - 1;
    4280             :         pagesize_2pow = ffs((int)result) - 1;
    4281             : #endif
    4282             : 
    4283             :         /* Get runtime configuration. */
    4284           3 :         if ((opts = getenv("MALLOC_OPTIONS"))) {
    4285           0 :                 for (i = 0; opts[i] != '\0'; i++) {
    4286             :                         unsigned j, nreps;
    4287             :                         bool nseen;
    4288             : 
    4289             :                         /* Parse repetition count, if any. */
    4290           0 :                         for (nreps = 0, nseen = false;; i++, nseen = true) {
    4291           0 :                                 switch (opts[i]) {
    4292             :                                         case '0': case '1': case '2': case '3':
    4293             :                                         case '4': case '5': case '6': case '7':
    4294             :                                         case '8': case '9':
    4295           0 :                                                 nreps *= 10;
    4296           0 :                                                 nreps += opts[i] - '0';
    4297             :                                                 break;
    4298             :                                         default:
    4299             :                                                 goto MALLOC_OUT;
    4300             :                                 }
    4301             :                         }
    4302             : MALLOC_OUT:
    4303           0 :                         if (nseen == false)
    4304           0 :                                 nreps = 1;
    4305             : 
    4306           0 :                         for (j = 0; j < nreps; j++) {
    4307           0 :                                 switch (opts[i]) {
    4308             :                                 case 'f':
    4309           0 :                                         opt_dirty_max >>= 1;
    4310           0 :                                         break;
    4311             :                                 case 'F':
    4312           0 :                                         if (opt_dirty_max == 0)
    4313           0 :                                                 opt_dirty_max = 1;
    4314           0 :                                         else if ((opt_dirty_max << 1) != 0)
    4315           0 :                                                 opt_dirty_max <<= 1;
    4316             :                                         break;
    4317             : #ifdef MOZ_DEBUG
    4318             :                                 case 'j':
    4319             :                                         opt_junk = false;
    4320             :                                         break;
    4321             :                                 case 'J':
    4322             :                                         opt_junk = true;
    4323             :                                         break;
    4324             : #endif
    4325             : #ifndef MALLOC_STATIC_SIZES
    4326             :                                 case 'k':
    4327             :                                         /*
    4328             :                                          * Chunks always require at least one
    4329             :                                          * header page, so chunks can never be
    4330             :                                          * smaller than two pages.
    4331             :                                          */
    4332             :                                         if (opt_chunk_2pow > pagesize_2pow + 1)
    4333             :                                                 opt_chunk_2pow--;
    4334             :                                         break;
    4335             :                                 case 'K':
    4336             :                                         if (opt_chunk_2pow + 1 <
    4337             :                                             (sizeof(size_t) << 3))
    4338             :                                                 opt_chunk_2pow++;
    4339             :                                         break;
    4340             : #endif
    4341             : #ifndef MALLOC_STATIC_SIZES
    4342             :                                 case 'q':
    4343             :                                         if (opt_quantum_2pow > QUANTUM_2POW_MIN)
    4344             :                                                 opt_quantum_2pow--;
    4345             :                                         break;
    4346             :                                 case 'Q':
    4347             :                                         if (opt_quantum_2pow < pagesize_2pow -
    4348             :                                             1)
    4349             :                                                 opt_quantum_2pow++;
    4350             :                                         break;
    4351             :                                 case 's':
    4352             :                                         if (opt_small_max_2pow >
    4353             :                                             QUANTUM_2POW_MIN)
    4354             :                                                 opt_small_max_2pow--;
    4355             :                                         break;
    4356             :                                 case 'S':
    4357             :                                         if (opt_small_max_2pow < pagesize_2pow
    4358             :                                             - 1)
    4359             :                                                 opt_small_max_2pow++;
    4360             :                                         break;
    4361             : #endif
    4362             : #ifdef MOZ_DEBUG
    4363             :                                 case 'z':
    4364             :                                         opt_zero = false;
    4365             :                                         break;
    4366             :                                 case 'Z':
    4367             :                                         opt_zero = true;
    4368             :                                         break;
    4369             : #endif
    4370             :                                 default: {
    4371             :                                         char cbuf[2];
    4372             : 
    4373           0 :                                         cbuf[0] = opts[i];
    4374           0 :                                         cbuf[1] = '\0';
    4375             :                                         _malloc_message(_getprogname(),
    4376             :                                             ": (malloc) Unsupported character "
    4377             :                                             "in malloc options: '", cbuf,
    4378           0 :                                             "'\n");
    4379             :                                 }
    4380             :                                 }
    4381             :                         }
    4382             :                 }
    4383             :         }
    4384             : 
    4385             : #ifndef MALLOC_STATIC_SIZES
    4386             :         /* Set variables according to the value of opt_small_max_2pow. */
    4387             :         if (opt_small_max_2pow < opt_quantum_2pow)
    4388             :                 opt_small_max_2pow = opt_quantum_2pow;
    4389             :         small_max = (1U << opt_small_max_2pow);
    4390             : 
    4391             :         /* Set bin-related variables. */
    4392             :         bin_maxclass = (pagesize >> 1);
    4393             :         MOZ_ASSERT(opt_quantum_2pow >= TINY_MIN_2POW);
    4394             :         ntbins = opt_quantum_2pow - TINY_MIN_2POW;
    4395             :         MOZ_ASSERT(ntbins <= opt_quantum_2pow);
    4396             :         nqbins = (small_max >> opt_quantum_2pow);
    4397             :         nsbins = pagesize_2pow - opt_small_max_2pow - 1;
    4398             : 
    4399             :         /* Set variables according to the value of opt_quantum_2pow. */
    4400             :         quantum = (1U << opt_quantum_2pow);
    4401             :         quantum_mask = quantum - 1;
    4402             :         if (ntbins > 0)
    4403             :                 small_min = (quantum >> 1) + 1;
    4404             :         else
    4405             :                 small_min = 1;
    4406             :         MOZ_ASSERT(small_min <= quantum);
    4407             : 
    4408             :         /* Set variables according to the value of opt_chunk_2pow. */
    4409             :         chunksize = (1LU << opt_chunk_2pow);
    4410             :         chunksize_mask = chunksize - 1;
    4411             :         chunk_npages = (chunksize >> pagesize_2pow);
    4412             : 
    4413             :         arena_chunk_header_npages = calculate_arena_header_pages();
    4414             :         arena_maxclass = calculate_arena_maxclass();
    4415             : 
    4416             :         recycle_limit = CHUNK_RECYCLE_LIMIT * chunksize;
    4417             : #endif
    4418             : 
    4419           3 :         recycled_size = 0;
    4420             : 
    4421             :         /* Various sanity checks that regard configuration. */
    4422             :         MOZ_ASSERT(quantum >= sizeof(void *));
    4423             :         MOZ_ASSERT(quantum <= pagesize);
    4424             :         MOZ_ASSERT(chunksize >= pagesize);
    4425             :         MOZ_ASSERT(quantum * 4 <= chunksize);
    4426             : 
    4427             :         /* Initialize chunks data. */
    4428           3 :         malloc_mutex_init(&chunks_mtx);
    4429           3 :         extent_tree_szad_new(&chunks_szad_mmap);
    4430           3 :         extent_tree_ad_new(&chunks_ad_mmap);
    4431             : 
    4432             :         /* Initialize huge allocation data. */
    4433           3 :         malloc_mutex_init(&huge_mtx);
    4434           3 :         extent_tree_ad_new(&huge);
    4435           3 :         huge_nmalloc = 0;
    4436           3 :         huge_ndalloc = 0;
    4437           3 :         huge_allocated = 0;
    4438           3 :         huge_mapped = 0;
    4439             : 
    4440             :         /* Initialize base allocation data structures. */
    4441           3 :         base_mapped = 0;
    4442           3 :         base_committed = 0;
    4443           3 :         base_nodes = nullptr;
    4444           3 :         malloc_mutex_init(&base_mtx);
    4445             : 
    4446           3 :         malloc_spin_init(&arenas_lock);
    4447             : 
    4448             :         /*
    4449             :          * Initialize one arena here.
    4450             :          */
    4451           3 :         arenas_extend();
    4452           3 :         if (!arenas || !arenas[0]) {
    4453             : #ifndef MOZ_MEMORY_WINDOWS
    4454           0 :                 malloc_mutex_unlock(&init_lock);
    4455             : #endif
    4456           0 :                 return (true);
    4457             :         }
    4458             : #ifndef NO_TLS
    4459             :         /*
    4460             :          * Assign the initial arena to the initial thread, in order to avoid
    4461             :          * spurious creation of an extra arena if the application switches to
    4462             :          * threaded mode.
    4463             :          */
    4464             : #ifdef MOZ_MEMORY_WINDOWS
    4465             :         TlsSetValue(tlsIndex, arenas[0]);
    4466             : #elif defined(MOZ_MEMORY_DARWIN)
    4467             :         pthread_setspecific(tlsIndex, arenas[0]);
    4468             : #else
    4469           3 :         arenas_map = arenas[0];
    4470             : #endif
    4471             : #endif
    4472             : 
    4473           3 :         chunk_rtree = malloc_rtree_new((SIZEOF_PTR << 3) - opt_chunk_2pow);
    4474           3 :         if (!chunk_rtree)
    4475             :                 return (true);
    4476             : 
    4477           3 :         malloc_initialized = true;
    4478             : 
    4479             : #if !defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN)
    4480             :         /* Prevent potential deadlock on malloc locks after fork. */
    4481           3 :         pthread_atfork(_malloc_prefork, _malloc_postfork_parent, _malloc_postfork_child);
    4482             : #endif
    4483             : 
    4484             : #if defined(MOZ_MEMORY_DARWIN)
    4485             :         register_zone();
    4486             : #endif
    4487             : 
    4488             : #ifndef MOZ_MEMORY_WINDOWS
    4489           3 :         malloc_mutex_unlock(&init_lock);
    4490             : #endif
    4491           3 :         return (false);
    4492             : }
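
The MALLOC_OPTIONS handling in malloc_init_hard() above accepts an optional decimal repetition count in front of each flag letter, so for example "3f" halves opt_dirty_max three times. The following standalone sketch illustrates just that grammar (expand_malloc_options is a made-up name, not the allocator's parser):

    #include <cassert>
    #include <cctype>
    #include <cstddef>
    #include <string>

    /* Expand "<count><letter>" groups into repeated letters, e.g. "3f" -> "fff". */
    static std::string
    expand_malloc_options(const std::string& opts)
    {
            std::string out;
            for (size_t i = 0; i < opts.size(); ) {
                    unsigned nreps = 0;
                    bool nseen = false;
                    while (i < opts.size() && isdigit((unsigned char)opts[i])) {
                            nreps = nreps * 10 + (opts[i] - '0');
                            nseen = true;
                            i++;
                    }
                    if (!nseen)
                            nreps = 1;
                    if (i < opts.size()) {
                            out.append(nreps, opts[i]);
                            i++;
                    }
            }
            return out;
    }

    int
    main()
    {
            assert(expand_malloc_options("3f") == "fff");  /* halve opt_dirty_max three times */
            assert(expand_malloc_options("F2f") == "Fff");
            return 0;
    }
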
    4493             : 
    4494             : /*
    4495             :  * End general internal functions.
    4496             :  */
    4497             : /******************************************************************************/
    4498             : /*
    4499             :  * Begin malloc(3)-compatible functions.
    4500             :  */
    4501             : 
    4502             : MOZ_MEMORY_API void *
    4503      938848 : malloc_impl(size_t size)
    4504             : {
    4505             :         void *ret;
    4506             : 
    4507      938848 :         if (malloc_init()) {
    4508             :                 ret = nullptr;
    4509             :                 goto RETURN;
    4510             :         }
    4511             : 
    4512      938839 :         if (size == 0) {
    4513          12 :                 size = 1;
    4514             :         }
    4515             : 
    4516      938839 :         ret = imalloc(size);
    4517             : 
    4518             : RETURN:
    4519      939038 :         if (!ret) {
    4520           0 :                 errno = ENOMEM;
    4521             :         }
    4522             : 
    4523      939038 :         return (ret);
    4524             : }
    4525             : 
    4526             : /*
    4527             :  * In ELF systems the default visibility allows symbols to be preempted at
    4528             :  * runtime. This in turn prevents the uses of memalign in this file from being
    4529             :  * optimized. What we do in here is define two aliasing symbols (they point to
    4530             :  * the same code): memalign and memalign_internal. The internal version has
    4531             :  * hidden visibility and is used in every reference from this file.
    4532             :  *
    4533             :  * For more information on this technique, see section 2.2.7 (Avoid Using
    4534             :  * Exported Symbols) in http://www.akkadia.org/drepper/dsohowto.pdf.
    4535             :  */
    4536             : 
    4537             : #ifndef MOZ_REPLACE_MALLOC
    4538             : #if defined(__GNUC__) && !defined(MOZ_MEMORY_DARWIN)
    4539             : #define MOZ_MEMORY_ELF
    4540             : #endif
    4541             : #endif /* MOZ_REPLACE_MALLOC */
    4542             : 
    4543             : #ifdef MOZ_MEMORY_ELF
    4544             : #define MEMALIGN memalign_internal
    4545             : extern "C"
    4546             : #else
    4547             : #define MEMALIGN memalign_impl
    4548             : MOZ_MEMORY_API
    4549             : #endif
    4550             : void *
    4551         174 : MEMALIGN(size_t alignment, size_t size)
    4552             : {
    4553             :         void *ret;
    4554             : 
    4555         174 :         MOZ_ASSERT(((alignment - 1) & alignment) == 0);
    4556             : 
    4557         174 :         if (malloc_init()) {
    4558             :                 ret = nullptr;
    4559             :                 goto RETURN;
    4560             :         }
    4561             : 
    4562         174 :         if (size == 0) {
    4563           0 :                 size = 1;
    4564             :         }
    4565             : 
    4566         174 :         alignment = alignment < sizeof(void*) ? sizeof(void*) : alignment;
    4567         174 :         ret = ipalloc(alignment, size);
    4568             : 
    4569             : RETURN:
    4570         174 :         return (ret);
    4571             : }
    4572             : 
    4573             : #ifdef MOZ_MEMORY_ELF
    4574             : extern void *
    4575             : memalign_impl(size_t alignment, size_t size) __attribute__((alias ("memalign_internal"), visibility ("default")));
    4576             : #endif
    4577             : 
    4578             : MOZ_MEMORY_API int
    4579          61 : posix_memalign_impl(void **memptr, size_t alignment, size_t size)
    4580             : {
    4581             :         void *result;
    4582             : 
    4583             :         /* Make sure that alignment is a large enough power of 2. */
    4584          61 :         if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *)) {
    4585             :                 return (EINVAL);
    4586             :         }
    4587             : 
    4588             :         /* The 0-->1 size promotion is done in the memalign() call below */
    4589             : 
    4590          61 :         result = MEMALIGN(alignment, size);
    4591             : 
    4592          61 :         if (!result)
    4593             :                 return (ENOMEM);
    4594             : 
    4595          61 :         *memptr = result;
    4596          61 :         return (0);
    4597             : }
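
posix_memalign_impl() relies on the classic bit trick above to validate the alignment: a power of two has exactly one bit set, so `alignment & (alignment - 1)` clears that bit and yields zero. A minimal illustration (is_power_of_two is a made-up helper):

    #include <cassert>
    #include <cstddef>

    static bool
    is_power_of_two(size_t x)
    {
            return x != 0 && ((x - 1) & x) == 0;
    }

    int
    main()
    {
            assert(is_power_of_two(sizeof(void *)));
            assert(is_power_of_two(64));
            assert(!is_power_of_two(24));   /* rejected with EINVAL above */
            return 0;
    }
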
    4598             : 
    4599             : MOZ_MEMORY_API void *
    4600           0 : aligned_alloc_impl(size_t alignment, size_t size)
    4601             : {
    4602           0 :         if (size % alignment) {
    4603             :                 return nullptr;
    4604             :         }
    4605           0 :         return MEMALIGN(alignment, size);
    4606             : }
    4607             : 
    4608             : MOZ_MEMORY_API void *
    4609           0 : valloc_impl(size_t size)
    4610             : {
    4611           0 :         return (MEMALIGN(pagesize, size));
    4612             : }
    4613             : 
    4614             : MOZ_MEMORY_API void *
    4615      119473 : calloc_impl(size_t num, size_t size)
    4616             : {
    4617             :         void *ret;
    4618             :         size_t num_size;
    4619             : 
    4620      119473 :         if (malloc_init()) {
    4621             :                 num_size = 0;
    4622             :                 ret = nullptr;
    4623             :                 goto RETURN;
    4624             :         }
    4625             : 
    4626      119473 :         num_size = num * size;
    4627      119473 :         if (num_size == 0) {
    4628             :                 num_size = 1;
    4629             :         /*
    4630             :          * Try to avoid division here.  We know that it isn't possible to
    4631             :          * overflow during multiplication if neither operand uses any of the
    4632             :          * most significant half of the bits in a size_t.
    4633             :          */
    4634      119474 :         } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
    4635           0 :             && (num_size / size != num)) {
    4636             :                 /* size_t overflow. */
    4637             :                 ret = nullptr;
    4638             :                 goto RETURN;
    4639             :         }
    4640             : 
    4641      119473 :         ret = icalloc(num_size);
    4642             : 
    4643             : RETURN:
    4644      119481 :         if (!ret) {
    4645           0 :                 errno = ENOMEM;
    4646             :         }
    4647             : 
    4648      119481 :         return (ret);
    4649             : }
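
The overflow shortcut commented in calloc_impl() above can be exercised on its own: if neither operand uses the upper half of size_t's bits, the product cannot wrap, and the division is only needed when that cheap test fails. A standalone sketch (mul_overflows is illustrative, not the allocator's code):

    #include <cassert>
    #include <cstddef>
    #include <limits>

    static bool
    mul_overflows(size_t num, size_t size)
    {
            const size_t high_bits_mask =
                std::numeric_limits<size_t>::max() << (sizeof(size_t) << 2);
            size_t num_size = num * size;
            if (num_size == 0)
                    return false;   /* also avoids dividing by zero below */
            /* Fast path: both operands fit in the low half, no overflow possible. */
            if (((num | size) & high_bits_mask) == 0)
                    return false;
            /* Slow path: fall back to the division check. */
            return num_size / size != num;
    }

    int
    main()
    {
            assert(!mul_overflows(1000, 1000));
            assert(mul_overflows(std::numeric_limits<size_t>::max(), 2));
            return 0;
    }
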
    4650             : 
    4651             : MOZ_MEMORY_API void *
    4652      161444 : realloc_impl(void *ptr, size_t size)
    4653             : {
    4654             :         void *ret;
    4655             : 
    4656      161444 :         if (size == 0) {
    4657           0 :                 size = 1;
    4658             :         }
    4659             : 
    4660      161444 :         if (ptr) {
    4661      108246 :                 MOZ_ASSERT(malloc_initialized);
    4662             : 
    4663      108246 :                 ret = iralloc(ptr, size);
    4664             : 
    4665      108248 :                 if (!ret) {
    4666           0 :                         errno = ENOMEM;
    4667             :                 }
    4668             :         } else {
    4669       53198 :                 if (malloc_init())
    4670             :                         ret = nullptr;
    4671             :                 else
    4672       53198 :                         ret = imalloc(size);
    4673             : 
    4674       53198 :                 if (!ret) {
    4675           0 :                         errno = ENOMEM;
    4676             :                 }
    4677             :         }
    4678             : 
    4679      161446 :         return (ret);
    4680             : }
    4681             : 
    4682             : MOZ_MEMORY_API void
    4683      755434 : free_impl(void *ptr)
    4684             : {
    4685             :         size_t offset;
    4686             : 
    4687             :         /*
    4688             :          * A version of idalloc that checks for nullptr pointer but only for
    4689             :          * huge allocations assuming that CHUNK_ADDR2OFFSET(nullptr) == 0.
    4690             :          */
    4691             :         MOZ_ASSERT(CHUNK_ADDR2OFFSET(nullptr) == 0);
    4692      755434 :         offset = CHUNK_ADDR2OFFSET(ptr);
    4693      755434 :         if (offset != 0)
    4694      662814 :                 arena_dalloc(ptr, offset);
    4695       92620 :         else if (ptr)
    4696           2 :                 huge_dalloc(ptr);
    4697      755569 : }
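
The dispatch in free_impl() above leans on the fact that huge allocations are handed out chunk-aligned while arena allocations always sit past the chunk header. A small sketch, assuming 1 MiB, 1 MiB-aligned chunks (kChunkSize and chunk_addr2offset below are illustrative stand-ins):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static const size_t kChunkSize = 1024 * 1024;   /* assumed chunk size */

    static size_t
    chunk_addr2offset(const void *p)
    {
            return (uintptr_t)p & (kChunkSize - 1);
    }

    int
    main()
    {
            void *huge_like  = (void *)(uintptr_t)(4 * kChunkSize);      /* chunk-aligned */
            void *arena_like = (void *)(uintptr_t)(4 * kChunkSize + 64); /* inside a chunk */

            assert(chunk_addr2offset(nullptr) == 0);     /* looks "huge", then skipped */
            assert(chunk_addr2offset(huge_like) == 0);   /* dispatched to huge_dalloc() */
            assert(chunk_addr2offset(arena_like) != 0);  /* dispatched to arena_dalloc() */
            return 0;
    }
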
    4698             : 
    4699             : /*
    4700             :  * End malloc(3)-compatible functions.
    4701             :  */
    4702             : /******************************************************************************/
    4703             : /*
    4704             :  * Begin non-standard functions.
    4705             :  */
    4706             : 
    4707             : /* This was added by Mozilla for use by SQLite. */
    4708             : MOZ_MEMORY_API size_t
    4709        8846 : malloc_good_size_impl(size_t size)
    4710             : {
    4711             :         /*
    4712             :          * This duplicates the logic in imalloc(), arena_malloc() and
    4713             :          * arena_malloc_small().
    4714             :          */
    4715        8846 :         if (size < small_min) {
    4716             :                 /* Small (tiny). */
    4717         508 :                 size = pow2_ceil(size);
    4718             :                 /*
    4719             :                  * We omit the #ifdefs from arena_malloc_small() --
    4720             :                  * it can be inaccurate with its size in some cases, but this
    4721             :                  * function must be accurate.
    4722             :                  */
    4723         508 :                 if (size < (1U << TINY_MIN_2POW))
    4724         288 :                         size = (1U << TINY_MIN_2POW);
    4725        8338 :         } else if (size <= small_max) {
    4726             :                 /* Small (quantum-spaced). */
    4727        6969 :                 size = QUANTUM_CEILING(size);
    4728        1369 :         } else if (size <= bin_maxclass) {
    4729             :                 /* Small (sub-page). */
    4730        1241 :                 size = pow2_ceil(size);
    4731         128 :         } else if (size <= arena_maxclass) {
    4732             :                 /* Large. */
    4733         128 :                 size = PAGE_CEILING(size);
    4734             :         } else {
    4735             :                 /*
    4736             :                  * Huge.  We use PAGE_CEILING to get psize, instead of using
    4737             :                  * CHUNK_CEILING to get csize.  This ensures that
    4738             :                  * malloc_usable_size(malloc(n)) always matches
    4739             :                  * malloc_good_size(n).
    4740             :                  */
    4741           0 :                 size = PAGE_CEILING(size);
    4742             :         }
    4743        8846 :         return size;
    4744             : }
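
Some worked values for the rounding in malloc_good_size_impl(), assuming the common static configuration of a 16-byte quantum, 512-byte small_max and 4 KiB pages (these constants and the two helpers are assumptions for illustration; the real values are computed at build or init time):

    #include <cassert>
    #include <cstddef>

    static const size_t kQuantum = 16;   /* assumed quantum */
    static const size_t kPage    = 4096; /* assumed page size */

    static size_t quantum_ceiling(size_t s) { return (s + kQuantum - 1) & ~(kQuantum - 1); }
    static size_t page_ceiling(size_t s)    { return (s + kPage - 1) & ~(kPage - 1); }

    int
    main()
    {
            assert(quantum_ceiling(100) == 112);    /* quantum-spaced small request */
            assert(quantum_ceiling(512) == 512);    /* already a bin size */
            assert(page_ceiling(3000) == 4096);     /* large request: whole pages */
            assert(page_ceiling(2 * 1024 * 1024 + 1) == 2 * 1024 * 1024 + 4096); /* huge */
            return 0;
    }

A sub-page request such as 600 bytes would instead be rounded up to the next power of two (1024) by the pow2_ceil() branch.
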
    4745             : 
    4746             : 
    4747             : MOZ_MEMORY_API size_t
    4748       21981 : malloc_usable_size_impl(MALLOC_USABLE_SIZE_CONST_PTR void *ptr)
    4749             : {
    4750       21981 :         return (isalloc_validate(ptr));
    4751             : }
    4752             : 
    4753             : MOZ_JEMALLOC_API void
    4754           0 : jemalloc_stats_impl(jemalloc_stats_t *stats)
    4755             : {
    4756             :         size_t i, non_arena_mapped, chunk_header_size;
    4757             : 
    4758           0 :         MOZ_ASSERT(stats);
    4759             : 
    4760             :         /*
    4761             :          * Gather runtime settings.
    4762             :          */
    4763           0 :         stats->opt_junk = opt_junk;
    4764           0 :         stats->opt_zero = opt_zero;
    4765           0 :         stats->narenas = narenas;
    4766           0 :         stats->quantum = quantum;
    4767           0 :         stats->small_max = small_max;
    4768           0 :         stats->large_max = arena_maxclass;
    4769           0 :         stats->chunksize = chunksize;
    4770           0 :         stats->dirty_max = opt_dirty_max;
    4771             : 
    4772             :         /*
    4773             :          * Gather current memory usage statistics.
    4774             :          */
    4775           0 :         stats->mapped = 0;
    4776           0 :         stats->allocated = 0;
    4777           0 :         stats->waste = 0;
    4778           0 :         stats->page_cache = 0;
    4779           0 :         stats->bookkeeping = 0;
    4780           0 :         stats->bin_unused = 0;
    4781             : 
    4782           0 :         non_arena_mapped = 0;
    4783             : 
    4784             :         /* Get huge mapped/allocated. */
    4785           0 :         malloc_mutex_lock(&huge_mtx);
    4786           0 :         non_arena_mapped += huge_mapped;
    4787           0 :         stats->allocated += huge_allocated;
    4788           0 :         MOZ_ASSERT(huge_mapped >= huge_allocated);
    4789           0 :         malloc_mutex_unlock(&huge_mtx);
    4790             : 
    4791             :         /* Get base mapped/allocated. */
    4792           0 :         malloc_mutex_lock(&base_mtx);
    4793           0 :         non_arena_mapped += base_mapped;
    4794           0 :         stats->bookkeeping += base_committed;
    4795           0 :         MOZ_ASSERT(base_mapped >= base_committed);
    4796           0 :         malloc_mutex_unlock(&base_mtx);
    4797             : 
    4798           0 :         malloc_spin_lock(&arenas_lock);
    4799             :         /* Iterate over arenas. */
    4800           0 :         for (i = 0; i < narenas; i++) {
    4801           0 :                 arena_t *arena = arenas[i];
    4802             :                 size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
    4803             :                     arena_unused, arena_headers;
    4804             :                 arena_run_t* run;
    4805             :                 arena_chunk_map_t* mapelm;
    4806             : 
    4807           0 :                 if (!arena) {
    4808             :                         continue;
    4809             :                 }
    4810             : 
    4811           0 :                 arena_headers = 0;
    4812           0 :                 arena_unused = 0;
    4813             : 
    4814           0 :                 malloc_spin_lock(&arena->lock);
    4815             : 
    4816           0 :                 arena_mapped = arena->stats.mapped;
    4817             : 
    4818             :                 /* "committed" counts dirty and allocated memory. */
    4819           0 :                 arena_committed = arena->stats.committed << pagesize_2pow;
    4820             : 
    4821           0 :                 arena_allocated = arena->stats.allocated_small +
    4822           0 :                                   arena->stats.allocated_large;
    4823             : 
    4824           0 :                 arena_dirty = arena->ndirty << pagesize_2pow;
    4825             : 
    4826           0 :                 for (j = 0; j < ntbins + nqbins + nsbins; j++) {
    4827           0 :                         arena_bin_t* bin = &arena->bins[j];
    4828           0 :                         size_t bin_unused = 0;
    4829             : 
    4830           0 :                         rb_foreach_begin(arena_chunk_map_t, link, &bin->runs, mapelm) {
    4831           0 :                                 run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
    4832           0 :                                 bin_unused += run->nfree * bin->reg_size;
    4833           0 :                         } rb_foreach_end(arena_chunk_map_t, link, &bin->runs, mapelm)
    4834             : 
    4835           0 :                         if (bin->runcur) {
    4836           0 :                                 bin_unused += bin->runcur->nfree * bin->reg_size;
    4837             :                         }
    4838             : 
    4839           0 :                         arena_unused += bin_unused;
    4840           0 :                         arena_headers += bin->stats.curruns * bin->reg0_offset;
    4841             :                 }
    4842             : 
    4843           0 :                 malloc_spin_unlock(&arena->lock);
    4844             : 
    4845           0 :                 MOZ_ASSERT(arena_mapped >= arena_committed);
    4846           0 :                 MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
    4847             : 
    4848             :                 /* "waste" is committed memory that is neither dirty nor
    4849             :                  * allocated. */
    4850           0 :                 stats->mapped += arena_mapped;
    4851           0 :                 stats->allocated += arena_allocated;
    4852           0 :                 stats->page_cache += arena_dirty;
    4853           0 :                 stats->waste += arena_committed -
    4854           0 :                     arena_allocated - arena_dirty - arena_unused - arena_headers;
    4855           0 :                 stats->bin_unused += arena_unused;
    4856           0 :                 stats->bookkeeping += arena_headers;
    4857             :         }
    4858           0 :         malloc_spin_unlock(&arenas_lock);
    4859             : 
    4860             :         /* Account for arena chunk headers in bookkeeping rather than waste. */
    4861           0 :         chunk_header_size =
    4862           0 :             ((stats->mapped / stats->chunksize) * arena_chunk_header_npages) <<
    4863             :             pagesize_2pow;
    4864             : 
    4865           0 :         stats->mapped += non_arena_mapped;
    4866           0 :         stats->bookkeeping += chunk_header_size;
    4867           0 :         stats->waste -= chunk_header_size;
    4868             : 
    4869           0 :         MOZ_ASSERT(stats->mapped >= stats->allocated + stats->waste +
    4870             :                                 stats->page_cache + stats->bookkeeping);
    4871           0 : }
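
For callers, the statistics filled in above are reachable through the public jemalloc_stats() entry point (declared in Gecko's mozmemory.h; the header name here is an assumption, but the field names come straight from the code above). A short usage sketch:

    #include <cstdio>
    #include "mozmemory.h" /* assumed header declaring jemalloc_stats_t / jemalloc_stats() */

    static void
    print_heap_breakdown()
    {
            jemalloc_stats_t stats;
            jemalloc_stats(&stats);

            /* Everything mapped is accounted for as allocated, waste, page cache
             * (dirty pages kept for reuse) or bookkeeping, as the assertion at
             * the end of jemalloc_stats_impl() checks. */
            printf("mapped:      %zu\n", stats.mapped);
            printf("allocated:   %zu\n", stats.allocated);
            printf("waste:       %zu\n", stats.waste);
            printf("page cache:  %zu\n", stats.page_cache);
            printf("bookkeeping: %zu\n", stats.bookkeeping);
            printf("bin unused:  %zu\n", stats.bin_unused);
    }
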
    4872             : 
    4873             : #ifdef MALLOC_DOUBLE_PURGE
    4874             : 
    4875             : /* Explicitly remove all of this chunk's MADV_FREE'd pages from memory. */
    4876             : static void
    4877             : hard_purge_chunk(arena_chunk_t *chunk)
    4878             : {
    4879             :         /* See similar logic in arena_purge(). */
    4880             : 
    4881             :         size_t i;
    4882             :         for (i = arena_chunk_header_npages; i < chunk_npages; i++) {
    4883             :                 /* Find all adjacent pages with CHUNK_MAP_MADVISED set. */
    4884             :                 size_t npages;
    4885             :                 for (npages = 0;
    4886             :                      chunk->map[i + npages].bits & CHUNK_MAP_MADVISED && i + npages < chunk_npages;
    4887             :                      npages++) {
    4888             :                         /* Turn off the chunk's MADV_FREED bit and turn on its
    4889             :                          * DECOMMITTED bit. */
    4890             :                         MOZ_DIAGNOSTIC_ASSERT(!(chunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
    4891             :                         chunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
    4892             :                 }
    4893             : 
    4894             :                 /* We could use mincore to find out which pages are actually
    4895             :                  * present, but it's not clear that's better. */
    4896             :                 if (npages > 0) {
    4897             :                         pages_decommit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
    4898             :                         pages_commit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
    4899             :                 }
    4900             :                 i += npages;
    4901             :         }
    4902             : }
    4903             : 
    4904             : /* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
    4905             : static void
    4906             : hard_purge_arena(arena_t *arena)
    4907             : {
    4908             :         malloc_spin_lock(&arena->lock);
    4909             : 
    4910             :         while (!LinkedList_IsEmpty(&arena->chunks_madvised)) {
    4911             :                 arena_chunk_t *chunk =
    4912             :                         LinkedList_Get(arena->chunks_madvised.next,
    4913             :                                        arena_chunk_t, chunks_madvised_elem);
    4914             :                 hard_purge_chunk(chunk);
    4915             :                 LinkedList_Remove(&chunk->chunks_madvised_elem);
    4916             :         }
    4917             : 
    4918             :         malloc_spin_unlock(&arena->lock);
    4919             : }
    4920             : 
    4921             : MOZ_JEMALLOC_API void
    4922             : jemalloc_purge_freed_pages_impl()
    4923             : {
    4924             :         size_t i;
    4925             :         malloc_spin_lock(&arenas_lock);
    4926             :         for (i = 0; i < narenas; i++) {
    4927             :                 arena_t *arena = arenas[i];
    4928             :                 if (arena)
    4929             :                         hard_purge_arena(arena);
    4930             :         }
    4931             :         malloc_spin_unlock(&arenas_lock);
    4932             : }
    4933             : 
    4934             : #else /* !defined MALLOC_DOUBLE_PURGE */
    4935             : 
    4936             : MOZ_JEMALLOC_API void
    4937           0 : jemalloc_purge_freed_pages_impl()
    4938             : {
    4939             :         /* Do nothing. */
    4940           0 : }
    4941             : 
    4942             : #endif /* defined MALLOC_DOUBLE_PURGE */
    4943             : 
    4944             : 
    4945             : 
    4946             : #ifdef MOZ_MEMORY_WINDOWS
    4947             : void*
    4948             : _recalloc(void *ptr, size_t count, size_t size)
    4949             : {
    4950             :         size_t oldsize = ptr ? isalloc(ptr) : 0;
    4951             :         size_t newsize = count * size;
    4952             : 
    4953             :         /*
    4954             :          * In order for all trailing bytes to be zeroed, the caller needs to
    4955             :          * use calloc(), followed by recalloc().  However, the current calloc()
    4956             :          * implementation only zeros the bytes requested, so if recalloc() is
    4957             :          * to work 100% correctly, calloc() will need to change to zero
    4958             :          * trailing bytes.
    4959             :          */
    4960             : 
    4961             :         ptr = realloc_impl(ptr, newsize);
    4962             :         if (ptr && oldsize < newsize) {
    4963             :                 memset((void *)((uintptr_t)ptr + oldsize), 0, newsize -
    4964             :                     oldsize);
    4965             :         }
    4966             : 
    4967             :         return ptr;
    4968             : }
    4969             : 
    4970             : /*
    4971             :  * This impl of _expand doesn't ever actually expand or shrink blocks: it
    4972             :  * simply replies that you may continue using a shrunk block.
    4973             :  */
    4974             : void*
    4975             : _expand(void *ptr, size_t newsize)
    4976             : {
    4977             :         if (isalloc(ptr) >= newsize)
    4978             :                 return ptr;
    4979             : 
    4980             :         return nullptr;
    4981             : }
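
A caller-side sketch of how this CRT-style pair is typically used: try the
free in-place path first and only fall back to a (possibly moving)
reallocation when _expand declines.  grow_buffer is a hypothetical helper,
not part of this file.

#include <stdlib.h>

/* Hypothetical helper: return a block of at least newsize bytes, reusing
 * buf in place whenever _expand reports it is already big enough. */
static void *
grow_buffer(void *buf, size_t newsize)
{
        void *p = _expand(buf, newsize);
        if (p != NULL)
                return p;               /* existing block already covers newsize */
        return realloc(buf, newsize);   /* otherwise reallocate; may move */
}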
    4982             : 
    4983             : size_t
    4984             : _msize(void *ptr)
    4985             : {
    4986             : 
    4987             :         return malloc_usable_size_impl(ptr);
    4988             : }
    4989             : #endif
    4990             : 
    4991             : MOZ_JEMALLOC_API void
    4992           0 : jemalloc_free_dirty_pages_impl(void)
    4993             : {
    4994             :         size_t i;
    4995           0 :         malloc_spin_lock(&arenas_lock);
    4996           0 :         for (i = 0; i < narenas; i++) {
    4997           0 :                 arena_t *arena = arenas[i];
    4998             : 
    4999           0 :                 if (arena) {
    5000           0 :                         malloc_spin_lock(&arena->lock);
    5001           0 :                         arena_purge(arena, true);
    5002           0 :                         malloc_spin_unlock(&arena->lock);
    5003             :                 }
    5004             :         }
    5005           0 :         malloc_spin_unlock(&arenas_lock);
    5006           0 : }
    5007             : 
    5008             : /*
    5009             :  * End non-standard functions.
    5010             :  */
    5011             : /******************************************************************************/
    5012             : /*
    5013             :  * Begin library-private functions, used by threading libraries for protection
    5014             :  * of malloc during fork().  These functions are only called if the program is
    5015             :  * running in threaded mode, so there is no need to check whether the program
    5016             :  * is threaded here.
    5017             :  */
    5018             : 
    5019             : #ifndef MOZ_MEMORY_DARWIN
    5020             : static
    5021             : #endif
    5022             : void
    5023           4 : _malloc_prefork(void)
    5024             : {
    5025             :         unsigned i;
    5026             : 
    5027             :         /* Acquire all mutexes in a safe order. */
    5028             : 
    5029           4 :         malloc_spin_lock(&arenas_lock);
    5030           8 :         for (i = 0; i < narenas; i++) {
    5031           4 :                 if (arenas[i])
    5032           4 :                         malloc_spin_lock(&arenas[i]->lock);
    5033             :         }
    5034             : 
    5035           4 :         malloc_mutex_lock(&base_mtx);
    5036             : 
    5037           4 :         malloc_mutex_lock(&huge_mtx);
    5038           4 : }
    5039             : 
    5040             : #ifndef MOZ_MEMORY_DARWIN
    5041             : static
    5042             : #endif
    5043             : void
    5044           4 : _malloc_postfork_parent(void)
    5045             : {
    5046             :         unsigned i;
    5047             : 
    5048             :         /* Release all mutexes, now that fork() has completed. */
    5049             : 
    5050           4 :         malloc_mutex_unlock(&huge_mtx);
    5051             : 
    5052           4 :         malloc_mutex_unlock(&base_mtx);
    5053             : 
    5054           8 :         for (i = 0; i < narenas; i++) {
    5055           4 :                 if (arenas[i])
    5056           4 :                         malloc_spin_unlock(&arenas[i]->lock);
    5057             :         }
    5058           4 :         malloc_spin_unlock(&arenas_lock);
    5059           4 : }
    5060             : 
    5061             : #ifndef MOZ_MEMORY_DARWIN
    5062             : static
    5063             : #endif
    5064             : void
    5065           0 : _malloc_postfork_child(void)
    5066             : {
    5067             :         unsigned i;
    5068             : 
    5069             :         /* Reinitialize all mutexes, now that fork() has completed. */
    5070             : 
    5071           0 :         malloc_mutex_init(&huge_mtx);
    5072             : 
    5073           0 :         malloc_mutex_init(&base_mtx);
    5074             : 
    5075           0 :         for (i = 0; i < narenas; i++) {
    5076           0 :                 if (arenas[i])
    5077           0 :                         malloc_spin_init(&arenas[i]->lock);
    5078             :         }
    5079           0 :         malloc_spin_init(&arenas_lock);
    5080           0 : }
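
The three handlers above follow the usual fork-safety protocol: before fork()
the parent acquires every allocator lock in a fixed order, so no lock can be
held by a thread that will not exist in the child; afterwards the parent
simply unlocks, while the child reinitializes the locks it now owns alone.
A minimal sketch of how such handlers are typically registered, assuming
POSIX threads (the sketch only illustrates the mechanism; the registration
itself is done elsewhere):

#include <pthread.h>

static void
register_fork_handlers(void)
{
        /* The prepare handler runs in the parent before fork(); the parent
         * and child handlers run after fork() in the respective processes. */
        pthread_atfork(_malloc_prefork,
                       _malloc_postfork_parent,
                       _malloc_postfork_child);
}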
    5081             : 
    5082             : /*
    5083             :  * End library-private functions.
    5084             :  */
    5085             : /******************************************************************************/
    5086             : 
    5087             : #ifdef HAVE_DLOPEN
    5088             : #  include <dlfcn.h>
    5089             : #endif
    5090             : 
    5091             : #if defined(MOZ_MEMORY_DARWIN)
    5092             : 
    5093             : __attribute__((constructor))
    5094             : void
    5095             : jemalloc_darwin_init(void)
    5096             : {
    5097             :         if (malloc_init_hard())
    5098             :                 moz_abort();
    5099             : }
    5100             : 
    5101             : #endif
    5102             : 
    5103             : /*
    5104             :  * is_malloc(malloc_impl) is some macro magic to detect if malloc_impl is
    5105             :  * defined as "malloc" in mozmemory_wrap.h
    5106             :  */
    5107             : #define malloc_is_malloc 1
    5108             : #define is_malloc_(a) malloc_is_ ## a
    5109             : #define is_malloc(a) is_malloc_(a)
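
To make the macro magic concrete, here is the preprocessor expansion,
assuming mozmemory_wrap.h defines malloc_impl as malloc:

        is_malloc(malloc_impl)
            -> is_malloc_(malloc)       (the argument malloc_impl expands first)
            -> malloc_is_malloc         (token pasting: malloc_is_ ## malloc)
            -> 1                        (malloc_is_malloc is defined just above)

If malloc_impl expands to anything else, say je_malloc, the paste yields
malloc_is_je_malloc, which is undefined and therefore evaluates to 0 inside
the #if below, so the glibc hooks are not installed.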
    5110             : 
    5111             : #if !defined(MOZ_MEMORY_DARWIN) && (is_malloc(malloc_impl) == 1)
    5112             : #  if defined(__GLIBC__) && !defined(__UCLIBC__)
    5113             : /*
    5114             :  * glibc provides the RTLD_DEEPBIND flag for dlopen(), which can cause a
    5115             :  * dlopen()ed library to inconsistently reference libc's malloc(3)-compatible
    5116             :  * functions instead of the ones defined here (bug 493541).
    5117             :  *
    5118             :  * These definitions interpose hooks in glibc.  The functions are actually
    5119             :  * passed an extra argument for the caller return address, which will be
    5120             :  * ignored.
    5121             :  */
    5122             : extern "C" {
    5123             : MOZ_EXPORT void (*__free_hook)(void *ptr) = free_impl;
    5124             : MOZ_EXPORT void *(*__malloc_hook)(size_t size) = malloc_impl;
    5125             : MOZ_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = realloc_impl;
    5126             : MOZ_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = MEMALIGN;
    5127             : }
    5128             : 
    5129             : #  elif defined(RTLD_DEEPBIND)
    5130             : /*
    5131             :  * XXX On systems that support RTLD_GROUP or DF_1_GROUP, do their
    5132             :  * implementations permit similar inconsistencies?  Should STV_SINGLETON
    5133             :  * visibility be used for interposition where available?
    5134             :  */
    5135             : #    error "Interposing malloc is unsafe on this system without libc malloc hooks."
    5136             : #  endif
    5137             : #endif
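
For reference, the same glibc hook mechanism in isolation: a minimal,
hypothetical sketch that interposes __malloc_hook with a logging wrapper
(these hooks were later deprecated and eventually removed from glibc, so this
is illustration only).  Unlike the definitions above, which declare the hook
symbols without the extra parameter, this sketch uses glibc's own
declarations from <malloc.h>, so the caller return address mentioned in the
comment is received explicitly.

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

static void *(*saved_malloc_hook)(size_t, const void *);

/* Hypothetical logging hook, following the classic save/restore pattern so
 * that the nested malloc() call does not recurse into the hook. */
static void *
logging_malloc_hook(size_t size, const void *caller)
{
        void *p;

        __malloc_hook = saved_malloc_hook;      /* uninstall ourselves */
        p = malloc(size);
        fprintf(stderr, "malloc(%zu) from %p -> %p\n", size, caller, p);
        saved_malloc_hook = __malloc_hook;      /* reinstall */
        __malloc_hook = logging_malloc_hook;
        return p;
}

static void
install_logging_hook(void)
{
        saved_malloc_hook = __malloc_hook;
        __malloc_hook = logging_malloc_hook;
}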
    5138             : 
    5139             : #ifdef MOZ_MEMORY_WINDOWS
    5140             : /*
    5141             :  * In the new-style jemalloc integration, jemalloc is built as a separate
    5142             :  * shared library.  Since we're no longer hooking into the CRT binary,
    5143             :  * we need to initialize the heap at the first opportunity we get.
    5144             :  * DLL_PROCESS_ATTACH in DllMain is that opportunity.
    5145             :  */
    5146             : BOOL APIENTRY DllMain(HINSTANCE hModule,
    5147             :                       DWORD reason,
    5148             :                       LPVOID lpReserved)
    5149             : {
    5150             :   switch (reason) {
    5151             :     case DLL_PROCESS_ATTACH:
    5152             :       /* Don't force the system to page DllMain back in every time
    5153             :        * we create/destroy a thread */
    5154             :       DisableThreadLibraryCalls(hModule);
    5155             :       /* Initialize the heap */
    5156             :       malloc_init_hard();
    5157             :       break;
    5158             : 
    5159             :     case DLL_PROCESS_DETACH:
    5160             :       break;
    5161             : 
    5162             :   }
    5163             : 
    5164             :   return TRUE;
    5165             : }
    5166             : #endif

Generated by: LCOV version 1.13