author    Lei Zhang <thestig@chromium.org>  2018-10-16 18:40:15 +0000
committer Chromium commit bot <commit-bot@chromium.org>  2018-10-16 18:40:15 +0000
commit    f1fae3cd1b9492cf9411dffd3c486b9672d8cc76 (patch)
tree      bee5d8b2528e714149d399163519f0fa35ec532b /third_party
parent    1b6fcaad0722fb586620efc974726dee3fb0f80f (diff)
download  pdfium-f1fae3cd1b9492cf9411dffd3c486b9672d8cc76.tar.xz
Update PartitionAlloc from Chromium at r599712.
BUG=pdfium:1170
Change-Id: I0f8dfb3d517beaa682a9ca7ad4831c5a7a10dc3b
Reviewed-on: https://pdfium-review.googlesource.com/c/44073
Reviewed-by: Tom Sepez <tsepez@chromium.org>
Commit-Queue: Lei Zhang <thestig@chromium.org>
Diffstat (limited to 'third_party')
-rw-r--r--  third_party/BUILD.gn | 18
-rw-r--r--  third_party/base/allocator/partition_allocator/OWNERS | 6
-rw-r--r--  third_party/base/allocator/partition_allocator/address_space_randomization.cc | 100
-rw-r--r--  third_party/base/allocator/partition_allocator/address_space_randomization.h | 198
-rw-r--r--  third_party/base/allocator/partition_allocator/oom.h | 17
-rw-r--r--  third_party/base/allocator/partition_allocator/oom_callback.cc | 28
-rw-r--r--  third_party/base/allocator/partition_allocator/oom_callback.h | 26
-rw-r--r--  third_party/base/allocator/partition_allocator/page_allocator.cc | 338
-rw-r--r--  third_party/base/allocator/partition_allocator/page_allocator.h | 222
-rw-r--r--  third_party/base/allocator/partition_allocator/page_allocator_constants.h | 44
-rw-r--r--  third_party/base/allocator/partition_allocator/page_allocator_internal.h | 20
-rw-r--r--  third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h | 187
-rw-r--r--  third_party/base/allocator/partition_allocator/page_allocator_internals_win.h | 123
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_alloc.cc | 1355
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_alloc.h | 794
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_alloc_constants.h | 169
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_bucket.cc | 568
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_bucket.h | 130
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_cookie.h | 72
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_direct_map_extent.h | 35
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_freelist_entry.h | 50
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_oom.cc | 26
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_oom.h | 28
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_page.cc | 165
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_page.h | 296
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_root_base.cc | 42
-rw-r--r--  third_party/base/allocator/partition_allocator/partition_root_base.h | 195
-rw-r--r--  third_party/base/allocator/partition_allocator/spin_lock.cc | 44
-rw-r--r--  third_party/base/allocator/partition_allocator/spin_lock.h | 18
-rw-r--r--  third_party/base/stl_util.h | 12
30 files changed, 3313 insertions(+), 2013 deletions(-)
diff --git a/third_party/BUILD.gn b/third_party/BUILD.gn
index 7a5a6c3c59..2dbf9205c7 100644
--- a/third_party/BUILD.gn
+++ b/third_party/BUILD.gn
@@ -552,10 +552,28 @@ jumbo_source_set("pdfium_base") {
"base/allocator/partition_allocator/address_space_randomization.cc",
"base/allocator/partition_allocator/address_space_randomization.h",
"base/allocator/partition_allocator/oom.h",
+ "base/allocator/partition_allocator/oom_callback.cc",
+ "base/allocator/partition_allocator/oom_callback.h",
"base/allocator/partition_allocator/page_allocator.cc",
"base/allocator/partition_allocator/page_allocator.h",
+ "base/allocator/partition_allocator/page_allocator_constants.h",
+ "base/allocator/partition_allocator/page_allocator_internal.h",
+ "base/allocator/partition_allocator/page_allocator_internals_posix.h",
+ "base/allocator/partition_allocator/page_allocator_internals_win.h",
"base/allocator/partition_allocator/partition_alloc.cc",
"base/allocator/partition_allocator/partition_alloc.h",
+ "base/allocator/partition_allocator/partition_alloc_constants.h",
+ "base/allocator/partition_allocator/partition_bucket.cc",
+ "base/allocator/partition_allocator/partition_bucket.h",
+ "base/allocator/partition_allocator/partition_cookie.h",
+ "base/allocator/partition_allocator/partition_direct_map_extent.h",
+ "base/allocator/partition_allocator/partition_freelist_entry.h",
+ "base/allocator/partition_allocator/partition_oom.cc",
+ "base/allocator/partition_allocator/partition_oom.h",
+ "base/allocator/partition_allocator/partition_page.cc",
+ "base/allocator/partition_allocator/partition_page.h",
+ "base/allocator/partition_allocator/partition_root_base.cc",
+ "base/allocator/partition_allocator/partition_root_base.h",
"base/allocator/partition_allocator/spin_lock.cc",
"base/allocator/partition_allocator/spin_lock.h",
"base/base_export.h",
diff --git a/third_party/base/allocator/partition_allocator/OWNERS b/third_party/base/allocator/partition_allocator/OWNERS
index 95d998269a..b0a2a850f7 100644
--- a/third_party/base/allocator/partition_allocator/OWNERS
+++ b/third_party/base/allocator/partition_allocator/OWNERS
@@ -1,2 +1,8 @@
+ajwong@chromium.org
+haraken@chromium.org
palmer@chromium.org
tsepez@chromium.org
+
+# TEAM: platform-architecture-dev@chromium.org
+# Also: security-dev@chromium.org
+# COMPONENT: Blink>MemoryAllocator>Partition
diff --git a/third_party/base/allocator/partition_allocator/address_space_randomization.cc b/third_party/base/allocator/partition_allocator/address_space_randomization.cc
index d16970a7c9..135c67da7a 100644
--- a/third_party/base/allocator/partition_allocator/address_space_randomization.cc
+++ b/third_party/base/allocator/partition_allocator/address_space_randomization.cc
@@ -7,19 +7,17 @@
#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/page_allocator.h"
#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+#include "third_party/base/logging.h"
#if defined(OS_WIN)
-#include <windows.h>
+#include <windows.h> // Must be in front of other Windows header files.
+
+#include <VersionHelpers.h>
#else
#include <sys/time.h>
#include <unistd.h>
#endif
-// VersionHelpers.h must be included after windows.h.
-#if defined(OS_WIN)
-#include <VersionHelpers.h>
-#endif
-
namespace pdfium {
namespace base {
@@ -27,7 +25,7 @@ namespace {
// This is the same PRNG as used by tcmalloc for mapping address randomness;
// see http://burtleburtle.net/bob/rand/smallprng.html
-struct ranctx {
+struct RandomContext {
subtle::SpinLock lock;
bool initialized;
uint32_t a;
@@ -36,9 +34,16 @@ struct ranctx {
uint32_t d;
};
+RandomContext* GetRandomContext() {
+ static RandomContext* s_RandomContext = nullptr;
+ if (!s_RandomContext)
+ s_RandomContext = new RandomContext();
+ return s_RandomContext;
+}
+
#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
-uint32_t ranvalInternal(ranctx* x) {
+uint32_t RandomValueInternal(RandomContext* x) {
uint32_t e = x->a - rot(x->b, 27);
x->a = x->b ^ rot(x->c, 17);
x->b = x->c + x->d;
@@ -49,7 +54,7 @@ uint32_t ranvalInternal(ranctx* x) {
#undef rot
-uint32_t ranval(ranctx* x) {
+uint32_t RandomValue(RandomContext* x) {
subtle::SpinLock::Guard guard(x->lock);
if (UNLIKELY(!x->initialized)) {
x->initialized = true;
@@ -73,29 +78,34 @@ uint32_t ranval(ranctx* x) {
x->a = 0xf1ea5eed;
x->b = x->c = x->d = seed;
for (int i = 0; i < 20; ++i) {
- (void)ranvalInternal(x);
+ RandomValueInternal(x);
}
}
- uint32_t ret = ranvalInternal(x);
- return ret;
-}
-static struct ranctx s_ranctx;
+ return RandomValueInternal(x);
+}
} // namespace
-// Calculates a random preferred mapping address. In calculating an address, we
-// balance good ASLR against not fragmenting the address space too badly.
+void SetRandomPageBaseSeed(int64_t seed) {
+ RandomContext* x = GetRandomContext();
+ subtle::SpinLock::Guard guard(x->lock);
+ // Set RNG to initial state.
+ x->initialized = true;
+ x->a = x->b = static_cast<uint32_t>(seed);
+ x->c = x->d = static_cast<uint32_t>(seed >> 32);
+}
+
void* GetRandomPageBase() {
- uintptr_t random;
- random = static_cast<uintptr_t>(ranval(&s_ranctx));
-#if defined(ARCH_CPU_X86_64)
- random <<= 32UL;
- random |= static_cast<uintptr_t>(ranval(&s_ranctx));
-// This address mask gives a low likelihood of address space collisions. We
-// handle the situation gracefully if there is a collision.
-#if defined(OS_WIN)
- random &= 0x3ffffffffffUL;
+ uintptr_t random = static_cast<uintptr_t>(RandomValue(GetRandomContext()));
+
+#if defined(ARCH_CPU_64_BITS)
+ random <<= 32ULL;
+ random |= static_cast<uintptr_t>(RandomValue(GetRandomContext()));
+
+// The kASLRMask and kASLROffset constants will be suitable for the
+// OS and build configuration.
+#if defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// Windows >= 8.1 has the full 47 bits. Use them where available.
static bool windows_81 = false;
static bool windows_81_initialized = false;
@@ -104,38 +114,32 @@ void* GetRandomPageBase() {
windows_81_initialized = true;
}
if (!windows_81) {
- random += 0x10000000000UL;
+ random &= internal::kASLRMaskBefore8_10;
+ } else {
+ random &= internal::kASLRMask;
}
-#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- // This range is copied from the TSan source, but works for all tools.
- random &= 0x007fffffffffUL;
- random += 0x7e8000000000UL;
+ random += internal::kASLROffset;
#else
- // Linux and OS X support the full 47-bit user space of x64 processors.
- random &= 0x3fffffffffffUL;
-#endif
-#elif defined(ARCH_CPU_ARM64)
- // ARM64 on Linux has 39-bit user space.
- random &= 0x3fffffffffUL;
- random += 0x1000000000UL;
-#else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_ARM64)
+ random &= internal::kASLRMask;
+ random += internal::kASLROffset;
+#endif // defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#else // defined(ARCH_CPU_32_BITS)
#if defined(OS_WIN)
// On win32 host systems the randomization plus huge alignment causes
// excessive fragmentation. Plus most of these systems lack ASLR, so the
// randomization isn't buying anything. In that case we just skip it.
// TODO(jschuh): Just dump the randomization when HE-ASLR is present.
- static BOOL isWow64 = -1;
- if (isWow64 == -1 && !IsWow64Process(GetCurrentProcess(), &isWow64))
- isWow64 = FALSE;
- if (!isWow64)
+ static BOOL is_wow64 = -1;
+ if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
+ is_wow64 = FALSE;
+ if (!is_wow64)
return nullptr;
#endif // defined(OS_WIN)
- // This is a good range on Windows, Linux and Mac.
- // Allocates in the 0.5-1.5GB region.
- random &= 0x3fffffff;
- random += 0x20000000;
-#endif // defined(ARCH_CPU_X86_64)
- random &= kPageAllocationGranularityBaseMask;
+ random &= internal::kASLRMask;
+ random += internal::kASLROffset;
+#endif // defined(ARCH_CPU_32_BITS)
+
+ DCHECK_EQ(0ULL, (random & kPageAllocationGranularityOffsetMask));
return reinterpret_cast<void*>(random);
}
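
The renamed PRNG is Bob Jenkins' public-domain small-state generator (jsf32), per the link in the source. A minimal standalone sketch of the round and the seeding sequence the patch uses; the class and function names here are illustrative, not part of the tree:

#include <cstdint>

// jsf32 round, mirroring RandomValueInternal() above.
static uint32_t rot32(uint32_t x, int k) {
  return (x << k) | (x >> (32 - k));
}

struct SmallPrng {  // hypothetical stand-in for RandomContext
  uint32_t a = 0xf1ea5eed;
  uint32_t b, c, d;
  explicit SmallPrng(uint32_t seed) : b(seed), c(seed), d(seed) {
    for (int i = 0; i < 20; ++i)
      Next();  // warm up, as the patch does after seeding
  }
  uint32_t Next() {
    uint32_t e = a - rot32(b, 27);
    a = b ^ rot32(c, 17);
    b = c + d;
    c = d + e;
    d = e + a;
    return d;
  }
};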
diff --git a/third_party/base/allocator/partition_allocator/address_space_randomization.h b/third_party/base/allocator/partition_allocator/address_space_randomization.h
index 97c5f606dd..efad668ebc 100644
--- a/third_party/base/allocator/partition_allocator/address_space_randomization.h
+++ b/third_party/base/allocator/partition_allocator/address_space_randomization.h
@@ -2,17 +2,207 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/base_export.h"
namespace pdfium {
namespace base {
+// Sets the seed for the random number generator used by GetRandomPageBase in
+// order to generate a predictable sequence of addresses. May be called multiple
+// times.
+BASE_EXPORT void SetRandomPageBaseSeed(int64_t seed);
+
// Calculates a random preferred mapping address. In calculating an address, we
// balance good ASLR against not fragmenting the address space too badly.
-void* GetRandomPageBase();
+BASE_EXPORT void* GetRandomPageBase();
+
+namespace internal {
+
+constexpr uintptr_t AslrAddress(uintptr_t mask) {
+ return mask & kPageAllocationGranularityBaseMask;
+}
+constexpr uintptr_t AslrMask(uintptr_t bits) {
+ return AslrAddress((1ULL << bits) - 1ULL);
+}
+
+// Turn off formatting, because the thicket of nested ifdefs below is
+// incomprehensible without indentation. It is also incomprehensible with
+// indentation, but the only other option is a combinatorial explosion of
+// *_{win,linux,mac,foo}_{32,64}.h files.
+//
+// clang-format off
+
+#if defined(ARCH_CPU_64_BITS)
+
+ #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+ // We shouldn't allocate system pages at all for sanitizer builds. However,
+ // we do, and if random hint addresses interfere with address ranges
+ // hard-coded in those tools, bad things happen. This address range is
+ // copied from TSAN source but works with all tools. See
+ // https://crbug.com/539863.
+ constexpr uintptr_t kASLRMask = AslrAddress(0x007fffffffffULL);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x7e8000000000ULL);
+
+ #elif defined(OS_WIN)
+
+ // Windows 8.10 and newer support the full 48 bit address range. Older
+ // versions of Windows only support 44 bits. Since kASLROffset is non-zero
+ // and may cause a carry, use 47 and 43 bit masks. See
+ // http://www.alex-ionescu.com/?p=246
+ constexpr uintptr_t kASLRMask = AslrMask(47);
+ constexpr uintptr_t kASLRMaskBefore8_10 = AslrMask(43);
+ // Try not to map pages into the range where Windows loads DLLs by default.
+ constexpr uintptr_t kASLROffset = 0x80000000ULL;
+
+ #elif defined(OS_MACOSX)
+
+ // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
+ // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region
+ // is destroyed. Using a virtual address space that is too large causes a
+ // leak of about 1 wired [can never be paged out] page per call to mmap. The
+ // page is only reclaimed when the process is killed. Confine the hint to a
+ // 39-bit section of the virtual address space.
+ //
+ // This implementation adapted from
+ // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
+ // is that here we clamp to 39 bits, not 32.
+ //
+ // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
+ // changes.
+ constexpr uintptr_t kASLRMask = AslrMask(38);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+
+ #elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+ #if defined(ARCH_CPU_X86_64)
+
+ // Linux (and macOS) support the full 47-bit user space of x64 processors.
+ // Use only 46 to allow the kernel a chance to fulfill the request.
+ constexpr uintptr_t kASLRMask = AslrMask(46);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #elif defined(ARCH_CPU_ARM64)
+
+ #if defined(OS_ANDROID)
+
+ // Restrict the address range on Android to avoid a large performance
+ // regression in single-process WebViews. See https://crbug.com/837640.
+ constexpr uintptr_t kASLRMask = AslrMask(30);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+
+ #else
+
+ // ARM64 on Linux has 39-bit user space. Use 38 bits since kASLROffset
+ // could cause a carry.
+ constexpr uintptr_t kASLRMask = AslrMask(38);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+
+ #endif
+
+ #elif defined(ARCH_CPU_PPC64)
+
+ #if defined(OS_AIX)
+
+ // AIX has 64 bits of virtual addressing, but we limit the address range
+ // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
+ // extra address space to isolate the mmap regions.
+ constexpr uintptr_t kASLRMask = AslrMask(30);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x400000000000ULL);
+
+ #elif defined(ARCH_CPU_BIG_ENDIAN)
+
+ // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
+ constexpr uintptr_t kASLRMask = AslrMask(42);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #else // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+
+ // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
+ constexpr uintptr_t kASLRMask = AslrMask(46);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #endif // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+
+ #elif defined(ARCH_CPU_S390X)
+
+ // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
+ // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
+ // chance to fulfill the request.
+ constexpr uintptr_t kASLRMask = AslrMask(40);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #elif defined(ARCH_CPU_S390)
+
+ // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
+ // a chance to fulfill the request.
+ constexpr uintptr_t kASLRMask = AslrMask(29);
+ constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+ #else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
+ // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+
+ // For all other POSIX variants, use 30 bits.
+ constexpr uintptr_t kASLRMask = AslrMask(30);
+
+ #if defined(OS_SOLARIS)
+
+ // For our Solaris/illumos mmap hint, we pick a random address in the
+ // bottom half of the top half of the address space (that is, the third
+ // quarter). Because we do not MAP_FIXED, this will be treated only as a
+ // hint -- the system will not fail to mmap because something else
+ // happens to already be mapped at our random address. We deliberately
+ // set the hint high enough to get well above the system's break (that
+ // is, the heap); Solaris and illumos will try the hint and if that
+ // fails allocate as if there were no hint at all. The high hint
+ // prevents the break from getting hemmed in at low values, ceding half
+ // of the address space to the system heap.
+ constexpr uintptr_t kASLROffset = AslrAddress(0x80000000ULL);
+
+ #elif defined(OS_AIX)
+
+ // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
+ // upper range.
+ constexpr uintptr_t kASLROffset = AslrAddress(0x90000000ULL);
+
+ #else // !defined(OS_SOLARIS) && !defined(OS_AIX)
+
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
+ // 10.6 and 10.7.
+ constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+
+ #endif // !defined(OS_SOLARIS) && !defined(OS_AIX)
+
+ #endif // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
+ // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+
+ #endif // defined(OS_POSIX)
+
+#elif defined(ARCH_CPU_32_BITS)
+
+ // This is a good range on 32-bit Windows and Android (the only platforms on
+ // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
+ // is no issue with carries here.
+ constexpr uintptr_t kASLRMask = AslrMask(30);
+ constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+
+#else
+
+ #error Please tell us about your exotic hardware! Sounds interesting.
+
+#endif // defined(ARCH_CPU_32_BITS)
+
+// clang-format on
+
+} // namespace internal
} // namespace base
} // namespace pdfium
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
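
To make the mask-and-offset arithmetic concrete, here is a self-contained sketch of the 32-bit configuration above, assuming the 4 KiB granularity of a typical Linux build (values are illustrative):

#include <cstdint>
#include <cstdio>

// Mirrors AslrAddress()/AslrMask() with a hard-coded 12-bit granularity.
constexpr uintptr_t kGranularityBaseMask = ~((uintptr_t{1} << 12) - 1);
constexpr uintptr_t AslrAddress(uintptr_t mask) {
  return mask & kGranularityBaseMask;
}
constexpr uintptr_t AslrMask(uintptr_t bits) {
  return AslrAddress((uintptr_t{1} << bits) - 1);
}

int main() {
  // 32-bit configuration: mask to 30 bits, then offset by 0.5 GiB,
  // yielding hints in the 0.5 - 1.5 GiB region.
  uintptr_t random = 0x12345678u;  // stand-in for RandomValue()
  random &= AslrMask(30);
  random += AslrAddress(0x20000000);
  printf("hint = %#zx\n", static_cast<size_t>(random));
}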
diff --git a/third_party/base/allocator/partition_allocator/oom.h b/third_party/base/allocator/partition_allocator/oom.h
index 41f29b5642..bbd1ead219 100644
--- a/third_party/base/allocator/partition_allocator/oom.h
+++ b/third_party/base/allocator/partition_allocator/oom.h
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_ALLOCATOR_OOM_H
-#define BASE_ALLOCATOR_OOM_H
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
+#include "third_party/base/allocator/partition_allocator/oom_callback.h"
#include "third_party/base/logging.h"
#if defined(OS_WIN)
@@ -23,15 +24,17 @@
#define OOM_CRASH() \
do { \
OOM_CRASH_PREVENT_ICF(); \
+ base::internal::RunPartitionAllocOomCallback(); \
::RaiseException(0xE0000008, EXCEPTION_NONCONTINUABLE, 0, nullptr); \
IMMEDIATE_CRASH(); \
} while (0)
#else
-#define OOM_CRASH() \
- do { \
- OOM_CRASH_PREVENT_ICF(); \
- IMMEDIATE_CRASH(); \
+#define OOM_CRASH() \
+ do { \
+ base::internal::RunPartitionAllocOomCallback(); \
+ OOM_CRASH_PREVENT_ICF(); \
+ IMMEDIATE_CRASH(); \
} while (0)
#endif
-#endif // BASE_ALLOCATOR_OOM_H
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
diff --git a/third_party/base/allocator/partition_allocator/oom_callback.cc b/third_party/base/allocator/partition_allocator/oom_callback.cc
new file mode 100644
index 0000000000..36c6e978a2
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/oom_callback.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/oom_callback.h"
+#include "third_party/base/logging.h"
+
+namespace pdfium {
+namespace base {
+
+namespace {
+PartitionAllocOomCallback g_oom_callback;
+} // namespace
+
+void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
+ DCHECK(!g_oom_callback);
+ g_oom_callback = callback;
+}
+
+namespace internal {
+void RunPartitionAllocOomCallback() {
+ if (g_oom_callback)
+ g_oom_callback();
+}
+} // namespace internal
+
+} // namespace base
+} // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/oom_callback.h b/third_party/base/allocator/partition_allocator/oom_callback.h
new file mode 100644
index 0000000000..044b167ff5
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/oom_callback.h
@@ -0,0 +1,26 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
+
+#include "third_party/base/base_export.h"
+
+namespace pdfium {
+namespace base {
+typedef void (*PartitionAllocOomCallback)();
+// Registers a callback to be invoked during an OOM_CRASH(). OOM_CRASH is
+// invoked by users of PageAllocator (including PartitionAlloc) to signify an
+// allocation failure from the platform.
+BASE_EXPORT void SetPartitionAllocOomCallback(
+ PartitionAllocOomCallback callback);
+
+namespace internal {
+BASE_EXPORT void RunPartitionAllocOomCallback();
+} // namespace internal
+
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_CALLBACK_H_
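
A sketch of how an embedder might use the new hook; the helper name and the logging are illustrative only, and the callback runs during OOM_CRASH(), so it must not allocate:

#include <cstdio>

#include "third_party/base/allocator/partition_allocator/oom_callback.h"

namespace {
// Invoked just before the process terminates; allocation-free by design.
void OnPartitionAllocOom() {
  fputs("PartitionAlloc: out of memory\n", stderr);
}
}  // namespace

void InstallOomHook() {  // hypothetical helper; call at most once
  // SetPartitionAllocOomCallback() DCHECKs that no callback is registered.
  pdfium::base::SetPartitionAllocOomCallback(&OnPartitionAllocOom);
}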
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.cc b/third_party/base/allocator/partition_allocator/page_allocator.cc
index 0869bdb769..a65fbaad80 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator.cc
+++ b/third_party/base/allocator/partition_allocator/page_allocator.cc
@@ -10,151 +10,163 @@
#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/address_space_randomization.h"
-#include "third_party/base/base_export.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator_internal.h"
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
#include "third_party/base/logging.h"
+#include "third_party/base/numerics/safe_math.h"
-#if defined(OS_POSIX)
-
-#include <errno.h>
-#include <sys/mman.h>
-
-#ifndef MADV_FREE
-#define MADV_FREE MADV_DONTNEED
-#endif
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-// On POSIX |mmap| uses a nearby address if the hint address is blocked.
-static const bool kHintIsAdvisory = true;
-static std::atomic<int32_t> s_allocPageErrorCode{0};
-
-#elif defined(OS_WIN)
-
+#if defined(OS_WIN)
#include <windows.h>
+#endif
-// |VirtualAlloc| will fail if allocation at the hint address is blocked.
-static const bool kHintIsAdvisory = false;
-static std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
-
+#if defined(OS_WIN)
+#include "third_party/base/allocator/partition_allocator/page_allocator_internals_win.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h"
#else
-#error Unknown OS
-#endif // defined(OS_POSIX)
+#error Platform not supported.
+#endif
namespace pdfium {
namespace base {
-// This internal function wraps the OS-specific page allocation call:
-// |VirtualAlloc| on Windows, and |mmap| on POSIX.
-static void* SystemAllocPages(
- void* hint,
- size_t length,
- PageAccessibilityConfiguration page_accessibility) {
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
- DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
- kPageAllocationGranularityOffsetMask));
- void* ret;
-#if defined(OS_WIN)
- DWORD access_flag =
- page_accessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS;
- ret = VirtualAlloc(hint, length, MEM_RESERVE | MEM_COMMIT, access_flag);
- if (!ret)
- s_allocPageErrorCode = GetLastError();
-#else
- int access_flag = page_accessibility == PageAccessible
- ? (PROT_READ | PROT_WRITE)
- : PROT_NONE;
- ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- if (ret == MAP_FAILED) {
- s_allocPageErrorCode = errno;
- ret = 0;
+namespace {
+
+// We may reserve/release address space on different threads.
+subtle::SpinLock* GetReserveLock() {
+ static subtle::SpinLock* s_reserveLock = nullptr;
+ if (!s_reserveLock)
+ s_reserveLock = new subtle::SpinLock();
+ return s_reserveLock;
+}
+
+// We only support a single block of reserved address space.
+void* s_reservation_address = nullptr;
+size_t s_reservation_size = 0;
+
+void* AllocPagesIncludingReserved(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
+ void* ret =
+ SystemAllocPages(address, length, accessibility, page_tag, commit);
+ if (ret == nullptr) {
+ const bool cant_alloc_length = kHintIsAdvisory || address == nullptr;
+ if (cant_alloc_length) {
+ // The system cannot allocate |length| bytes. Release any reserved address
+ // space and try once more.
+ ReleaseReservation();
+ ret = SystemAllocPages(address, length, accessibility, page_tag, commit);
+ }
}
-#endif
return ret;
}
-// Trims base to given length and alignment. Windows returns null on failure and
-// frees base.
-static void* TrimMapping(void* base,
- size_t base_length,
- size_t trim_length,
- uintptr_t align,
- PageAccessibilityConfiguration page_accessibility) {
- size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (align - 1);
- if (pre_slack)
- pre_slack = align - pre_slack;
+// Trims |base| to given |trim_length| and |alignment|.
+//
+// On failure, on Windows, this function returns nullptr and frees |base|.
+void* TrimMapping(void* base,
+ size_t base_length,
+ size_t trim_length,
+ uintptr_t alignment,
+ PageAccessibilityConfiguration accessibility,
+ bool commit) {
+ size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (alignment - 1);
+ if (pre_slack) {
+ pre_slack = alignment - pre_slack;
+ }
size_t post_slack = base_length - pre_slack - trim_length;
DCHECK(base_length >= trim_length || pre_slack || post_slack);
DCHECK(pre_slack < base_length);
DCHECK(post_slack < base_length);
- void* ret = base;
+ return TrimMappingInternal(base, base_length, trim_length, accessibility,
+ commit, pre_slack, post_slack);
+}
-#if defined(OS_POSIX) // On POSIX we can resize the allocation run.
- (void)page_accessibility;
- if (pre_slack) {
- int res = munmap(base, pre_slack);
- CHECK(!res);
- ret = reinterpret_cast<char*>(base) + pre_slack;
- }
- if (post_slack) {
- int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
- CHECK(!res);
- }
-#else // On Windows we can't resize the allocation run.
- if (pre_slack || post_slack) {
- ret = reinterpret_cast<char*>(base) + pre_slack;
- FreePages(base, base_length);
- ret = SystemAllocPages(ret, trim_length, page_accessibility);
- }
-#endif
+} // namespace
- return ret;
+void* SystemAllocPages(void* hint,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
+ DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
+ kPageAllocationGranularityOffsetMask));
+ DCHECK(commit || accessibility == PageInaccessible);
+ return SystemAllocPagesInternal(hint, length, accessibility, page_tag,
+ commit);
}
void* AllocPages(void* address,
size_t length,
size_t align,
- PageAccessibilityConfiguration page_accessibility) {
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
DCHECK(length >= kPageAllocationGranularity);
DCHECK(!(length & kPageAllocationGranularityOffsetMask));
DCHECK(align >= kPageAllocationGranularity);
- DCHECK(!(align & kPageAllocationGranularityOffsetMask));
+ // Alignment must be power of 2 for masking math to work.
+ DCHECK_EQ(align & (align - 1), 0UL);
DCHECK(!(reinterpret_cast<uintptr_t>(address) &
kPageAllocationGranularityOffsetMask));
uintptr_t align_offset_mask = align - 1;
uintptr_t align_base_mask = ~align_offset_mask;
DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+ // On 64 bit Linux, we may need to adjust the address space limit for
+ // guarded allocations.
+ if (length >= kMinimumGuardedMemorySize) {
+ CHECK(PageInaccessible == accessibility);
+ CHECK(!commit);
+ if (!AdjustAddressSpaceLimit(base::checked_cast<int64_t>(length))) {
+ // Fall through. Try the allocation, since we may have a reserve.
+ }
+ }
+#endif
+
// If the client passed null as the address, choose a good one.
- if (!address) {
+ if (address == nullptr) {
address = GetRandomPageBase();
address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
align_base_mask);
}
// First try to force an exact-size, aligned allocation from our random base.
- for (int count = 0; count < 3; ++count) {
- void* ret = SystemAllocPages(address, length, page_accessibility);
- if (kHintIsAdvisory || ret) {
+#if defined(ARCH_CPU_32_BITS)
+ // On 32 bit systems, first try one random aligned address, and then try an
+ // aligned address derived from the value of |ret|.
+ constexpr int kExactSizeTries = 2;
+#else
+ // On 64 bit systems, try 3 random aligned addresses.
+ constexpr int kExactSizeTries = 3;
+#endif
+
+ for (int i = 0; i < kExactSizeTries; ++i) {
+ void* ret = AllocPagesIncludingReserved(address, length, accessibility,
+ page_tag, commit);
+ if (ret != nullptr) {
// If the alignment is to our liking, we're done.
if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
return ret;
+ // Free the memory and try again.
FreePages(ret, length);
-#if defined(ARCH_CPU_32_BITS)
- address = reinterpret_cast<void*>(
- (reinterpret_cast<uintptr_t>(ret) + align) & align_base_mask);
-#endif
- } else if (!address) { // We know we're OOM when an unhinted allocation
- // fails.
- return nullptr;
} else {
-#if defined(ARCH_CPU_32_BITS)
- address = reinterpret_cast<char*>(address) + align;
-#endif
+ // |ret| is null; if this try was unhinted, we're OOM.
+ if (kHintIsAdvisory || address == nullptr)
+ return nullptr;
}
-#if !defined(ARCH_CPU_32_BITS)
+#if defined(ARCH_CPU_32_BITS)
+ // For small address spaces, try the first aligned address >= |ret|. Note
+ // |ret| may be null, in which case |address| becomes null.
+ address = reinterpret_cast<void*>(
+ (reinterpret_cast<uintptr_t>(ret) + align_offset_mask) &
+ align_base_mask);
+#else // defined(ARCH_CPU_64_BITS)
// Keep trying random addresses on systems that have a large address space.
address = GetRandomPageBase();
address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
@@ -162,21 +174,21 @@ void* AllocPages(void* address,
#endif
}
- // Map a larger allocation so we can force alignment, but continue randomizing
- // only on 64-bit POSIX.
+ // Make a larger allocation so we can force alignment.
size_t try_length = length + (align - kPageAllocationGranularity);
CHECK(try_length >= length);
void* ret;
do {
- // Don't continue to burn cycles on mandatory hints (Windows).
+ // Continue randomizing only on POSIX.
address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
- ret = SystemAllocPages(address, try_length, page_accessibility);
+ ret = AllocPagesIncludingReserved(address, try_length, accessibility,
+ page_tag, commit);
// The retries are for Windows, where a race can steal our mapping on
// resize.
- } while (ret &&
- (ret = TrimMapping(ret, try_length, length, align,
- page_accessibility)) == nullptr);
+ } while (ret != nullptr &&
+ (ret = TrimMapping(ret, try_length, length, align, accessibility,
+ commit)) == nullptr);
return ret;
}
@@ -185,92 +197,60 @@ void FreePages(void* address, size_t length) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) &
kPageAllocationGranularityOffsetMask));
DCHECK(!(length & kPageAllocationGranularityOffsetMask));
-#if defined(OS_POSIX)
- int ret = munmap(address, length);
- CHECK(!ret);
-#else
- BOOL ret = VirtualFree(address, 0, MEM_RELEASE);
- CHECK(ret);
-#endif
+ FreePagesInternal(address, length);
}
-void SetSystemPagesInaccessible(void* address, size_t length) {
+bool SetSystemPagesAccess(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- int ret = mprotect(address, length, PROT_NONE);
- CHECK(!ret);
-#else
- BOOL ret = VirtualFree(address, length, MEM_DECOMMIT);
- CHECK(ret);
-#endif
-}
-
-bool SetSystemPagesAccessible(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- return !mprotect(address, length, PROT_READ | PROT_WRITE);
-#else
- return !!VirtualAlloc(address, length, MEM_COMMIT, PAGE_READWRITE);
-#endif
+ return SetSystemPagesAccessInternal(address, length, accessibility);
}
void DecommitSystemPages(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- int ret = madvise(address, length, MADV_FREE);
- if (ret != 0 && errno == EINVAL) {
- // MADV_FREE only works on Linux 4.5+ . If request failed,
- // retry with older MADV_DONTNEED . Note that MADV_FREE
- // being defined at compile time doesn't imply runtime support.
- ret = madvise(address, length, MADV_DONTNEED);
- }
- CHECK(!ret);
-#else
- SetSystemPagesInaccessible(address, length);
-#endif
+ DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ DecommitSystemPagesInternal(address, length);
}
-void RecommitSystemPages(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- (void)address;
-#else
- CHECK(SetSystemPagesAccessible(address, length));
-#endif
+bool RecommitSystemPages(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+ DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ DCHECK(PageInaccessible != accessibility);
+ return RecommitSystemPagesInternal(address, length, accessibility);
}
void DiscardSystemPages(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- // On POSIX, the implementation detail is that discard and decommit are the
- // same, and lead to pages that are returned to the system immediately and
- // get replaced with zeroed pages when touched. So we just call
- // DecommitSystemPages() here to avoid code duplication.
- DecommitSystemPages(address, length);
-#else
- // On Windows discarded pages are not returned to the system immediately and
- // not guaranteed to be zeroed when returned to the application.
- using DiscardVirtualMemoryFunction =
- DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
- static DiscardVirtualMemoryFunction discard_virtual_memory =
- reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
- if (discard_virtual_memory ==
- reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
- discard_virtual_memory =
- reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
- GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
- // Use DiscardVirtualMemory when available because it releases faster than
- // MEM_RESET.
- DWORD ret = 1;
- if (discard_virtual_memory)
- ret = discard_virtual_memory(address, length);
- // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
- // failure.
- if (ret) {
- void* ret = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
- CHECK(ret);
+ DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ DiscardSystemPagesInternal(address, length);
+}
+
+bool ReserveAddressSpace(size_t size) {
+ // To avoid deadlock, call only SystemAllocPages.
+ subtle::SpinLock::Guard guard(*GetReserveLock());
+ if (s_reservation_address == nullptr) {
+ void* mem = SystemAllocPages(nullptr, size, PageInaccessible,
+ PageTag::kChromium, false);
+ if (mem != nullptr) {
+ // We guarantee this alignment when reserving address space.
+ DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
+ kPageAllocationGranularityOffsetMask));
+ s_reservation_address = mem;
+ s_reservation_size = size;
+ return true;
+ }
+ }
+ return false;
+}
+
+void ReleaseReservation() {
+ // To avoid deadlock, call only FreePages.
+ subtle::SpinLock::Guard guard(*GetReserveLock());
+ if (s_reservation_address != nullptr) {
+ FreePages(s_reservation_address, s_reservation_size);
+ s_reservation_address = nullptr;
+ s_reservation_size = 0;
}
-#endif
}
uint32_t GetAllocPageErrorCode() {
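
Putting the updated surface together, a minimal hypothetical call site might look like this; |length| must be a multiple of kPageAllocationGranularity, and the tag and commit arguments default to PageTag::kChromium and true:

#include "third_party/base/allocator/partition_allocator/page_allocator.h"

// Sketch only; error handling is illustrative.
void* AllocScratch(size_t length) {
  void* p = pdfium::base::AllocPages(
      /*address=*/nullptr, length,
      /*align=*/pdfium::base::kPageAllocationGranularity,
      pdfium::base::PageReadWrite);
  if (!p) {
    // errno (POSIX) or GetLastError() (Windows) from the failed attempt.
    uint32_t err = pdfium::base::GetAllocPageErrorCode();
    (void)err;
  }
  return p;
}

void FreeScratch(void* p, size_t length) {
  pdfium::base::FreePages(p, length);
}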
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.h b/third_party/base/allocator/partition_allocator/page_allocator.h
index bf9c0987a0..64be33c3c2 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator.h
+++ b/third_party/base/allocator/partition_allocator/page_allocator.h
@@ -2,132 +2,180 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
#include <stdint.h>
#include <cstddef>
#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator_constants.h"
#include "third_party/base/base_export.h"
#include "third_party/base/compiler_specific.h"
namespace pdfium {
namespace base {
-#if defined(OS_WIN)
-static const size_t kPageAllocationGranularityShift = 16; // 64KB
-#elif defined(_MIPS_ARCH_LOONGSON)
-static const size_t kPageAllocationGranularityShift = 14; // 16KB
-#else
-static const size_t kPageAllocationGranularityShift = 12; // 4KB
-#endif
-static const size_t kPageAllocationGranularity =
- 1 << kPageAllocationGranularityShift;
-static const size_t kPageAllocationGranularityOffsetMask =
- kPageAllocationGranularity - 1;
-static const size_t kPageAllocationGranularityBaseMask =
- ~kPageAllocationGranularityOffsetMask;
-
-// All Blink-supported systems have 4096 sized system pages and can handle
-// permissions and commit / decommit at this granularity.
-// Loongson have 16384 sized system pages.
-#if defined(_MIPS_ARCH_LOONGSON)
-static const size_t kSystemPageSize = 16384;
-#else
-static const size_t kSystemPageSize = 4096;
-#endif
-static const size_t kSystemPageOffsetMask = kSystemPageSize - 1;
-static const size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;
-
enum PageAccessibilityConfiguration {
- PageAccessible,
PageInaccessible,
+ PageRead,
+ PageReadWrite,
+ PageReadExecute,
+ // This flag is deprecated and will go away soon.
+ // TODO(bbudge) Remove this as soon as V8 doesn't need RWX pages.
+ PageReadWriteExecute,
+};
+
+// Mac OSX supports tagged memory regions, to help in debugging.
+enum class PageTag {
+ kFirst = 240, // Minimum tag value.
+ kChromium = 254, // Chromium page, including off-heap V8 ArrayBuffers.
+ kV8 = 255, // V8 heap pages.
+ kLast = kV8 // Maximum tag value.
};
// Allocate one or more pages.
-// The requested address is just a hint; the actual address returned may
-// differ. The returned address will be aligned at least to align bytes.
-// len is in bytes, and must be a multiple of kPageAllocationGranularity.
-// align is in bytes, and must be a power-of-two multiple of
-// kPageAllocationGranularity.
-// If addr is null, then a suitable and randomized address will be chosen
+//
+// The requested |address| is just a hint; the actual address returned may
+// differ. The returned address will be aligned at least to |align| bytes.
+// |length| is in bytes, and must be a multiple of |kPageAllocationGranularity|.
+// |align| is in bytes, and must be a power-of-two multiple of
+// |kPageAllocationGranularity|.
+//
+// If |address| is null, then a suitable and randomized address will be chosen
// automatically.
-// PageAccessibilityConfiguration controls the permission of the
-// allocated pages.
+//
+// |page_accessibility| controls the permission of the allocated pages.
+//
// This call will return null if the allocation cannot be satisfied.
BASE_EXPORT void* AllocPages(void* address,
- size_t len,
+ size_t length,
size_t align,
- PageAccessibilityConfiguration);
-
-// Free one or more pages.
-// addr and len must match a previous call to allocPages().
+ PageAccessibilityConfiguration page_accessibility,
+ PageTag tag = PageTag::kChromium,
+ bool commit = true);
+
+// Free one or more pages starting at |address| and continuing for |length|
+// bytes.
+//
+// |address| and |length| must match a previous call to |AllocPages|. Therefore,
+// |address| must be aligned to |kPageAllocationGranularity| bytes, and |length|
+// must be a multiple of |kPageAllocationGranularity|.
BASE_EXPORT void FreePages(void* address, size_t length);
-// Mark one or more system pages as being inaccessible.
-// Subsequently accessing any address in the range will fault, and the
-// addresses will not be re-used by future allocations.
-// len must be a multiple of kSystemPageSize bytes.
-BASE_EXPORT void SetSystemPagesInaccessible(void* address, size_t length);
-
-// Mark one or more system pages as being accessible.
-// The pages will be readable and writeable.
-// len must be a multiple of kSystemPageSize bytes.
-// The result bool value indicates whether the permission
-// change succeeded or not. You must check the result
-// (in most cases you need to CHECK that it is true).
-BASE_EXPORT WARN_UNUSED_RESULT bool SetSystemPagesAccessible(void* address,
- size_t length);
-
-// Decommit one or more system pages. Decommitted means that the physical memory
-// is released to the system, but the virtual address space remains reserved.
-// System pages are re-committed by calling recommitSystemPages(). Touching
-// a decommitted page _may_ fault.
-// Clients should not make any assumptions about the contents of decommitted
-// system pages, before or after they write to the page. The only guarantee
-// provided is that the contents of the system page will be deterministic again
-// after recommitting and writing to it. In particlar note that system pages are
-// not guaranteed to be zero-filled upon re-commit. len must be a multiple of
-// kSystemPageSize bytes.
+// Mark one or more system pages, starting at |address| with the given
+// |page_accessibility|. |length| must be a multiple of |kSystemPageSize| bytes.
+//
+// Returns true if the permission change succeeded. In most cases you must
+// |CHECK| the result.
+BASE_EXPORT WARN_UNUSED_RESULT bool SetSystemPagesAccess(
+ void* address,
+ size_t length,
+ PageAccessibilityConfiguration page_accessibility);
+
+// Decommit one or more system pages starting at |address| and continuing for
+// |length| bytes. |length| must be a multiple of |kSystemPageSize|.
+//
+// Decommitted means that physical resources (RAM or swap) backing the allocated
+// virtual address range are released back to the system, but the address space
+// is still allocated to the process (possibly using up page table entries or
+// other accounting resources). Any access to a decommitted region of memory
+// is an error and will generate a fault.
+//
+// This operation is not atomic on all platforms.
+//
+// Note: "Committed memory" is a Windows Memory Subsystem concept that ensures
+// processes will not fault when touching a committed memory region. There is
+// no analogue in the POSIX memory API where virtual memory pages are
+// best-effort allocated resources on the first touch. To create a
+// platform-agnostic abstraction, this API simulates the Windows "decommit"
+// state by both discarding the region (allowing the OS to avoid swap
+// operations) and changing the page protections so accesses fault.
+//
+// TODO(ajwong): This currently does not change page protections on POSIX
+// systems due to a perf regression. Tracked at http://crbug.com/766882.
BASE_EXPORT void DecommitSystemPages(void* address, size_t length);
-// Recommit one or more system pages. Decommitted system pages must be
-// recommitted before they are read are written again.
-// Note that this operation may be a no-op on some platforms.
-// len must be a multiple of kSystemPageSize bytes.
-BASE_EXPORT void RecommitSystemPages(void* address, size_t length);
-
-// Discard one or more system pages. Discarding is a hint to the system that
-// the page is no longer required. The hint may:
-// - Do nothing.
-// - Discard the page immediately, freeing up physical pages.
-// - Discard the page at some time in the future in response to memory pressure.
-// Only committed pages should be discarded. Discarding a page does not
-// decommit it, and it is valid to discard an already-discarded page.
-// A read or write to a discarded page will not fault.
-// Reading from a discarded page may return the original page content, or a
-// page full of zeroes.
+// Recommit one or more system pages, starting at |address| and continuing for
+// |length| bytes with the given |page_accessibility|. |length| must be a
+// multiple of |kSystemPageSize|.
+//
+// Decommitted system pages must be recommitted with their original permissions
+// before they are used again.
+//
+// Returns true if the recommit change succeeded. In most cases you must |CHECK|
+// the result.
+BASE_EXPORT WARN_UNUSED_RESULT bool RecommitSystemPages(
+ void* address,
+ size_t length,
+ PageAccessibilityConfiguration page_accessibility);
+
+// Discard one or more system pages starting at |address| and continuing for
+// |length| bytes. |length| must be a multiple of |kSystemPageSize|.
+//
+// Discarding is a hint to the system that the page is no longer required. The
+// hint may:
+// - Do nothing.
+// - Discard the page immediately, freeing up physical pages.
+// - Discard the page at some time in the future in response to memory
+// pressure.
+//
+// Only committed pages should be discarded. Discarding a page does not decommit
+// it, and it is valid to discard an already-discarded page. A read or write to
+// a discarded page will not fault.
+//
+// Reading from a discarded page may return the original page content, or a page
+// full of zeroes.
+//
// Writing to a discarded page is the only guaranteed way to tell the system
// that the page is required again. Once written to, the content of the page is
// guaranteed stable once more. After being written to, the page content may be
// based on the original page content, or a page of zeroes.
-// len must be a multiple of kSystemPageSize bytes.
BASE_EXPORT void DiscardSystemPages(void* address, size_t length);
-ALWAYS_INLINE uintptr_t RoundUpToSystemPage(uintptr_t address) {
+// Rounds up |address| to the next multiple of |kSystemPageSize|. Returns
+// 0 for an |address| of 0.
+constexpr ALWAYS_INLINE uintptr_t RoundUpToSystemPage(uintptr_t address) {
return (address + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
-ALWAYS_INLINE uintptr_t RoundDownToSystemPage(uintptr_t address) {
+// Rounds down |address| to the previous multiple of |kSystemPageSize|. Returns
+// 0 for an |address| of 0.
+constexpr ALWAYS_INLINE uintptr_t RoundDownToSystemPage(uintptr_t address) {
return address & kSystemPageBaseMask;
}
-// Returns errno (or GetLastError code) when mmap (or VirtualAlloc) fails.
+// Rounds up |address| to the next multiple of |kPageAllocationGranularity|.
+// Returns 0 for an |address| of 0.
+constexpr ALWAYS_INLINE uintptr_t
+RoundUpToPageAllocationGranularity(uintptr_t address) {
+ return (address + kPageAllocationGranularityOffsetMask) &
+ kPageAllocationGranularityBaseMask;
+}
+
+// Rounds down |address| to the previous multiple of
+// |kPageAllocationGranularity|. Returns 0 for an |address| of 0.
+constexpr ALWAYS_INLINE uintptr_t
+RoundDownToPageAllocationGranularity(uintptr_t address) {
+ return address & kPageAllocationGranularityBaseMask;
+}
+
+// Reserves (at least) |size| bytes of address space, aligned to
+// |kPageAllocationGranularity|. This can be called early on to make it more
+// likely that large allocations will succeed. Returns true if the reservation
+// succeeded, false if the reservation failed or a reservation was already made.
+BASE_EXPORT bool ReserveAddressSpace(size_t size);
+
+// Releases any reserved address space. |AllocPages| calls this automatically on
+// an allocation failure. External allocators may also call this on failure.
+BASE_EXPORT void ReleaseReservation();
+
+// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
+// (POSIX) or |VirtualAlloc| (Windows) fails.
BASE_EXPORT uint32_t GetAllocPageErrorCode();
} // namespace base
} // namespace pdfium
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H_
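
As a usage sketch of the decommit/recommit contract documented above, assuming the span was originally mapped PageReadWrite (helper names are hypothetical; |length| must be a multiple of kSystemPageSize):

#include "third_party/base/allocator/partition_allocator/page_allocator.h"
#include "third_party/base/logging.h"

void ParkSpan(void* address, size_t length) {
  // Physical backing may be released; the address range stays reserved.
  pdfium::base::DecommitSystemPages(address, length);
}

void UnparkSpan(void* address, size_t length) {
  // Restore the original permissions before the memory is used again.
  CHECK(pdfium::base::RecommitSystemPages(address, length,
                                          pdfium::base::PageReadWrite));
}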
diff --git a/third_party/base/allocator/partition_allocator/page_allocator_constants.h b/third_party/base/allocator/partition_allocator/page_allocator_constants.h
new file mode 100644
index 0000000000..945273b1f2
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/page_allocator_constants.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
+
+#include <stddef.h>
+
+#include "build/build_config.h"
+
+namespace pdfium {
+namespace base {
+#if defined(OS_WIN)
+static constexpr size_t kPageAllocationGranularityShift = 16; // 64KB
+#elif defined(_MIPS_ARCH_LOONGSON)
+static constexpr size_t kPageAllocationGranularityShift = 14; // 16KB
+#else
+static constexpr size_t kPageAllocationGranularityShift = 12; // 4KB
+#endif
+static constexpr size_t kPageAllocationGranularity =
+ 1 << kPageAllocationGranularityShift;
+static constexpr size_t kPageAllocationGranularityOffsetMask =
+ kPageAllocationGranularity - 1;
+static constexpr size_t kPageAllocationGranularityBaseMask =
+ ~kPageAllocationGranularityOffsetMask;
+
+#if defined(_MIPS_ARCH_LOONGSON)
+static constexpr size_t kSystemPageSize = 16384;
+#else
+static constexpr size_t kSystemPageSize = 4096;
+#endif
+static constexpr size_t kSystemPageOffsetMask = kSystemPageSize - 1;
+static_assert((kSystemPageSize & (kSystemPageSize - 1)) == 0,
+ "kSystemPageSize must be power of 2");
+static constexpr size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;
+
+static constexpr size_t kPageMetadataShift = 5; // 32 bytes per partition page.
+static constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
+
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
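
A worked example of the mask arithmetic, using the Windows values above (purely illustrative):

#include <cstddef>

constexpr size_t kShift = 16;                          // Windows: 64 KiB
constexpr size_t kGranularity = size_t{1} << kShift;   // 0x10000
constexpr size_t kOffsetMask = kGranularity - 1;       // 0x0ffff
constexpr size_t kBaseMask = ~kOffsetMask;

// Rounding an address up/down to the allocation granularity:
static_assert(((0x12345u + kOffsetMask) & kBaseMask) == 0x20000u,
              "0x12345 rounds up to the next 64 KiB boundary");
static_assert((0x12345u & kBaseMask) == 0x10000u,
              "0x12345 rounds down to the previous 64 KiB boundary");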
diff --git a/third_party/base/allocator/partition_allocator/page_allocator_internal.h b/third_party/base/allocator/partition_allocator/page_allocator_internal.h
new file mode 100644
index 0000000000..22843149e1
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/page_allocator_internal.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
+
+namespace pdfium {
+namespace base {
+
+void* SystemAllocPages(void* hint,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit);
+
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
diff --git a/third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h b/third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h
new file mode 100644
index 0000000000..0622222b53
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -0,0 +1,187 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
+
+#include <errno.h>
+#include <sys/mman.h>
+
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include <mach/mach.h>
+#endif
+#if defined(OS_LINUX)
+#include <sys/resource.h>
+
+#include <algorithm>
+#endif
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+namespace pdfium {
+namespace base {
+
+// |mmap| uses a nearby address if the hint address is blocked.
+constexpr bool kHintIsAdvisory = true;
+std::atomic<int32_t> s_allocPageErrorCode{0};
+
+int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
+ switch (accessibility) {
+ case PageRead:
+ return PROT_READ;
+ case PageReadWrite:
+ return PROT_READ | PROT_WRITE;
+ case PageReadExecute:
+ return PROT_READ | PROT_EXEC;
+ case PageReadWriteExecute:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ default:
+ NOTREACHED();
+ FALLTHROUGH;
+ case PageInaccessible:
+ return PROT_NONE;
+ }
+}
+
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+
+// Multiple guarded memory regions may exceed the process address space limit.
+// This function will raise or lower the limit by |amount|.
+bool AdjustAddressSpaceLimit(int64_t amount) {
+ struct rlimit old_rlimit;
+ if (getrlimit(RLIMIT_AS, &old_rlimit))
+ return false;
+ const rlim_t new_limit =
+ CheckAdd(old_rlimit.rlim_cur, amount).ValueOrDefault(old_rlimit.rlim_max);
+ const struct rlimit new_rlimit = {std::min(new_limit, old_rlimit.rlim_max),
+ old_rlimit.rlim_max};
+ // setrlimit will fail if limit > old_rlimit.rlim_max.
+ return setrlimit(RLIMIT_AS, &new_rlimit) == 0;
+}
+
+// Current WASM guarded memory regions have 8 GiB of address space. There are
+// schemes that reduce that to 4 GiB.
+constexpr size_t kMinimumGuardedMemorySize = 1ULL << 32; // 4 GiB
+
+#endif // defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+
+void* SystemAllocPagesInternal(void* hint,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
+#if defined(OS_MACOSX)
+ // Use a custom tag to make it easier to distinguish Partition Alloc regions
+ // in vmmap(1). Tags between 240-255 are supported.
+ DCHECK(PageTag::kFirst <= page_tag);
+ DCHECK(PageTag::kLast >= page_tag);
+ int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
+#else
+ int fd = -1;
+#endif
+
+ int access_flag = GetAccessFlags(accessibility);
+ void* ret =
+ mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
+ if (ret == MAP_FAILED) {
+ s_allocPageErrorCode = errno;
+ ret = nullptr;
+ }
+ return ret;
+}
+
+void* TrimMappingInternal(void* base,
+ size_t base_length,
+ size_t trim_length,
+ PageAccessibilityConfiguration accessibility,
+ bool commit,
+ size_t pre_slack,
+ size_t post_slack) {
+ void* ret = base;
+ // We can resize the allocation run. Release unneeded memory before and after
+ // the aligned range.
+ if (pre_slack) {
+ int res = munmap(base, pre_slack);
+ CHECK(!res);
+ ret = reinterpret_cast<char*>(base) + pre_slack;
+ }
+ if (post_slack) {
+ int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
+ CHECK(!res);
+ }
+ return ret;
+}
+
+bool SetSystemPagesAccessInternal(
+ void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+ return 0 == mprotect(address, length, GetAccessFlags(accessibility));
+}
+
+void FreePagesInternal(void* address, size_t length) {
+ CHECK(!munmap(address, length));
+
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+ // Restore the address space limit.
+ if (length >= kMinimumGuardedMemorySize) {
+ CHECK(AdjustAddressSpaceLimit(-base::checked_cast<int64_t>(length)));
+ }
+#endif
+}
+
+void DecommitSystemPagesInternal(void* address, size_t length) {
+ // In POSIX, there is no decommit concept. Discarding is an effective way of
+ // implementing the Windows semantics where the OS is allowed to not swap the
+ // pages in the region.
+ //
+ // TODO(ajwong): Also explore setting PageInaccessible to make the protection
+ // semantics consistent between Windows and POSIX. This might have a perf cost
+ // though as both decommit and recommit would incur an extra syscall.
+ // http://crbug.com/766882
+ DiscardSystemPages(address, length);
+}
+
+bool RecommitSystemPagesInternal(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+#if defined(OS_MACOSX)
+ // On macOS, to update accounting, we need to make another syscall. For more
+ // details, see https://crbug.com/823915.
+ madvise(address, length, MADV_FREE_REUSE);
+#endif
+
+ // On POSIX systems, the caller need simply read the memory to recommit it.
+ // This has the correct behavior because the API requires the permissions to
+ // be the same as before decommitting and all configurations can read.
+ return true;
+}
+
+void DiscardSystemPagesInternal(void* address, size_t length) {
+#if defined(OS_MACOSX)
+ int ret = madvise(address, length, MADV_FREE_REUSABLE);
+ if (ret) {
+ // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
+ ret = madvise(address, length, MADV_DONTNEED);
+ }
+ CHECK(0 == ret);
+#else
+ // We have experimented with other flags, but with suboptimal results.
+ //
+ // MADV_FREE (Linux): Makes our memory measurements less predictable;
+ // performance benefits unclear.
+ //
+ // Therefore, we just do the simple thing: MADV_DONTNEED.
+ CHECK(!madvise(address, length, MADV_DONTNEED));
+#endif
+}
+
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_POSIX_H_
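The POSIX backend above reduces to mmap/mprotect/madvise. As a minimal standalone sketch of the reserve-then-protect pattern that SystemAllocPagesInternal and SetSystemPagesAccessInternal implement (illustrative only, not part of the patch):

    // sketch.cc -- raw syscalls; PartitionAlloc's real entry points are
    // AllocPages() and SetSystemPagesAccess().
    #include <sys/mman.h>
    #include <cassert>
    #include <cstring>

    int main() {
      const size_t kLen = 4096;  // assumes a 4 KiB system page
      // PROT_NONE mirrors the PageInaccessible case of GetAccessFlags().
      void* p =
          mmap(nullptr, kLen, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      assert(p != MAP_FAILED);
      // Flipping to PROT_READ | PROT_WRITE mirrors the PageReadWrite case.
      int rc = mprotect(p, kLen, PROT_READ | PROT_WRITE);
      assert(rc == 0);
      memset(p, 0, kLen);  // now safe to touch
      munmap(p, kLen);
      return 0;
    }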
diff --git a/third_party/base/allocator/partition_allocator/page_allocator_internals_win.h b/third_party/base/allocator/partition_allocator/page_allocator_internals_win.h
new file mode 100644
index 0000000000..57a11c521b
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/page_allocator_internals_win.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
+
+#include "third_party/base/allocator/partition_allocator/page_allocator_internal.h"
+
+namespace pdfium {
+namespace base {
+
+// |VirtualAlloc| will fail if allocation at the hint address is blocked.
+constexpr bool kHintIsAdvisory = false;
+std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
+
+int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
+ switch (accessibility) {
+ case PageRead:
+ return PAGE_READONLY;
+ case PageReadWrite:
+ return PAGE_READWRITE;
+ case PageReadExecute:
+ return PAGE_EXECUTE_READ;
+ case PageReadWriteExecute:
+ return PAGE_EXECUTE_READWRITE;
+ default:
+ NOTREACHED();
+ FALLTHROUGH;
+ case PageInaccessible:
+ return PAGE_NOACCESS;
+ }
+}
+
+void* SystemAllocPagesInternal(void* hint,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
+ DWORD access_flag = GetAccessFlags(accessibility);
+ const DWORD type_flags = commit ? (MEM_RESERVE | MEM_COMMIT) : MEM_RESERVE;
+ void* ret = VirtualAlloc(hint, length, type_flags, access_flag);
+ if (ret == nullptr) {
+ s_allocPageErrorCode = GetLastError();
+ }
+ return ret;
+}
+
+void* TrimMappingInternal(void* base,
+ size_t base_length,
+ size_t trim_length,
+ PageAccessibilityConfiguration accessibility,
+ bool commit,
+ size_t pre_slack,
+ size_t post_slack) {
+ void* ret = base;
+ if (pre_slack || post_slack) {
+ // We cannot resize the allocation run. Free it and retry at the aligned
+ // address within the freed range.
+ ret = reinterpret_cast<char*>(base) + pre_slack;
+ FreePages(base, base_length);
+ ret = SystemAllocPages(ret, trim_length, accessibility, PageTag::kChromium,
+ commit);
+ }
+ return ret;
+}
+
+bool SetSystemPagesAccessInternal(
+ void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+ if (accessibility == PageInaccessible) {
+ return VirtualFree(address, length, MEM_DECOMMIT) != 0;
+ } else {
+ return nullptr != VirtualAlloc(address, length, MEM_COMMIT,
+ GetAccessFlags(accessibility));
+ }
+}
+
+void FreePagesInternal(void* address, size_t length) {
+ CHECK(VirtualFree(address, 0, MEM_RELEASE));
+}
+
+void DecommitSystemPagesInternal(void* address, size_t length) {
+ CHECK(SetSystemPagesAccess(address, length, PageInaccessible));
+}
+
+bool RecommitSystemPagesInternal(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+ return SetSystemPagesAccess(address, length, accessibility);
+}
+
+void DiscardSystemPagesInternal(void* address, size_t length) {
+ // On Windows, discarded pages are not returned to the system immediately and
+  // are not guaranteed to be zeroed when returned to the application.
+ using DiscardVirtualMemoryFunction =
+ DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
+ static DiscardVirtualMemoryFunction discard_virtual_memory =
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
+ if (discard_virtual_memory ==
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
+ discard_virtual_memory =
+ reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
+ GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
+ // Use DiscardVirtualMemory when available because it releases faster than
+ // MEM_RESET.
+ DWORD ret = 1;
+ if (discard_virtual_memory) {
+ ret = discard_virtual_memory(address, length);
+ }
+ // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
+ // failure.
+ if (ret) {
+ void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
+ CHECK(ptr);
+ }
+}
+
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
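Unlike the POSIX side, the Windows backend separates reserving address space from committing it. A standalone sketch of the reserve/commit/decommit/release lifecycle the functions above drive (illustrative only):

    // sketch.cc -- raw VirtualAlloc/VirtualFree lifecycle.
    #include <windows.h>
    #include <cassert>

    int main() {
      const SIZE_T kLen = 1 << 16;  // assumes 64 KiB allocation granularity
      // Reserve address space only (the commit == false path).
      void* p = VirtualAlloc(nullptr, kLen, MEM_RESERVE, PAGE_NOACCESS);
      assert(p);
      // Commit read/write, as SetSystemPagesAccessInternal does for any
      // accessibility other than PageInaccessible.
      void* q = VirtualAlloc(p, kLen, MEM_COMMIT, PAGE_READWRITE);
      assert(q == p);
      // MEM_DECOMMIT keeps the reservation (the PageInaccessible branch);
      // MEM_RELEASE with size 0 then drops it, as FreePagesInternal does.
      VirtualFree(p, kLen, MEM_DECOMMIT);
      VirtualFree(p, 0, MEM_RELEASE);
      return 0;
    }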
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.cc b/third_party/base/allocator/partition_allocator/partition_alloc.cc
index e8aad9420c..d0d58dfbeb 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.cc
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.cc
@@ -6,172 +6,113 @@
#include <string.h>
-#include "third_party/base/allocator/partition_allocator/oom.h"
+#include <memory>
+#include <type_traits>
+
+#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
+#include "third_party/base/allocator/partition_allocator/partition_oom.h"
+#include "third_party/base/allocator/partition_allocator/partition_page.h"
#include "third_party/base/allocator/partition_allocator/spin_lock.h"
-#include "third_party/base/compiler_specific.h"
+
+namespace pdfium {
+namespace base {
// Two partition pages are used as guard / metadata pages, so make sure the
// super page size is bigger.
-static_assert(pdfium::base::kPartitionPageSize * 4 <=
- pdfium::base::kSuperPageSize,
- "ok super page size");
-static_assert(!(pdfium::base::kSuperPageSize %
- pdfium::base::kPartitionPageSize),
- "ok super page multiple");
+static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size");
+static_assert(!(kSuperPageSize % kPartitionPageSize), "ok super page multiple");
// Four system pages give us room to hack out a still-guard-paged piece
// of metadata in the middle of a guard partition page.
-static_assert(pdfium::base::kSystemPageSize * 4 <=
- pdfium::base::kPartitionPageSize,
+static_assert(kSystemPageSize * 4 <= kPartitionPageSize,
"ok partition page size");
-static_assert(!(pdfium::base::kPartitionPageSize %
- pdfium::base::kSystemPageSize),
+static_assert(!(kPartitionPageSize % kSystemPageSize),
"ok partition page multiple");
-static_assert(sizeof(pdfium::base::PartitionPage) <=
- pdfium::base::kPageMetadataSize,
+static_assert(sizeof(internal::PartitionPage) <= kPageMetadataSize,
"PartitionPage should not be too big");
-static_assert(sizeof(pdfium::base::PartitionBucket) <=
- pdfium::base::kPageMetadataSize,
+static_assert(sizeof(internal::PartitionBucket) <= kPageMetadataSize,
"PartitionBucket should not be too big");
-static_assert(sizeof(pdfium::base::PartitionSuperPageExtentEntry) <=
- pdfium::base::kPageMetadataSize,
+static_assert(sizeof(internal::PartitionSuperPageExtentEntry) <=
+ kPageMetadataSize,
"PartitionSuperPageExtentEntry should not be too big");
-static_assert(pdfium::base::kPageMetadataSize *
- pdfium::base::kNumPartitionPagesPerSuperPage <=
- pdfium::base::kSystemPageSize,
+static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
+ kSystemPageSize,
"page metadata fits in hole");
+// Limit to prevent callers accidentally overflowing an int size.
+static_assert(kGenericMaxDirectMapped <=
+ (1UL << 31) + kPageAllocationGranularity,
+ "maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
-static_assert(pdfium::base::kGenericSmallestBucket == 8,
- "generic smallest bucket");
-static_assert(pdfium::base::kGenericMaxBucketed == 983040,
- "generic max bucketed");
-static_assert(pdfium::base::kMaxSystemPagesPerSlotSpan < (1 << 8),
+static_assert(kGenericSmallestBucket == 8, "generic smallest bucket");
+static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
+static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
"System pages per slot span must be less than 128.");
-namespace pdfium {
-namespace base {
+internal::PartitionRootBase::PartitionRootBase() = default;
+internal::PartitionRootBase::~PartitionRootBase() = default;
+PartitionRoot::PartitionRoot() = default;
+PartitionRoot::~PartitionRoot() = default;
+PartitionRootGeneric::PartitionRootGeneric() = default;
+PartitionRootGeneric::~PartitionRootGeneric() = default;
+PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
+PartitionAllocatorGeneric::~PartitionAllocatorGeneric() = default;
-subtle::SpinLock PartitionRootBase::gInitializedLock;
-bool PartitionRootBase::gInitialized = false;
-PartitionPage PartitionRootBase::gSeedPage;
-PartitionBucket PartitionRootBase::gPagedBucket;
-void (*PartitionRootBase::gOomHandlingFunction)() = nullptr;
+subtle::SpinLock* GetLock() {
+ static subtle::SpinLock* s_initialized_lock = nullptr;
+ if (!s_initialized_lock)
+ s_initialized_lock = new subtle::SpinLock();
+ return s_initialized_lock;
+}
+
+static bool g_initialized = false;
+
+void (*internal::PartitionRootBase::gOomHandlingFunction)() = nullptr;
PartitionAllocHooks::AllocationHook* PartitionAllocHooks::allocation_hook_ =
nullptr;
PartitionAllocHooks::FreeHook* PartitionAllocHooks::free_hook_ = nullptr;
-static uint8_t PartitionBucketNumSystemPages(size_t size) {
- // This works out reasonably for the current bucket sizes of the generic
- // allocator, and the current values of partition page size and constants.
- // Specifically, we have enough room to always pack the slots perfectly into
- // some number of system pages. The only waste is the waste associated with
- // unfaulted pages (i.e. wasted address space).
- // TODO: we end up using a lot of system pages for very small sizes. For
- // example, we'll use 12 system pages for slot size 24. The slot size is
- // so small that the waste would be tiny with just 4, or 1, system pages.
- // Later, we can investigate whether there are anti-fragmentation benefits
- // to using fewer system pages.
- double best_waste_ratio = 1.0f;
- uint16_t best_pages = 0;
- if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
- DCHECK(!(size % kSystemPageSize));
- best_pages = static_cast<uint16_t>(size / kSystemPageSize);
- CHECK(best_pages < (1 << 8));
- return static_cast<uint8_t>(best_pages);
- }
- DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
- for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
- i <= kMaxSystemPagesPerSlotSpan; ++i) {
- size_t page_size = kSystemPageSize * i;
- size_t num_slots = page_size / size;
- size_t waste = page_size - (num_slots * size);
- // Leaving a page unfaulted is not free; the page will occupy an empty page
- // table entry. Make a simple attempt to account for that.
- size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1);
- size_t num_unfaulted_pages =
- num_remainder_pages
- ? (kNumSystemPagesPerPartitionPage - num_remainder_pages)
- : 0;
- waste += sizeof(void*) * num_unfaulted_pages;
- double waste_ratio = (double)waste / (double)page_size;
- if (waste_ratio < best_waste_ratio) {
- best_waste_ratio = waste_ratio;
- best_pages = i;
- }
- }
- DCHECK(best_pages > 0);
- CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
- return static_cast<uint8_t>(best_pages);
-}
-
-static void PartitionAllocBaseInit(PartitionRootBase* root) {
+static void PartitionAllocBaseInit(internal::PartitionRootBase* root) {
DCHECK(!root->initialized);
{
- subtle::SpinLock::Guard guard(PartitionRootBase::gInitializedLock);
- if (!PartitionRootBase::gInitialized) {
- PartitionRootBase::gInitialized = true;
- // We mark the seed page as free to make sure it is skipped by our
- // logic to find a new active page.
- PartitionRootBase::gPagedBucket.active_pages_head =
- &PartitionRootGeneric::gSeedPage;
+ subtle::SpinLock::Guard guard(*GetLock());
+ if (!g_initialized) {
+ g_initialized = true;
+ // We mark the sentinel bucket/page as free to make sure it is skipped by
+ // our logic to find a new active page.
+ internal::PartitionBucket::get_sentinel_bucket()->active_pages_head =
+ internal::PartitionPage::get_sentinel_page();
}
}
root->initialized = true;
- root->total_size_of_committed_pages = 0;
- root->total_size_of_super_pages = 0;
- root->total_size_of_direct_mapped_pages = 0;
- root->next_super_page = 0;
- root->next_partition_page = 0;
- root->next_partition_page_end = 0;
- root->first_extent = 0;
- root->current_extent = 0;
- root->direct_map_list = 0;
-
- memset(&root->global_empty_page_ring, '\0',
- sizeof(root->global_empty_page_ring));
- root->global_empty_page_ring_index = 0;
// This is a "magic" value so we can test if a root pointer is valid.
root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
}
-static void PartitionBucketInitBase(PartitionBucket* bucket,
- PartitionRootBase* root) {
- bucket->active_pages_head = &PartitionRootGeneric::gSeedPage;
- bucket->empty_pages_head = 0;
- bucket->decommitted_pages_head = 0;
- bucket->num_full_pages = 0;
- bucket->num_system_pages_per_slot_span =
- PartitionBucketNumSystemPages(bucket->slot_size);
-}
-
void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
DCHECK(oom_handling_function);
- PartitionRootBase::gOomHandlingFunction = oom_handling_function;
+ internal::PartitionRootBase::gOomHandlingFunction = oom_handling_function;
}
-void PartitionAllocInit(PartitionRoot* root,
- size_t num_buckets,
- size_t max_allocation) {
- PartitionAllocBaseInit(root);
+void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) {
+ PartitionAllocBaseInit(this);
- root->num_buckets = num_buckets;
- root->max_allocation = max_allocation;
+ this->num_buckets = num_buckets;
+ this->max_allocation = max_allocation;
size_t i;
- for (i = 0; i < root->num_buckets; ++i) {
- PartitionBucket* bucket = &root->buckets()[i];
+ for (i = 0; i < this->num_buckets; ++i) {
+ internal::PartitionBucket* bucket = &this->buckets()[i];
if (!i)
- bucket->slot_size = kAllocationGranularity;
+ bucket->Init(kAllocationGranularity);
else
- bucket->slot_size = i << kBucketShift;
- PartitionBucketInitBase(bucket, root);
+ bucket->Init(i << kBucketShift);
}
}
-void PartitionAllocGenericInit(PartitionRootGeneric* root) {
- subtle::SpinLock::Guard guard(root->lock);
+void PartitionRootGeneric::Init() {
+ subtle::SpinLock::Guard guard(this->lock);
- PartitionAllocBaseInit(root);
+ PartitionAllocBaseInit(this);
// Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 101001 binary.
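To make the malloc(41) example concrete: the "order" of a size is the 1-based index of its most significant bit, and the bits just below it pick a bucket within that order. A hypothetical re-derivation (the shipped code reads the precalculated order_index_shifts[] and order_sub_index_masks[] instead):

    // Illustrative only; recomputing this per allocation is exactly what
    // the precalculated tables avoid.
    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t size = 41;  // 0b101001
      size_t order = 0;
      for (size_t s = size; s; s >>= 1)
        ++order;  // 41 needs 6 bits, so order == 6
      printf("malloc(%zu) falls in order %zu\n", size, order);
      return 0;
    }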
@@ -187,7 +128,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
order_index_shift = 0;
else
order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
- root->order_index_shifts[order] = order_index_shift;
+ this->order_index_shifts[order] = order_index_shift;
size_t sub_order_index_mask;
if (order == kBitsPerSizeT) {
// This avoids invoking undefined behavior for an excessive shift.
@@ -197,7 +138,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
(kGenericNumBucketsPerOrderBits + 1);
}
- root->order_sub_index_masks[order] = sub_order_index_mask;
+ this->order_sub_index_masks[order] = sub_order_index_mask;
}
// Set up the actual usable buckets first.
@@ -208,768 +149,61 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
// code simpler and the structures more generic.
size_t i, j;
size_t current_size = kGenericSmallestBucket;
- size_t currentIncrement =
+ size_t current_increment =
kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
- PartitionBucket* bucket = &root->buckets[0];
+ internal::PartitionBucket* bucket = &this->buckets[0];
for (i = 0; i < kGenericNumBucketedOrders; ++i) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
- bucket->slot_size = current_size;
- PartitionBucketInitBase(bucket, root);
+ bucket->Init(current_size);
+      // Disable pseudo buckets so that touching them faults.
if (current_size % kGenericSmallestBucket)
- bucket->active_pages_head = 0;
- current_size += currentIncrement;
+ bucket->active_pages_head = nullptr;
+ current_size += current_increment;
++bucket;
}
- currentIncrement <<= 1;
+ current_increment <<= 1;
}
DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
- DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
+ DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
// Then set up the fast size -> bucket lookup table.
- bucket = &root->buckets[0];
- PartitionBucket** bucketPtr = &root->bucket_lookups[0];
+ bucket = &this->buckets[0];
+ internal::PartitionBucket** bucket_ptr = &this->bucket_lookups[0];
for (order = 0; order <= kBitsPerSizeT; ++order) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
if (order < kGenericMinBucketedOrder) {
// Use the bucket of the finest granularity for malloc(0) etc.
- *bucketPtr++ = &root->buckets[0];
+ *bucket_ptr++ = &this->buckets[0];
} else if (order > kGenericMaxBucketedOrder) {
- *bucketPtr++ = &PartitionRootGeneric::gPagedBucket;
+ *bucket_ptr++ = internal::PartitionBucket::get_sentinel_bucket();
} else {
- PartitionBucket* validBucket = bucket;
+ internal::PartitionBucket* valid_bucket = bucket;
// Skip over invalid buckets.
- while (validBucket->slot_size % kGenericSmallestBucket)
- validBucket++;
- *bucketPtr++ = validBucket;
+ while (valid_bucket->slot_size % kGenericSmallestBucket)
+ valid_bucket++;
+ *bucket_ptr++ = valid_bucket;
bucket++;
}
}
}
- DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
- DCHECK(bucketPtr ==
- &root->bucket_lookups[0] +
- ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
+ DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
+ DCHECK(bucket_ptr == &this->bucket_lookups[0] +
+ ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
   // which tries to overflow to a non-existent order.
- *bucketPtr = &PartitionRootGeneric::gPagedBucket;
-}
-
-#if !defined(ARCH_CPU_64_BITS)
-static NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages() {
- OOM_CRASH();
-}
-#endif
-
-static NOINLINE void PartitionOutOfMemory(const PartitionRootBase* root) {
-#if !defined(ARCH_CPU_64_BITS)
- // Check whether this OOM is due to a lot of super pages that are allocated
- // but not committed, probably due to http://crbug.com/421387.
- if (root->total_size_of_super_pages +
- root->total_size_of_direct_mapped_pages -
- root->total_size_of_committed_pages >
- kReasonableSizeOfUnusedPages) {
- PartitionOutOfMemoryWithLotsOfUncommitedPages();
- }
-#endif
- if (PartitionRootBase::gOomHandlingFunction)
- (*PartitionRootBase::gOomHandlingFunction)();
- OOM_CRASH();
-}
-
-static NOINLINE void PartitionExcessiveAllocationSize() {
- OOM_CRASH();
-}
-
-static NOINLINE void PartitionBucketFull() {
- OOM_CRASH();
-}
-
-// partitionPageStateIs*
-// Note that it's only valid to call these functions on pages found on one of
-// the page lists. Specifically, you can't call these functions on full pages
-// that were detached from the active list.
-static bool ALWAYS_INLINE
-PartitionPageStateIsActive(const PartitionPage* page) {
- DCHECK(page != &PartitionRootGeneric::gSeedPage);
- DCHECK(!page->page_offset);
- return (page->num_allocated_slots > 0 &&
- (page->freelist_head || page->num_unprovisioned_slots));
-}
-
-static bool ALWAYS_INLINE PartitionPageStateIsFull(const PartitionPage* page) {
- DCHECK(page != &PartitionRootGeneric::gSeedPage);
- DCHECK(!page->page_offset);
- bool ret = (page->num_allocated_slots == PartitionBucketSlots(page->bucket));
- if (ret) {
- DCHECK(!page->freelist_head);
- DCHECK(!page->num_unprovisioned_slots);
- }
- return ret;
-}
-
-static bool ALWAYS_INLINE PartitionPageStateIsEmpty(const PartitionPage* page) {
- DCHECK(page != &PartitionRootGeneric::gSeedPage);
- DCHECK(!page->page_offset);
- return (!page->num_allocated_slots && page->freelist_head);
-}
-
-static bool ALWAYS_INLINE
-PartitionPageStateIsDecommitted(const PartitionPage* page) {
- DCHECK(page != &PartitionRootGeneric::gSeedPage);
- DCHECK(!page->page_offset);
- bool ret = (!page->num_allocated_slots && !page->freelist_head);
- if (ret) {
- DCHECK(!page->num_unprovisioned_slots);
- DCHECK(page->empty_cache_index == -1);
- }
- return ret;
-}
-
-static void PartitionIncreaseCommittedPages(PartitionRootBase* root,
- size_t len) {
- root->total_size_of_committed_pages += len;
- DCHECK(root->total_size_of_committed_pages <=
- root->total_size_of_super_pages +
- root->total_size_of_direct_mapped_pages);
-}
-
-static void PartitionDecreaseCommittedPages(PartitionRootBase* root,
- size_t len) {
- root->total_size_of_committed_pages -= len;
- DCHECK(root->total_size_of_committed_pages <=
- root->total_size_of_super_pages +
- root->total_size_of_direct_mapped_pages);
-}
-
-static ALWAYS_INLINE void PartitionDecommitSystemPages(PartitionRootBase* root,
- void* address,
- size_t length) {
- DecommitSystemPages(address, length);
- PartitionDecreaseCommittedPages(root, length);
-}
-
-static ALWAYS_INLINE void PartitionRecommitSystemPages(PartitionRootBase* root,
- void* address,
- size_t length) {
- RecommitSystemPages(address, length);
- PartitionIncreaseCommittedPages(root, length);
-}
-
-static ALWAYS_INLINE void* PartitionAllocPartitionPages(
- PartitionRootBase* root,
- int flags,
- uint16_t num_partition_pages) {
- DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
- kPartitionPageSize));
- DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
- kPartitionPageSize));
- DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
- size_t total_size = kPartitionPageSize * num_partition_pages;
- size_t num_partition_pages_left =
- (root->next_partition_page_end - root->next_partition_page) >>
- kPartitionPageShift;
- if (LIKELY(num_partition_pages_left >= num_partition_pages)) {
- // In this case, we can still hand out pages from the current super page
- // allocation.
- char* ret = root->next_partition_page;
- root->next_partition_page += total_size;
- PartitionIncreaseCommittedPages(root, total_size);
- return ret;
- }
-
- // Need a new super page. We want to allocate super pages in a continguous
- // address region as much as possible. This is important for not causing
- // page table bloat and not fragmenting address spaces in 32 bit
- // architectures.
- char* requestedAddress = root->next_super_page;
- char* super_page = reinterpret_cast<char*>(AllocPages(
- requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible));
- if (UNLIKELY(!super_page))
- return 0;
-
- root->total_size_of_super_pages += kSuperPageSize;
- PartitionIncreaseCommittedPages(root, total_size);
-
- root->next_super_page = super_page + kSuperPageSize;
- char* ret = super_page + kPartitionPageSize;
- root->next_partition_page = ret + total_size;
- root->next_partition_page_end = root->next_super_page - kPartitionPageSize;
- // Make the first partition page in the super page a guard page, but leave a
- // hole in the middle.
- // This is where we put page metadata and also a tiny amount of extent
- // metadata.
- SetSystemPagesInaccessible(super_page, kSystemPageSize);
- SetSystemPagesInaccessible(super_page + (kSystemPageSize * 2),
- kPartitionPageSize - (kSystemPageSize * 2));
- // Also make the last partition page a guard page.
- SetSystemPagesInaccessible(super_page + (kSuperPageSize - kPartitionPageSize),
- kPartitionPageSize);
-
- // If we were after a specific address, but didn't get it, assume that
- // the system chose a lousy address. Here most OS'es have a default
- // algorithm that isn't randomized. For example, most Linux
- // distributions will allocate the mapping directly before the last
- // successful mapping, which is far from random. So we just get fresh
- // randomness for the next mapping attempt.
- if (requestedAddress && requestedAddress != super_page)
- root->next_super_page = 0;
-
- // We allocated a new super page so update super page metadata.
- // First check if this is a new extent or not.
- PartitionSuperPageExtentEntry* latest_extent =
- reinterpret_cast<PartitionSuperPageExtentEntry*>(
- PartitionSuperPageToMetadataArea(super_page));
- // By storing the root in every extent metadata object, we have a fast way
- // to go from a pointer within the partition to the root object.
- latest_extent->root = root;
- // Most new extents will be part of a larger extent, and these three fields
- // are unused, but we initialize them to 0 so that we get a clear signal
- // in case they are accidentally used.
- latest_extent->super_page_base = 0;
- latest_extent->super_pages_end = 0;
- latest_extent->next = 0;
-
- PartitionSuperPageExtentEntry* current_extent = root->current_extent;
- bool isNewExtent = (super_page != requestedAddress);
- if (UNLIKELY(isNewExtent)) {
- if (UNLIKELY(!current_extent)) {
- DCHECK(!root->first_extent);
- root->first_extent = latest_extent;
- } else {
- DCHECK(current_extent->super_page_base);
- current_extent->next = latest_extent;
- }
- root->current_extent = latest_extent;
- latest_extent->super_page_base = super_page;
- latest_extent->super_pages_end = super_page + kSuperPageSize;
- } else {
- // We allocated next to an existing extent so just nudge the size up a
- // little.
- DCHECK(current_extent->super_pages_end);
- current_extent->super_pages_end += kSuperPageSize;
- DCHECK(ret >= current_extent->super_page_base &&
- ret < current_extent->super_pages_end);
- }
- return ret;
-}
-
-static ALWAYS_INLINE uint16_t
-PartitionBucketPartitionPages(const PartitionBucket* bucket) {
- return (bucket->num_system_pages_per_slot_span +
- (kNumSystemPagesPerPartitionPage - 1)) /
- kNumSystemPagesPerPartitionPage;
-}
-
-static ALWAYS_INLINE void PartitionPageReset(PartitionPage* page) {
- DCHECK(PartitionPageStateIsDecommitted(page));
-
- page->num_unprovisioned_slots = PartitionBucketSlots(page->bucket);
- DCHECK(page->num_unprovisioned_slots);
-
- page->next_page = nullptr;
-}
-
-static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
- PartitionBucket* bucket) {
- // The bucket never changes. We set it up once.
- page->bucket = bucket;
- page->empty_cache_index = -1;
-
- PartitionPageReset(page);
-
- // If this page has just a single slot, do not set up page offsets for any
- // page metadata other than the first one. This ensures that attempts to
- // touch invalid page metadata fail.
- if (page->num_unprovisioned_slots == 1)
- return;
-
- uint16_t num_partition_pages = PartitionBucketPartitionPages(bucket);
- char* page_char_ptr = reinterpret_cast<char*>(page);
- for (uint16_t i = 1; i < num_partition_pages; ++i) {
- page_char_ptr += kPageMetadataSize;
- PartitionPage* secondary_page =
- reinterpret_cast<PartitionPage*>(page_char_ptr);
- secondary_page->page_offset = i;
- }
-}
-
-static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
- PartitionPage* page) {
- DCHECK(page != &PartitionRootGeneric::gSeedPage);
- uint16_t num_slots = page->num_unprovisioned_slots;
- DCHECK(num_slots);
- PartitionBucket* bucket = page->bucket;
- // We should only get here when _every_ slot is either used or unprovisioned.
- // (The third state is "on the freelist". If we have a non-empty freelist, we
- // should not get here.)
- DCHECK(num_slots + page->num_allocated_slots == PartitionBucketSlots(bucket));
- // Similarly, make explicitly sure that the freelist is empty.
- DCHECK(!page->freelist_head);
- DCHECK(page->num_allocated_slots >= 0);
-
- size_t size = bucket->slot_size;
- char* base = reinterpret_cast<char*>(PartitionPageToPointer(page));
- char* return_object = base + (size * page->num_allocated_slots);
- char* firstFreelistPointer = return_object + size;
- char* firstFreelistPointerExtent =
- firstFreelistPointer + sizeof(PartitionFreelistEntry*);
- // Our goal is to fault as few system pages as possible. We calculate the
- // page containing the "end" of the returned slot, and then allow freelist
- // pointers to be written up to the end of that page.
- char* sub_page_limit = reinterpret_cast<char*>(
- RoundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer)));
- char* slots_limit = return_object + (size * num_slots);
- char* freelist_limit = sub_page_limit;
- if (UNLIKELY(slots_limit < freelist_limit))
- freelist_limit = slots_limit;
-
- uint16_t num_new_freelist_entries = 0;
- if (LIKELY(firstFreelistPointerExtent <= freelist_limit)) {
- // Only consider used space in the slot span. If we consider wasted
- // space, we may get an off-by-one when a freelist pointer fits in the
- // wasted space, but a slot does not.
- // We know we can fit at least one freelist pointer.
- num_new_freelist_entries = 1;
- // Any further entries require space for the whole slot span.
- num_new_freelist_entries += static_cast<uint16_t>(
- (freelist_limit - firstFreelistPointerExtent) / size);
- }
-
- // We always return an object slot -- that's the +1 below.
- // We do not neccessarily create any new freelist entries, because we cross
- // sub page boundaries frequently for large bucket sizes.
- DCHECK(num_new_freelist_entries + 1 <= num_slots);
- num_slots -= (num_new_freelist_entries + 1);
- page->num_unprovisioned_slots = num_slots;
- page->num_allocated_slots++;
-
- if (LIKELY(num_new_freelist_entries)) {
- char* freelist_pointer = firstFreelistPointer;
- PartitionFreelistEntry* entry =
- reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
- page->freelist_head = entry;
- while (--num_new_freelist_entries) {
- freelist_pointer += size;
- PartitionFreelistEntry* next_entry =
- reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
- entry->next = PartitionFreelistMask(next_entry);
- entry = next_entry;
- }
- entry->next = PartitionFreelistMask(0);
- } else {
- page->freelist_head = 0;
- }
- return return_object;
-}
-
-// This helper function scans a bucket's active page list for a suitable new
-// active page.
-// When it finds a suitable new active page (one that has free slots and is not
-// empty), it is set as the new active page. If there is no suitable new
-// active page, the current active page is set to the seed page.
-// As potential pages are scanned, they are tidied up according to their state.
-// Empty pages are swept on to the empty page list, decommitted pages on to the
-// decommitted page list and full pages are unlinked from any list.
-static bool PartitionSetNewActivePage(PartitionBucket* bucket) {
- PartitionPage* page = bucket->active_pages_head;
- if (page == &PartitionRootBase::gSeedPage)
- return false;
-
- PartitionPage* next_page;
-
- for (; page; page = next_page) {
- next_page = page->next_page;
- DCHECK(page->bucket == bucket);
- DCHECK(page != bucket->empty_pages_head);
- DCHECK(page != bucket->decommitted_pages_head);
-
- // Deal with empty and decommitted pages.
- if (LIKELY(PartitionPageStateIsActive(page))) {
- // This page is usable because it has freelist entries, or has
- // unprovisioned slots we can create freelist entries from.
- bucket->active_pages_head = page;
- return true;
- }
- if (LIKELY(PartitionPageStateIsEmpty(page))) {
- page->next_page = bucket->empty_pages_head;
- bucket->empty_pages_head = page;
- } else if (LIKELY(PartitionPageStateIsDecommitted(page))) {
- page->next_page = bucket->decommitted_pages_head;
- bucket->decommitted_pages_head = page;
- } else {
- DCHECK(PartitionPageStateIsFull(page));
- // If we get here, we found a full page. Skip over it too, and also
- // tag it as full (via a negative value). We need it tagged so that
- // free'ing can tell, and move it back into the active page list.
- page->num_allocated_slots = -page->num_allocated_slots;
- ++bucket->num_full_pages;
- // num_full_pages is a uint16_t for efficient packing so guard against
- // overflow to be safe.
- if (UNLIKELY(!bucket->num_full_pages))
- PartitionBucketFull();
- // Not necessary but might help stop accidents.
- page->next_page = 0;
- }
- }
-
- bucket->active_pages_head = &PartitionRootGeneric::gSeedPage;
- return false;
-}
-
-static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(
- PartitionPage* page) {
- DCHECK(PartitionBucketIsDirectMapped(page->bucket));
- return reinterpret_cast<PartitionDirectMapExtent*>(
- reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
-}
-
-static ALWAYS_INLINE void PartitionPageSetRawSize(PartitionPage* page,
- size_t size) {
- size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page);
- if (UNLIKELY(raw_size_ptr != nullptr))
- *raw_size_ptr = size;
-}
-
-static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
- int flags,
- size_t raw_size) {
- size_t size = PartitionDirectMapSize(raw_size);
-
- // Because we need to fake looking like a super page, we need to allocate
- // a bunch of system pages more than "size":
- // - The first few system pages are the partition page in which the super
- // page metadata is stored. We fault just one system page out of a partition
- // page sized clump.
- // - We add a trailing guard page on 32-bit (on 64-bit we rely on the
- // massive address space plus randomization instead).
- size_t map_size = size + kPartitionPageSize;
-#if !defined(ARCH_CPU_64_BITS)
- map_size += kSystemPageSize;
-#endif
- // Round up to the allocation granularity.
- map_size += kPageAllocationGranularityOffsetMask;
- map_size &= kPageAllocationGranularityBaseMask;
-
- // TODO: these pages will be zero-filled. Consider internalizing an
- // allocZeroed() API so we can avoid a memset() entirely in this case.
- char* ptr = reinterpret_cast<char*>(
- AllocPages(0, map_size, kSuperPageSize, PageAccessible));
- if (UNLIKELY(!ptr))
- return nullptr;
-
- size_t committed_page_size = size + kSystemPageSize;
- root->total_size_of_direct_mapped_pages += committed_page_size;
- PartitionIncreaseCommittedPages(root, committed_page_size);
-
- char* slot = ptr + kPartitionPageSize;
- SetSystemPagesInaccessible(ptr + (kSystemPageSize * 2),
- kPartitionPageSize - (kSystemPageSize * 2));
-#if !defined(ARCH_CPU_64_BITS)
- SetSystemPagesInaccessible(ptr, kSystemPageSize);
- SetSystemPagesInaccessible(slot + size, kSystemPageSize);
-#endif
-
- PartitionSuperPageExtentEntry* extent =
- reinterpret_cast<PartitionSuperPageExtentEntry*>(
- PartitionSuperPageToMetadataArea(ptr));
- extent->root = root;
- // The new structures are all located inside a fresh system page so they
- // will all be zeroed out. These DCHECKs are for documentation.
- DCHECK(!extent->super_page_base);
- DCHECK(!extent->super_pages_end);
- DCHECK(!extent->next);
- PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(slot);
- PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(
- reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
- DCHECK(!page->next_page);
- DCHECK(!page->num_allocated_slots);
- DCHECK(!page->num_unprovisioned_slots);
- DCHECK(!page->page_offset);
- DCHECK(!page->empty_cache_index);
- page->bucket = bucket;
- page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
- PartitionFreelistEntry* next_entry =
- reinterpret_cast<PartitionFreelistEntry*>(slot);
- next_entry->next = PartitionFreelistMask(0);
-
- DCHECK(!bucket->active_pages_head);
- DCHECK(!bucket->empty_pages_head);
- DCHECK(!bucket->decommitted_pages_head);
- DCHECK(!bucket->num_system_pages_per_slot_span);
- DCHECK(!bucket->num_full_pages);
- bucket->slot_size = size;
-
- PartitionDirectMapExtent* map_extent = partitionPageToDirectMapExtent(page);
- map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
- map_extent->bucket = bucket;
-
- // Maintain the doubly-linked list of all direct mappings.
- map_extent->next_extent = root->direct_map_list;
- if (map_extent->next_extent)
- map_extent->next_extent->prev_extent = map_extent;
- map_extent->prev_extent = nullptr;
- root->direct_map_list = map_extent;
-
- return page;
-}
-
-static ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
- PartitionRootBase* root = PartitionPageToRoot(page);
- const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page);
- size_t unmap_size = extent->map_size;
-
- // Maintain the doubly-linked list of all direct mappings.
- if (extent->prev_extent) {
- DCHECK(extent->prev_extent->next_extent == extent);
- extent->prev_extent->next_extent = extent->next_extent;
- } else {
- root->direct_map_list = extent->next_extent;
- }
- if (extent->next_extent) {
- DCHECK(extent->next_extent->prev_extent == extent);
- extent->next_extent->prev_extent = extent->prev_extent;
- }
-
- // Add on the size of the trailing guard page and preceeding partition
- // page.
- unmap_size += kPartitionPageSize + kSystemPageSize;
-
- size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
- PartitionDecreaseCommittedPages(root, uncommitted_page_size);
- DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
- root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
-
- DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
-
- char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
- // Account for the mapping starting a partition page before the actual
- // allocation address.
- ptr -= kPartitionPageSize;
-
- FreePages(ptr, unmap_size);
-}
-
-void* PartitionAllocSlowPath(PartitionRootBase* root,
- int flags,
- size_t size,
- PartitionBucket* bucket) {
- // The slow path is called when the freelist is empty.
- DCHECK(!bucket->active_pages_head->freelist_head);
-
- PartitionPage* new_page = nullptr;
-
- // For the PartitionAllocGeneric API, we have a bunch of buckets marked
- // as special cases. We bounce them through to the slow path so that we
- // can still have a blazing fast hot path due to lack of corner-case
- // branches.
- bool returnNull = flags & PartitionAllocReturnNull;
- if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) {
- DCHECK(size > kGenericMaxBucketed);
- DCHECK(bucket == &PartitionRootBase::gPagedBucket);
- DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage);
- if (size > kGenericMaxDirectMapped) {
- if (returnNull)
- return nullptr;
- PartitionExcessiveAllocationSize();
- }
- new_page = PartitionDirectMap(root, flags, size);
- } else if (LIKELY(PartitionSetNewActivePage(bucket))) {
- // First, did we find an active page in the active pages list?
- new_page = bucket->active_pages_head;
- DCHECK(PartitionPageStateIsActive(new_page));
- } else if (LIKELY(bucket->empty_pages_head != nullptr) ||
- LIKELY(bucket->decommitted_pages_head != nullptr)) {
- // Second, look in our lists of empty and decommitted pages.
- // Check empty pages first, which are preferred, but beware that an
- // empty page might have been decommitted.
- while (LIKELY((new_page = bucket->empty_pages_head) != nullptr)) {
- DCHECK(new_page->bucket == bucket);
- DCHECK(PartitionPageStateIsEmpty(new_page) ||
- PartitionPageStateIsDecommitted(new_page));
- bucket->empty_pages_head = new_page->next_page;
- // Accept the empty page unless it got decommitted.
- if (new_page->freelist_head) {
- new_page->next_page = nullptr;
- break;
- }
- DCHECK(PartitionPageStateIsDecommitted(new_page));
- new_page->next_page = bucket->decommitted_pages_head;
- bucket->decommitted_pages_head = new_page;
- }
- if (UNLIKELY(!new_page) &&
- LIKELY(bucket->decommitted_pages_head != nullptr)) {
- new_page = bucket->decommitted_pages_head;
- DCHECK(new_page->bucket == bucket);
- DCHECK(PartitionPageStateIsDecommitted(new_page));
- bucket->decommitted_pages_head = new_page->next_page;
- void* addr = PartitionPageToPointer(new_page);
- PartitionRecommitSystemPages(root, addr,
- PartitionBucketBytes(new_page->bucket));
- PartitionPageReset(new_page);
- }
- DCHECK(new_page);
- } else {
- // Third. If we get here, we need a brand new page.
- uint16_t num_partition_pages = PartitionBucketPartitionPages(bucket);
- void* rawPages =
- PartitionAllocPartitionPages(root, flags, num_partition_pages);
- if (LIKELY(rawPages != nullptr)) {
- new_page = PartitionPointerToPageNoAlignmentCheck(rawPages);
- PartitionPageSetup(new_page, bucket);
- }
- }
-
- // Bail if we had a memory allocation failure.
- if (UNLIKELY(!new_page)) {
- DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage);
- if (returnNull)
- return nullptr;
- PartitionOutOfMemory(root);
- }
-
- bucket = new_page->bucket;
- DCHECK(bucket != &PartitionRootBase::gPagedBucket);
- bucket->active_pages_head = new_page;
- PartitionPageSetRawSize(new_page, size);
-
- // If we found an active page with free slots, or an empty page, we have a
- // usable freelist head.
- if (LIKELY(new_page->freelist_head != nullptr)) {
- PartitionFreelistEntry* entry = new_page->freelist_head;
- PartitionFreelistEntry* new_head = PartitionFreelistMask(entry->next);
- new_page->freelist_head = new_head;
- new_page->num_allocated_slots++;
- return entry;
- }
- // Otherwise, we need to build the freelist.
- DCHECK(new_page->num_unprovisioned_slots);
- return PartitionPageAllocAndFillFreelist(new_page);
-}
-
-static ALWAYS_INLINE void PartitionDecommitPage(PartitionRootBase* root,
- PartitionPage* page) {
- DCHECK(PartitionPageStateIsEmpty(page));
- DCHECK(!PartitionBucketIsDirectMapped(page->bucket));
- void* addr = PartitionPageToPointer(page);
- PartitionDecommitSystemPages(root, addr, PartitionBucketBytes(page->bucket));
-
- // We actually leave the decommitted page in the active list. We'll sweep
- // it on to the decommitted page list when we next walk the active page
- // list.
- // Pulling this trick enables us to use a singly-linked page list for all
- // cases, which is critical in keeping the page metadata structure down to
- // 32 bytes in size.
- page->freelist_head = 0;
- page->num_unprovisioned_slots = 0;
- DCHECK(PartitionPageStateIsDecommitted(page));
-}
-
-static void PartitionDecommitPageIfPossible(PartitionRootBase* root,
- PartitionPage* page) {
- DCHECK(page->empty_cache_index >= 0);
- DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
- DCHECK(page == root->global_empty_page_ring[page->empty_cache_index]);
- page->empty_cache_index = -1;
- if (PartitionPageStateIsEmpty(page))
- PartitionDecommitPage(root, page);
+ *bucket_ptr = internal::PartitionBucket::get_sentinel_bucket();
}
-static ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
- DCHECK(PartitionPageStateIsEmpty(page));
- PartitionRootBase* root = PartitionPageToRoot(page);
-
- // If the page is already registered as empty, give it another life.
- if (page->empty_cache_index != -1) {
- DCHECK(page->empty_cache_index >= 0);
- DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
- DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
- root->global_empty_page_ring[page->empty_cache_index] = 0;
- }
-
- int16_t current_index = root->global_empty_page_ring_index;
- PartitionPage* pageToDecommit = root->global_empty_page_ring[current_index];
- // The page might well have been re-activated, filled up, etc. before we get
- // around to looking at it here.
- if (pageToDecommit)
- PartitionDecommitPageIfPossible(root, pageToDecommit);
-
- // We put the empty slot span on our global list of "pages that were once
- // empty". thus providing it a bit of breathing room to get re-used before
- // we really free it. This improves performance, particularly on Mac OS X
- // which has subpar memory management performance.
- root->global_empty_page_ring[current_index] = page;
- page->empty_cache_index = current_index;
- ++current_index;
- if (current_index == kMaxFreeableSpans)
- current_index = 0;
- root->global_empty_page_ring_index = current_index;
-}
-
-static void PartitionDecommitEmptyPages(PartitionRootBase* root) {
- for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
- PartitionPage* page = root->global_empty_page_ring[i];
- if (page)
- PartitionDecommitPageIfPossible(root, page);
- root->global_empty_page_ring[i] = nullptr;
- }
-}
-
-void PartitionFreeSlowPath(PartitionPage* page) {
- PartitionBucket* bucket = page->bucket;
- DCHECK(page != &PartitionRootGeneric::gSeedPage);
- if (LIKELY(page->num_allocated_slots == 0)) {
- // Page became fully unused.
- if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) {
- PartitionDirectUnmap(page);
- return;
- }
- // If it's the current active page, change it. We bounce the page to
- // the empty list as a force towards defragmentation.
- if (LIKELY(page == bucket->active_pages_head))
- (void)PartitionSetNewActivePage(bucket);
- DCHECK(bucket->active_pages_head != page);
-
- PartitionPageSetRawSize(page, 0);
- DCHECK(!PartitionPageGetRawSize(page));
-
- PartitionRegisterEmptyPage(page);
- } else {
- DCHECK(!PartitionBucketIsDirectMapped(bucket));
- // Ensure that the page is full. That's the only valid case if we
- // arrive here.
- DCHECK(page->num_allocated_slots < 0);
- // A transition of num_allocated_slots from 0 to -1 is not legal, and
- // likely indicates a double-free.
- CHECK(page->num_allocated_slots != -1);
- page->num_allocated_slots = -page->num_allocated_slots - 2;
- DCHECK(page->num_allocated_slots == PartitionBucketSlots(bucket) - 1);
- // Fully used page became partially used. It must be put back on the
- // non-full page list. Also make it the current page to increase the
- // chances of it being filled up again. The old current page will be
- // the next page.
- DCHECK(!page->next_page);
- if (LIKELY(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage))
- page->next_page = bucket->active_pages_head;
- bucket->active_pages_head = page;
- --bucket->num_full_pages;
- // Special case: for a partition page with just a single slot, it may
- // now be empty and we want to run it through the empty logic.
- if (UNLIKELY(page->num_allocated_slots == 0))
- PartitionFreeSlowPath(page);
- }
-}
-
-bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
- PartitionPage* page,
+bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
+ internal::PartitionPage* page,
size_t raw_size) {
- DCHECK(PartitionBucketIsDirectMapped(page->bucket));
+ DCHECK(page->bucket->is_direct_mapped());
- raw_size = PartitionCookieSizeAdjustAdd(raw_size);
+ raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);
// Note that the new size might be a bucketed size; this function is called
// whenever we're reallocating a direct mapped allocation.
- size_t new_size = PartitionDirectMapSize(raw_size);
+ size_t new_size = internal::PartitionBucket::get_direct_map_size(raw_size);
if (new_size < kGenericMinDirectMappedDownsize)
return false;
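The kGenericMinDirectMappedDownsize guard keeps shrunken direct maps from pretending to be buckets. Assuming that constant sits just above kGenericMaxBucketed (an assumption; its definition is outside this hunk), a worked check:

    // Illustrative only; kGenericMaxBucketed == 983040 is asserted near the
    // top of this file.
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t kGenericMaxBucketed = 983040;
      size_t new_size = 100 * 1024;  // hypothetical shrink target
      // Only sizes above the bucketed range can stay direct mapped, so this
      // realloc must fall back to move-and-copy.
      printf("stays direct mapped: %s\n",
             new_size > kGenericMaxBucketed ? "yes" : "no");
      return 0;
    }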
@@ -978,10 +212,11 @@ bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
if (new_size == current_size)
return true;
- char* char_ptr = static_cast<char*>(PartitionPageToPointer(page));
+ char* char_ptr = static_cast<char*>(internal::PartitionPage::ToPointer(page));
if (new_size < current_size) {
- size_t map_size = partitionPageToDirectMapExtent(page)->map_size;
+ size_t map_size =
+ internal::PartitionDirectMapExtent::FromPage(page)->map_size;
// Don't reallocate in-place if new size is less than 80 % of the full
// map size, to avoid holding on to too much unused address space.
@@ -989,16 +224,18 @@ bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
return false;
// Shrink by decommitting unneeded pages and making them inaccessible.
- size_t decommitSize = current_size - new_size;
- PartitionDecommitSystemPages(root, char_ptr + new_size, decommitSize);
- SetSystemPagesInaccessible(char_ptr + new_size, decommitSize);
- } else if (new_size <= partitionPageToDirectMapExtent(page)->map_size) {
+ size_t decommit_size = current_size - new_size;
+ root->DecommitSystemPages(char_ptr + new_size, decommit_size);
+ CHECK(SetSystemPagesAccess(char_ptr + new_size, decommit_size,
+ PageInaccessible));
+ } else if (new_size <=
+ internal::PartitionDirectMapExtent::FromPage(page)->map_size) {
// Grow within the actually allocated memory. Just need to make the
// pages accessible again.
size_t recommit_size = new_size - current_size;
- bool ret = SetSystemPagesAccessible(char_ptr + current_size, recommit_size);
- CHECK(ret);
- PartitionRecommitSystemPages(root, char_ptr + current_size, recommit_size);
+ CHECK(SetSystemPagesAccess(char_ptr + current_size, recommit_size,
+ PageReadWrite));
+ root->RecommitSystemPages(char_ptr + current_size, recommit_size);
#if DCHECK_IS_ON()
memset(char_ptr + current_size, kUninitializedByte, recommit_size);
@@ -1011,11 +248,12 @@ bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
#if DCHECK_IS_ON()
// Write a new trailing cookie.
- PartitionCookieWriteValue(char_ptr + raw_size - kCookieSize);
+ internal::PartitionCookieWriteValue(char_ptr + raw_size -
+ internal::kCookieSize);
#endif
- PartitionPageSetRawSize(page, raw_size);
- DCHECK(PartitionPageGetRawSize(page) == raw_size);
+ page->set_raw_size(raw_size);
+ DCHECK(page->get_raw_size() == raw_size);
page->bucket->slot_size = new_size;
return true;
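The "80 %" rule above is easiest to see with numbers; the exact comparison sits outside this hunk, so treat the arithmetic below as an assumption about its shape:

    // Illustrative only: a 10 MiB direct map shrunk to 7 MiB.
    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t map_size = 10u << 20;  // full reservation: 10 MiB
      size_t new_size = 7u << 20;   // requested size:    7 MiB
      // 7 MiB < 0.8 * 10 MiB, so in-place shrinking is refused and the
      // allocation is moved, releasing the unused address space.
      bool in_place_ok = new_size >= map_size / 5 * 4;
      printf("shrink in place? %s\n", in_place_ok ? "yes" : "no");
      return 0;
    }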
@@ -1034,49 +272,48 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
if (UNLIKELY(!ptr))
return PartitionAllocGenericFlags(root, flags, new_size, type_name);
if (UNLIKELY(!new_size)) {
- PartitionFreeGeneric(root, ptr);
+ root->Free(ptr);
return nullptr;
}
if (new_size > kGenericMaxDirectMapped) {
if (flags & PartitionAllocReturnNull)
return nullptr;
- else
- PartitionExcessiveAllocationSize();
+ internal::PartitionExcessiveAllocationSize();
}
- DCHECK(PartitionPointerIsValid(PartitionCookieFreePointerAdjust(ptr)));
-
- PartitionPage* page =
- PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ internal::PartitionPage* page = internal::PartitionPage::FromPointer(
+ internal::PartitionCookieFreePointerAdjust(ptr));
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ DCHECK(root->IsValidPage(page));
- if (UNLIKELY(PartitionBucketIsDirectMapped(page->bucket))) {
+ if (UNLIKELY(page->bucket->is_direct_mapped())) {
// We may be able to perform the realloc in place by changing the
// accessibility of memory pages and, if reducing the size, decommitting
// them.
- if (partitionReallocDirectMappedInPlace(root, page, new_size)) {
+ if (PartitionReallocDirectMappedInPlace(root, page, new_size)) {
PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name);
return ptr;
}
}
- size_t actual_new_size = PartitionAllocActualSize(root, new_size);
+ size_t actual_new_size = root->ActualSize(new_size);
size_t actual_old_size = PartitionAllocGetSize(ptr);
// TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
// new size is a significant percentage smaller. We could do the same if we
// determine it is a win.
if (actual_new_size == actual_old_size) {
- // Trying to allocate a block of size new_size would give us a block of
+ // Trying to allocate a block of size |new_size| would give us a block of
// the same size as the one we've already got, so re-use the allocation
// after updating statistics (and cookies, if present).
- PartitionPageSetRawSize(page, PartitionCookieSizeAdjustAdd(new_size));
+ page->set_raw_size(internal::PartitionCookieSizeAdjustAdd(new_size));
#if DCHECK_IS_ON()
// Write a new trailing cookie when it is possible to keep track of
// |new_size| via the raw size pointer.
- if (PartitionPageGetRawSizePtr(page))
- PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
-#endif // DCHECK_IS_ON()
+ if (page->get_raw_size_ptr())
+ internal::PartitionCookieWriteValue(static_cast<char*>(ptr) + new_size);
+#endif
return ptr;
}
@@ -1085,8 +322,7 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
if (!ret) {
if (flags & PartitionAllocReturnNull)
return nullptr;
- else
- PartitionExcessiveAllocationSize();
+ internal::PartitionExcessiveAllocationSize();
}
size_t copy_size = actual_old_size;
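When neither the direct-map path nor the equal-actual-size path applies, the code falls through to the classic move-and-copy realloc visible in the surrounding hunks. A hedged sketch of that shape, with std::malloc/std::free standing in for the root's Alloc/Free:

    // Illustrative only; the ReturnNull-vs-crash policy is omitted.
    #include <algorithm>
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    void* ReallocByCopy(void* ptr, size_t old_size, size_t new_size) {
      void* ret = std::malloc(new_size);  // stand-in for the partition alloc
      if (!ret)
        return nullptr;
      std::memcpy(ret, ptr, std::min(old_size, new_size));
      std::free(ptr);  // stand-in for root->Free(ptr)
      return ret;
    }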
@@ -1094,67 +330,80 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
copy_size = new_size;
memcpy(ret, ptr, copy_size);
- PartitionFreeGeneric(root, ptr);
+ root->Free(ptr);
return ret;
-#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#endif
+}
+
+void* PartitionRootGeneric::Realloc(void* ptr,
+ size_t new_size,
+ const char* type_name) {
+ return PartitionReallocGenericFlags(this, 0, ptr, new_size, type_name);
}
-void* PartitionReallocGeneric(PartitionRootGeneric* root,
- void* ptr,
- size_t new_size,
- const char* type_name) {
- return PartitionReallocGenericFlags(root, 0, ptr, new_size, type_name);
+void* PartitionRootGeneric::TryRealloc(void* ptr,
+ size_t new_size,
+ const char* type_name) {
+ return PartitionReallocGenericFlags(this, PartitionAllocReturnNull, ptr,
+ new_size, type_name);
}
-static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
- const PartitionBucket* bucket = page->bucket;
+static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) {
+ const internal::PartitionBucket* bucket = page->bucket;
size_t slot_size = bucket->slot_size;
if (slot_size < kSystemPageSize || !page->num_allocated_slots)
return 0;
- size_t bucket_num_slots = PartitionBucketSlots(bucket);
+ size_t bucket_num_slots = bucket->get_slots_per_span();
size_t discardable_bytes = 0;
- size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page));
+ size_t raw_size = page->get_raw_size();
if (raw_size) {
- uint32_t usedBytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
- discardable_bytes = bucket->slot_size - usedBytes;
+ uint32_t used_bytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
+ discardable_bytes = bucket->slot_size - used_bytes;
if (discardable_bytes && discard) {
- char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
- ptr += usedBytes;
+ char* ptr =
+ reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
+ ptr += used_bytes;
DiscardSystemPages(ptr, discardable_bytes);
}
return discardable_bytes;
}
- const size_t max_slot_count =
+ constexpr size_t kMaxSlotCount =
(kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
- DCHECK(bucket_num_slots <= max_slot_count);
+ DCHECK(bucket_num_slots <= kMaxSlotCount);
DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
- char slot_usage[max_slot_count];
+ char slot_usage[kMaxSlotCount];
+#if !defined(OS_WIN)
+  // The last freelist entry should not be discarded on Windows, where
+  // DiscardVirtualMemory makes the contents of discarded memory undefined.
size_t last_slot = static_cast<size_t>(-1);
+#endif
memset(slot_usage, 1, num_slots);
- char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
- PartitionFreelistEntry* entry = page->freelist_head;
+ char* ptr = reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page));
// First, walk the freelist for this page and make a bitmap of which slots
// are not in use.
- while (entry) {
- size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
- DCHECK(slotIndex < num_slots);
- slot_usage[slotIndex] = 0;
- entry = PartitionFreelistMask(entry->next);
- // If we have a slot where the masked freelist entry is 0, we can
- // actually discard that freelist entry because touching a discarded
- // page is guaranteed to return original content or 0.
- // (Note that this optimization won't fire on big endian machines
- // because the masking function is negation.)
- if (!PartitionFreelistMask(entry))
- last_slot = slotIndex;
+ for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
+ /**/) {
+ size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
+ DCHECK(slot_index < num_slots);
+ slot_usage[slot_index] = 0;
+ entry = internal::PartitionFreelistEntry::Transform(entry->next);
+#if !defined(OS_WIN)
+ // If we have a slot where the masked freelist entry is 0, we can actually
+ // discard that freelist entry because touching a discarded page is
+ // guaranteed to return original content or 0. (Note that this optimization
+ // won't fire on big-endian machines because the masking function is
+ // negation.)
+ if (!internal::PartitionFreelistEntry::Transform(entry))
+ last_slot = slot_index;
+#endif
}
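For reference, the Transform() applied to each next pointer above is its own inverse; a standalone sketch of the two variants, assuming a 64-bit target (ARCH_CPU_BIG_ENDIAN and the byte-swap builtin stand in for the real build macros and helpers):

#include <cstdint>

uintptr_t TransformSketch(uintptr_t ptr) {
#if defined(ARCH_CPU_BIG_ENDIAN)
  return ~ptr;                    // Negation: a masked nullptr is all ones.
#else
  return __builtin_bswap64(ptr);  // Byte swap: a masked nullptr stays zero,
                                  // so a discarded (zero-reading) entry still
                                  // decodes as end-of-list.
#endif
}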
- // If the slot(s) at the end of the slot span are not in used, we can
- // truncate them entirely and rewrite the freelist.
+  // If the slot(s) at the end of the slot span are not in use, we can truncate
+ // them entirely and rewrite the freelist.
size_t truncated_slots = 0;
while (!slot_usage[num_slots - 1]) {
truncated_slots++;
@@ -1163,62 +412,67 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
}
// First, do the work of calculating the discardable bytes. Don't actually
// discard anything unless the discard flag was passed in.
- char* begin_ptr = nullptr;
- char* end_ptr = nullptr;
- size_t unprovisioned_bytes = 0;
if (truncated_slots) {
- begin_ptr = ptr + (num_slots * slot_size);
- end_ptr = begin_ptr + (slot_size * truncated_slots);
+ size_t unprovisioned_bytes = 0;
+ char* begin_ptr = ptr + (num_slots * slot_size);
+ char* end_ptr = begin_ptr + (slot_size * truncated_slots);
begin_ptr = reinterpret_cast<char*>(
RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
- // We round the end pointer here up and not down because we're at the
- // end of a slot span, so we "own" all the way up the page boundary.
+ // We round the end pointer here up and not down because we're at the end of
+  // a slot span, so we "own" all the way up to the page boundary.
end_ptr = reinterpret_cast<char*>(
RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
- DCHECK(end_ptr <= ptr + PartitionBucketBytes(bucket));
+ DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
if (begin_ptr < end_ptr) {
unprovisioned_bytes = end_ptr - begin_ptr;
discardable_bytes += unprovisioned_bytes;
}
- }
- if (unprovisioned_bytes && discard) {
- DCHECK(truncated_slots > 0);
- size_t num_new_entries = 0;
- page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
- // Rewrite the freelist.
- PartitionFreelistEntry** entry_ptr = &page->freelist_head;
- for (size_t slotIndex = 0; slotIndex < num_slots; ++slotIndex) {
- if (slot_usage[slotIndex])
- continue;
- PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(
- ptr + (slot_size * slotIndex));
- *entry_ptr = PartitionFreelistMask(entry);
- entry_ptr = reinterpret_cast<PartitionFreelistEntry**>(entry);
- num_new_entries++;
+ if (unprovisioned_bytes && discard) {
+ DCHECK(truncated_slots > 0);
+ size_t num_new_entries = 0;
+ page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
+ // Rewrite the freelist.
+ internal::PartitionFreelistEntry** entry_ptr = &page->freelist_head;
+ for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
+ if (slot_usage[slot_index])
+ continue;
+ auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
+ ptr + (slot_size * slot_index));
+ *entry_ptr = internal::PartitionFreelistEntry::Transform(entry);
+ entry_ptr = reinterpret_cast<internal::PartitionFreelistEntry**>(entry);
+ num_new_entries++;
+#if !defined(OS_WIN)
+ last_slot = slot_index;
+#endif
+ }
+ // Terminate the freelist chain.
+ *entry_ptr = nullptr;
+ // The freelist head is stored unmasked.
+ page->freelist_head =
+ internal::PartitionFreelistEntry::Transform(page->freelist_head);
+ DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
+ // Discard the memory.
+ DiscardSystemPages(begin_ptr, unprovisioned_bytes);
}
- // Terminate the freelist chain.
- *entry_ptr = nullptr;
- // The freelist head is stored unmasked.
- page->freelist_head = PartitionFreelistMask(page->freelist_head);
- DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
- // Discard the memory.
- DiscardSystemPages(begin_ptr, unprovisioned_bytes);
}
- // Next, walk the slots and for any not in use, consider where the system
- // page boundaries occur. We can release any system pages back to the
- // system as long as we don't interfere with a freelist pointer or an
- // adjacent slot.
+ // Next, walk the slots and for any not in use, consider where the system page
+ // boundaries occur. We can release any system pages back to the system as
+ // long as we don't interfere with a freelist pointer or an adjacent slot.
for (size_t i = 0; i < num_slots; ++i) {
if (slot_usage[i])
continue;
// The first address we can safely discard is just after the freelist
- // pointer. There's one quirk: if the freelist pointer is actually a
- // null, we can discard that pointer value too.
+ // pointer. There's one quirk: if the freelist pointer is actually NULL, we
+ // can discard that pointer value too.
char* begin_ptr = ptr + (i * slot_size);
char* end_ptr = begin_ptr + slot_size;
+#if !defined(OS_WIN)
if (i != last_slot)
- begin_ptr += sizeof(PartitionFreelistEntry);
+ begin_ptr += sizeof(internal::PartitionFreelistEntry);
+#else
+ begin_ptr += sizeof(internal::PartitionFreelistEntry);
+#endif
begin_ptr = reinterpret_cast<char*>(
RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
end_ptr = reinterpret_cast<char*>(
@@ -1233,32 +487,33 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
return discardable_bytes;
}
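The page-boundary rounding used throughout PartitionPurgePage() behaves as in this minimal sketch (a 4 KB kSystemPageSize is an assumption for illustration):

#include <cstdint>

constexpr uintptr_t kPageSizeSketch = 4096;  // stand-in for kSystemPageSize

constexpr uintptr_t RoundUpSketch(uintptr_t address) {
  return (address + kPageSizeSketch - 1) & ~(kPageSizeSketch - 1);
}

// Only whole system pages inside [begin_ptr, end_ptr) may be discarded, so
// both bounds round up: begin_ptr skips past any partially used page, and
// end_ptr may round up because the slot span owns the space through the
// boundary.
static_assert(RoundUpSketch(0x5001) == 0x6000, "partial page is skipped");
static_assert(RoundUpSketch(0x6000) == 0x6000, "aligned address unchanged");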
-static void PartitionPurgeBucket(PartitionBucket* bucket) {
- if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) {
- for (PartitionPage* page = bucket->active_pages_head; page;
+static void PartitionPurgeBucket(internal::PartitionBucket* bucket) {
+ if (bucket->active_pages_head !=
+ internal::PartitionPage::get_sentinel_page()) {
+ for (internal::PartitionPage* page = bucket->active_pages_head; page;
page = page->next_page) {
- DCHECK(page != &PartitionRootGeneric::gSeedPage);
- (void)PartitionPurgePage(page, true);
+ DCHECK(page != internal::PartitionPage::get_sentinel_page());
+ PartitionPurgePage(page, true);
}
}
}
-void PartitionPurgeMemory(PartitionRoot* root, int flags) {
+void PartitionRoot::PurgeMemory(int flags) {
if (flags & PartitionPurgeDecommitEmptyPages)
- PartitionDecommitEmptyPages(root);
+ DecommitEmptyPages();
// We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
- // here because that flag is only useful for allocations >= system page
- // size. We only have allocations that large inside generic partitions
- // at the moment.
+ // here because that flag is only useful for allocations >= system page size.
+ // We only have allocations that large inside generic partitions at the
+ // moment.
}
-void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) {
- subtle::SpinLock::Guard guard(root->lock);
+void PartitionRootGeneric::PurgeMemory(int flags) {
+ subtle::SpinLock::Guard guard(this->lock);
if (flags & PartitionPurgeDecommitEmptyPages)
- PartitionDecommitEmptyPages(root);
+ DecommitEmptyPages();
if (flags & PartitionPurgeDiscardUnusedSystemPages) {
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
- PartitionBucket* bucket = &root->buckets[i];
+ internal::PartitionBucket* bucket = &this->buckets[i];
if (bucket->slot_size >= kSystemPageSize)
PartitionPurgeBucket(bucket);
}
@@ -1266,47 +521,48 @@ void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) {
}
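A usage sketch of the new member API (the allocator name is illustrative):

PartitionAllocatorGeneric allocator;
allocator.init();
// Fast: decommit the ring of empty pages.
allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
// Slower but more thorough: also walk freelists and discard unused system
// pages in buckets of at least kSystemPageSize.
allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages |
                              PartitionPurgeDiscardUnusedSystemPages);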
static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
- const PartitionPage* page) {
- uint16_t bucket_num_slots = PartitionBucketSlots(page->bucket);
+ internal::PartitionPage* page) {
+ uint16_t bucket_num_slots = page->bucket->get_slots_per_span();
- if (PartitionPageStateIsDecommitted(page)) {
+ if (page->is_decommitted()) {
++stats_out->num_decommitted_pages;
return;
}
- stats_out->discardable_bytes +=
- PartitionPurgePage(const_cast<PartitionPage*>(page), false);
+ stats_out->discardable_bytes += PartitionPurgePage(page, false);
- size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page));
- if (raw_size)
+ size_t raw_size = page->get_raw_size();
+ if (raw_size) {
stats_out->active_bytes += static_cast<uint32_t>(raw_size);
- else
+ } else {
stats_out->active_bytes +=
(page->num_allocated_slots * stats_out->bucket_slot_size);
+ }
size_t page_bytes_resident =
RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
stats_out->bucket_slot_size);
stats_out->resident_bytes += page_bytes_resident;
- if (PartitionPageStateIsEmpty(page)) {
+ if (page->is_empty()) {
stats_out->decommittable_bytes += page_bytes_resident;
++stats_out->num_empty_pages;
- } else if (PartitionPageStateIsFull(page)) {
+ } else if (page->is_full()) {
++stats_out->num_full_pages;
} else {
- DCHECK(PartitionPageStateIsActive(page));
+ DCHECK(page->is_active());
++stats_out->num_active_pages;
}
}
static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
- const PartitionBucket* bucket) {
- DCHECK(!PartitionBucketIsDirectMapped(bucket));
+ const internal::PartitionBucket* bucket) {
+ DCHECK(!bucket->is_direct_mapped());
stats_out->is_valid = false;
- // If the active page list is empty (== &PartitionRootGeneric::gSeedPage),
- // the bucket might still need to be reported if it has a list of empty,
- // decommitted or full pages.
- if (bucket->active_pages_head == &PartitionRootGeneric::gSeedPage &&
+ // If the active page list is empty (==
+ // internal::PartitionPage::get_sentinel_page()), the bucket might still need
+ // to be reported if it has a list of empty, decommitted or full pages.
+ if (bucket->active_pages_head ==
+ internal::PartitionPage::get_sentinel_page() &&
!bucket->empty_pages_head && !bucket->decommitted_pages_head &&
!bucket->num_full_pages)
return;
@@ -1316,42 +572,41 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
stats_out->is_direct_map = false;
stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
stats_out->bucket_slot_size = bucket->slot_size;
- uint16_t bucket_num_slots = PartitionBucketSlots(bucket);
+ uint16_t bucket_num_slots = bucket->get_slots_per_span();
size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
- stats_out->allocated_page_size = PartitionBucketBytes(bucket);
+ stats_out->allocated_page_size = bucket->get_bytes_per_span();
stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
stats_out->resident_bytes =
bucket->num_full_pages * stats_out->allocated_page_size;
- for (const PartitionPage* page = bucket->empty_pages_head; page;
+ for (internal::PartitionPage* page = bucket->empty_pages_head; page;
page = page->next_page) {
- DCHECK(PartitionPageStateIsEmpty(page) ||
- PartitionPageStateIsDecommitted(page));
+ DCHECK(page->is_empty() || page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
- for (const PartitionPage* page = bucket->decommitted_pages_head; page;
+ for (internal::PartitionPage* page = bucket->decommitted_pages_head; page;
page = page->next_page) {
- DCHECK(PartitionPageStateIsDecommitted(page));
+ DCHECK(page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
- if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) {
- for (const PartitionPage* page = bucket->active_pages_head; page;
+ if (bucket->active_pages_head !=
+ internal::PartitionPage::get_sentinel_page()) {
+ for (internal::PartitionPage* page = bucket->active_pages_head; page;
page = page->next_page) {
- DCHECK(page != &PartitionRootGeneric::gSeedPage);
+ DCHECK(page != internal::PartitionPage::get_sentinel_page());
PartitionDumpPageStats(stats_out, page);
}
}
}
-void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
- const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper) {
+void PartitionRootGeneric::DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper) {
PartitionMemoryStats stats = {0};
- stats.total_mmapped_bytes = partition->total_size_of_super_pages +
- partition->total_size_of_direct_mapped_pages;
- stats.total_committed_bytes = partition->total_size_of_committed_pages;
+ stats.total_mmapped_bytes =
+ this->total_size_of_super_pages + this->total_size_of_direct_mapped_pages;
+ stats.total_committed_bytes = this->total_size_of_committed_pages;
size_t direct_mapped_allocations_total_size = 0;
@@ -1368,13 +623,13 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
size_t num_direct_mapped_allocations = 0;
{
- subtle::SpinLock::Guard guard(partition->lock);
+ subtle::SpinLock::Guard guard(this->lock);
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
- const PartitionBucket* bucket = &partition->buckets[i];
+ const internal::PartitionBucket* bucket = &this->buckets[i];
// Don't report the pseudo buckets that the generic allocator sets up in
// order to preserve a fast size->bucket map (see
- // PartitionAllocGenericInit for details).
+ // PartitionRootGeneric::Init() for details).
if (!bucket->active_pages_head)
bucket_stats[i].is_valid = false;
else
@@ -1387,7 +642,7 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
}
}
- for (PartitionDirectMapExtent *extent = partition->direct_map_list;
+ for (internal::PartitionDirectMapExtent *extent = this->direct_map_list;
extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
extent = extent->next_extent, ++num_direct_mapped_allocations) {
DCHECK(!extent->next_extent ||
@@ -1402,8 +657,8 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
if (!is_light_dump) {
// Call |PartitionsDumpBucketStats| after collecting stats because it can
- // try to allocate using |PartitionAllocGeneric| and it can't obtain the
- // lock.
+ // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
+ // obtain the lock.
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
if (bucket_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
@@ -1412,16 +667,15 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
uint32_t size = direct_map_lengths[i];
- PartitionBucketMemoryStats stats;
- memset(&stats, '\0', sizeof(stats));
- stats.is_valid = true;
- stats.is_direct_map = true;
- stats.num_full_pages = 1;
- stats.allocated_page_size = size;
- stats.bucket_slot_size = size;
- stats.active_bytes = size;
- stats.resident_bytes = size;
- dumper->PartitionsDumpBucketStats(partition_name, &stats);
+ PartitionBucketMemoryStats mapped_stats = {};
+ mapped_stats.is_valid = true;
+ mapped_stats.is_direct_map = true;
+ mapped_stats.num_full_pages = 1;
+ mapped_stats.allocated_page_size = size;
+ mapped_stats.bucket_slot_size = size;
+ mapped_stats.active_bytes = size;
+ mapped_stats.resident_bytes = size;
+ dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
}
}
@@ -1430,31 +684,46 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
dumper->PartitionDumpTotals(partition_name, &stats);
}
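A minimal consumer of this API, assuming PartitionStatsDumper declares exactly the two virtual callbacks invoked above:

#include <cstdio>

class LoggingStatsDumper : public PartitionStatsDumper {
 public:
  void PartitionDumpTotals(const char* partition_name,
                           const PartitionMemoryStats* stats) override {
    printf("%s: mmapped=%zu committed=%zu\n", partition_name,
           stats->total_mmapped_bytes, stats->total_committed_bytes);
  }
  void PartitionsDumpBucketStats(
      const char* partition_name,
      const PartitionBucketMemoryStats* stats) override {
    if (stats->is_valid)
      printf("%s: slot_size=%u resident=%zu\n", partition_name,
             stats->bucket_slot_size, stats->resident_bytes);
  }
};

// LoggingStatsDumper dumper;
// root->DumpStats("my-partition", /*is_light_dump=*/false, &dumper);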
-void PartitionDumpStats(PartitionRoot* partition,
- const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper) {
- static const size_t kMaxReportableBuckets = 4096 / sizeof(void*);
- PartitionBucketMemoryStats memory_stats[kMaxReportableBuckets];
- const size_t partitionNumBuckets = partition->num_buckets;
- DCHECK(partitionNumBuckets <= kMaxReportableBuckets);
+void PartitionRoot::DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper) {
+ PartitionMemoryStats stats = {0};
+ stats.total_mmapped_bytes = this->total_size_of_super_pages;
+ stats.total_committed_bytes = this->total_size_of_committed_pages;
+ DCHECK(!this->total_size_of_direct_mapped_pages);
- for (size_t i = 0; i < partitionNumBuckets; ++i)
- PartitionDumpBucketStats(&memory_stats[i], &partition->buckets()[i]);
+ static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
+ std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
+ if (!is_light_dump) {
+ memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>(
+ new PartitionBucketMemoryStats[kMaxReportableBuckets]);
+ }
- // PartitionsDumpBucketStats is called after collecting stats because it
- // can use PartitionAlloc to allocate and this can affect the statistics.
- PartitionMemoryStats stats = {0};
- stats.total_mmapped_bytes = partition->total_size_of_super_pages;
- stats.total_committed_bytes = partition->total_size_of_committed_pages;
- DCHECK(!partition->total_size_of_direct_mapped_pages);
- for (size_t i = 0; i < partitionNumBuckets; ++i) {
- if (memory_stats[i].is_valid) {
- stats.total_resident_bytes += memory_stats[i].resident_bytes;
- stats.total_active_bytes += memory_stats[i].active_bytes;
- stats.total_decommittable_bytes += memory_stats[i].decommittable_bytes;
- stats.total_discardable_bytes += memory_stats[i].discardable_bytes;
- if (!is_light_dump)
+ const size_t partition_num_buckets = this->num_buckets;
+ DCHECK(partition_num_buckets <= kMaxReportableBuckets);
+
+ for (size_t i = 0; i < partition_num_buckets; ++i) {
+ PartitionBucketMemoryStats bucket_stats = {0};
+ PartitionDumpBucketStats(&bucket_stats, &this->buckets()[i]);
+ if (bucket_stats.is_valid) {
+ stats.total_resident_bytes += bucket_stats.resident_bytes;
+ stats.total_active_bytes += bucket_stats.active_bytes;
+ stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
+ stats.total_discardable_bytes += bucket_stats.discardable_bytes;
+ }
+ if (!is_light_dump) {
+ if (bucket_stats.is_valid)
+ memory_stats[i] = bucket_stats;
+ else
+ memory_stats[i].is_valid = false;
+ }
+ }
+ if (!is_light_dump) {
+ // PartitionsDumpBucketStats is called after collecting stats because it
+ // can use PartitionRoot::Alloc() to allocate and this can affect the
+ // statistics.
+ for (size_t i = 0; i < partition_num_buckets; ++i) {
+ if (memory_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
}
}
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.h b/third_party/base/allocator/partition_allocator/partition_alloc.h
index 69fba97d62..a80755c510 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.h
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.h
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
// DESCRIPTION
-// partitionAlloc() / PartitionAllocGeneric() and PartitionFree() /
-// PartitionFreeGeneric() are approximately analagous to malloc() and free().
+// PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
+// PartitionRootGeneric::Free() are approximately analogous to malloc() and
+// free().
//
// The main difference is that a PartitionRoot / PartitionRootGeneric object
// must be supplied to these functions, representing a specific "heap partition"
@@ -23,14 +24,14 @@
// PartitionRoot is really just a header adjacent to other data areas provided
// by the allocator class.
//
-// The partitionAlloc() variant of the API has the following caveats:
+// The PartitionRoot::Alloc() variant of the API has the following caveats:
// - Allocations and frees against a single partition must be single threaded.
// - Allocations must not exceed a max size, chosen at compile-time via a
// templated parameter to PartitionAllocator.
// - Allocation sizes must be aligned to the system pointer size.
// - Allocations are bucketed exactly according to size.
//
-// And for PartitionAllocGeneric():
+// And for PartitionRootGeneric::Alloc():
// - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of
// INT_MAX bytes for security reasons).
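A usage sketch contrasting the two variants just described (names are illustrative):

SizeSpecificPartitionAllocator<64> fixed_alloc;  // single-threaded, small sizes
PartitionAllocatorGeneric generic_alloc;         // thread-safe, arbitrary sizes

void Example() {
  fixed_alloc.init();
  generic_alloc.init();
  // The size-specific variant needs pointer-aligned sizes <= kMaxAllocation.
  void* a = fixed_alloc.root()->Alloc(16, "FixedThing");
  void* b = generic_alloc.root()->Alloc(12345, "GenericThing");
  PartitionFree(a);
  generic_alloc.root()->Free(b);
}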
@@ -64,10 +65,17 @@
#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "third_party/base/allocator/partition_allocator/partition_bucket.h"
+#include "third_party/base/allocator/partition_allocator/partition_cookie.h"
+#include "third_party/base/allocator/partition_allocator/partition_page.h"
+#include "third_party/base/allocator/partition_allocator/partition_root_base.h"
#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+#include "third_party/base/base_export.h"
#include "third_party/base/bits.h"
#include "third_party/base/compiler_specific.h"
#include "third_party/base/logging.h"
+#include "third_party/base/stl_util.h"
#include "third_party/base/sys_byteorder.h"
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
@@ -77,289 +85,83 @@
namespace pdfium {
namespace base {
-// Allocation granularity of sizeof(void*) bytes.
-static const size_t kAllocationGranularity = sizeof(void*);
-static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
-static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
-
-// Underlying partition storage pages are a power-of-two size. It is typical
-// for a partition page to be based on multiple system pages. Most references to
-// "page" refer to partition pages.
-// We also have the concept of "super pages" -- these are the underlying system
-// allocations we make. Super pages contain multiple partition pages inside them
-// and include space for a small amount of metadata per partition page.
-// Inside super pages, we store "slot spans". A slot span is a continguous range
-// of one or more partition pages that stores allocations of the same size.
-// Slot span sizes are adjusted depending on the allocation size, to make sure
-// the packing does not lead to unused (wasted) space at the end of the last
-// system page of the span. For our current max slot span size of 64k and other
-// constant values, we pack _all_ PartitionAllocGeneric() sizes perfectly up
-// against the end of a system page.
-#if defined(_MIPS_ARCH_LOONGSON)
-static const size_t kPartitionPageShift = 16; // 64KB
-#else
-static const size_t kPartitionPageShift = 14; // 16KB
-#endif
-static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
-static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
-static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
-static const size_t kMaxPartitionPagesPerSlotSpan = 4;
-
-// To avoid fragmentation via never-used freelist entries, we hand out partition
-// freelist sections gradually, in units of the dominant system page size.
-// What we're actually doing is avoiding filling the full partition page (16 KB)
-// with freelist pointers right away. Writing freelist pointers will fault and
-// dirty a private page, which is very wasteful if we never actually store
-// objects there.
-static const size_t kNumSystemPagesPerPartitionPage =
- kPartitionPageSize / kSystemPageSize;
-static const size_t kMaxSystemPagesPerSlotSpan =
- kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
-
-// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
-// These chunks are called "super pages". We do this so that we can store
-// metadata in the first few pages of each 2MB aligned section. This leads to
-// a very fast free(). We specifically choose 2MB because this virtual address
-// block represents a full but single PTE allocation on ARM, ia32 and x64.
-//
-// The layout of the super page is as follows. The sizes below are the same
-// for 32 bit and 64 bit.
-//
-// | Guard page (4KB) |
-// | Metadata page (4KB) |
-// | Guard pages (8KB) |
-// | Slot span |
-// | Slot span |
-// | ... |
-// | Slot span |
-// | Guard page (4KB) |
-//
-// - Each slot span is a contiguous range of one or more PartitionPages.
-// - The metadata page has the following format. Note that the PartitionPage
-// that is not at the head of a slot span is "unused". In other words,
-// the metadata for the slot span is stored only in the first PartitionPage
-// of the slot span. Metadata accesses to other PartitionPages are
-// redirected to the first PartitionPage.
-//
-// | SuperPageExtentEntry (32B) |
-// | PartitionPage of slot span 1 (32B, used) |
-// | PartitionPage of slot span 1 (32B, unused) |
-// | PartitionPage of slot span 1 (32B, unused) |
-// | PartitionPage of slot span 2 (32B, used) |
-// | PartitionPage of slot span 3 (32B, used) |
-// | ... |
-// | PartitionPage of slot span N (32B, unused) |
-//
-// A direct mapped page has a similar layout to fake it looking like a super
-// page:
-//
-// | Guard page (4KB) |
-// | Metadata page (4KB) |
-// | Guard pages (8KB) |
-// | Direct mapped object |
-// | Guard page (4KB) |
-//
-// - The metadata page has the following layout:
-//
-// | SuperPageExtentEntry (32B) |
-// | PartitionPage (32B) |
-// | PartitionBucket (32B) |
-// | PartitionDirectMapExtent (8B) |
-static const size_t kSuperPageShift = 21; // 2MB
-static const size_t kSuperPageSize = 1 << kSuperPageShift;
-static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
-static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
-static const size_t kNumPartitionPagesPerSuperPage =
- kSuperPageSize / kPartitionPageSize;
-
-static const size_t kPageMetadataShift = 5; // 32 bytes per partition page.
-static const size_t kPageMetadataSize = 1 << kPageMetadataShift;
-
-// The following kGeneric* constants apply to the generic variants of the API.
-// The "order" of an allocation is closely related to the power-of-two size of
-// the allocation. More precisely, the order is the bit index of the
-// most-significant-bit in the allocation size, where the bit numbers starts
-// at index 1 for the least-significant-bit.
-// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
-// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
-static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
-static const size_t kGenericMaxBucketedOrder =
- 20; // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB)
-static const size_t kGenericNumBucketedOrders =
- (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
-// Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
-// 160, ..., 240:
-static const size_t kGenericNumBucketsPerOrderBits = 3;
-static const size_t kGenericNumBucketsPerOrder =
- 1 << kGenericNumBucketsPerOrderBits;
-static const size_t kGenericNumBuckets =
- kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
-static const size_t kGenericSmallestBucket = 1
- << (kGenericMinBucketedOrder - 1);
-static const size_t kGenericMaxBucketSpacing =
- 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
-static const size_t kGenericMaxBucketed =
- (1 << (kGenericMaxBucketedOrder - 1)) +
- ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
-static const size_t kGenericMinDirectMappedDownsize =
- kGenericMaxBucketed +
- 1; // Limit when downsizing a direct mapping using realloc().
-static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
-static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
-
-// Constants for the memory reclaim logic.
-static const size_t kMaxFreeableSpans = 16;
-
-// If the total size in bytes of allocated but not committed pages exceeds this
-// value (probably it is a "out of virtual address space" crash),
-// a special crash stack trace is generated at |partitionOutOfMemory|.
-// This is to distinguish "out of virtual address space" from
-// "out of physical memory" in crash reports.
-static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB
-
-#if DCHECK_IS_ON()
-// These two byte values match tcmalloc.
-static const unsigned char kUninitializedByte = 0xAB;
-static const unsigned char kFreedByte = 0xCD;
-static const size_t kCookieSize =
- 16; // Handles alignment up to XMM instructions on Intel.
-static const unsigned char kCookieValue[kCookieSize] = {
- 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
- 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
-#endif
-
-struct PartitionBucket;
-struct PartitionRootBase;
-
-struct PartitionFreelistEntry {
- PartitionFreelistEntry* next;
-};
-
-// Some notes on page states. A page can be in one of four major states:
-// 1) Active.
-// 2) Full.
-// 3) Empty.
-// 4) Decommitted.
-// An active page has available free slots. A full page has no free slots. An
-// empty page has no free slots, and a decommitted page is an empty page that
-// had its backing memory released back to the system.
-// There are two linked lists tracking the pages. The "active page" list is an
-// approximation of a list of active pages. It is an approximation because
-// full, empty and decommitted pages may briefly be present in the list until
-// we next do a scan over it.
-// The "empty page" list is an accurate list of pages which are either empty
-// or decommitted.
-//
-// The significant page transitions are:
-// - free() will detect when a full page has a slot free()'d and immediately
-// return the page to the head of the active list.
-// - free() will detect when a page is fully emptied. It _may_ add it to the
-// empty list or it _may_ leave it on the active list until a future list scan.
-// - malloc() _may_ scan the active page list in order to fulfil the request.
-// If it does this, full, empty and decommitted pages encountered will be
-// booted out of the active list. If there are no suitable active pages found,
-// an empty or decommitted page (if one exists) will be pulled from the empty
-// list on to the active list.
-struct PartitionPage {
- PartitionFreelistEntry* freelist_head;
- PartitionPage* next_page;
- PartitionBucket* bucket;
- // Deliberately signed, 0 for empty or decommitted page, -n for full pages:
- int16_t num_allocated_slots;
- uint16_t num_unprovisioned_slots;
- uint16_t page_offset;
- int16_t empty_cache_index; // -1 if not in the empty cache.
-};
-
-struct PartitionBucket {
- PartitionPage* active_pages_head; // Accessed most in hot path => goes first.
- PartitionPage* empty_pages_head;
- PartitionPage* decommitted_pages_head;
- uint32_t slot_size;
- unsigned num_system_pages_per_slot_span : 8;
- unsigned num_full_pages : 24;
-};
+class PartitionStatsDumper;
-// An "extent" is a span of consecutive superpages. We link to the partition's
-// next extent (if there is one) at the very start of a superpage's metadata
-// area.
-struct PartitionSuperPageExtentEntry {
- PartitionRootBase* root;
- char* super_page_base;
- char* super_pages_end;
- PartitionSuperPageExtentEntry* next;
-};
-
-struct PartitionDirectMapExtent {
- PartitionDirectMapExtent* next_extent;
- PartitionDirectMapExtent* prev_extent;
- PartitionBucket* bucket;
- size_t map_size; // Mapped size, not including guard pages and meta-data.
-};
-
-struct BASE_EXPORT PartitionRootBase {
- size_t total_size_of_committed_pages;
- size_t total_size_of_super_pages;
- size_t total_size_of_direct_mapped_pages;
- // Invariant: total_size_of_committed_pages <=
- // total_size_of_super_pages +
- // total_size_of_direct_mapped_pages.
- unsigned num_buckets;
- unsigned max_allocation;
- bool initialized;
- char* next_super_page;
- char* next_partition_page;
- char* next_partition_page_end;
- PartitionSuperPageExtentEntry* current_extent;
- PartitionSuperPageExtentEntry* first_extent;
- PartitionDirectMapExtent* direct_map_list;
- PartitionPage* global_empty_page_ring[kMaxFreeableSpans];
- int16_t global_empty_page_ring_index;
- uintptr_t inverted_self;
-
- static subtle::SpinLock gInitializedLock;
- static bool gInitialized;
- // gSeedPage is used as a sentinel to indicate that there is no page
- // in the active page list. We can use nullptr, but in that case we need
- // to add a null-check branch to the hot allocation path. We want to avoid
- // that.
- static PartitionPage gSeedPage;
- static PartitionBucket gPagedBucket;
- // gOomHandlingFunction is invoked when ParitionAlloc hits OutOfMemory.
- static void (*gOomHandlingFunction)();
+enum PartitionPurgeFlags {
+ // Decommitting the ring list of empty pages is reasonably fast.
+ PartitionPurgeDecommitEmptyPages = 1 << 0,
+ // Discarding unused system pages is slower, because it involves walking all
+ // freelists in all active partition pages of all buckets >= system page
+ // size. It often frees a similar amount of memory to decommitting the empty
+ // pages, though.
+ PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
};
// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
-struct PartitionRoot : public PartitionRootBase {
+struct BASE_EXPORT PartitionRoot : public internal::PartitionRootBase {
+ PartitionRoot();
+ ~PartitionRoot() override;
+ // This references the buckets OFF the edge of this struct. All uses of
+ // PartitionRoot must have the bucket array come right after.
+ //
// The PartitionAlloc templated class ensures the following is correct.
- ALWAYS_INLINE PartitionBucket* buckets() {
- return reinterpret_cast<PartitionBucket*>(this + 1);
+ ALWAYS_INLINE internal::PartitionBucket* buckets() {
+ return reinterpret_cast<internal::PartitionBucket*>(this + 1);
}
- ALWAYS_INLINE const PartitionBucket* buckets() const {
- return reinterpret_cast<const PartitionBucket*>(this + 1);
+ ALWAYS_INLINE const internal::PartitionBucket* buckets() const {
+ return reinterpret_cast<const internal::PartitionBucket*>(this + 1);
}
+
+ void Init(size_t num_buckets, size_t max_allocation);
+
+ ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+ ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
+
+ void PurgeMemory(int flags);
+
+ void DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper);
};
// Never instantiate a PartitionRootGeneric directly, instead use
// PartitionAllocatorGeneric.
-struct PartitionRootGeneric : public PartitionRootBase {
+struct BASE_EXPORT PartitionRootGeneric : public internal::PartitionRootBase {
+ PartitionRootGeneric();
+ ~PartitionRootGeneric() override;
subtle::SpinLock lock;
// Some pre-computed constants.
- size_t order_index_shifts[kBitsPerSizeT + 1];
- size_t order_sub_index_masks[kBitsPerSizeT + 1];
+ size_t order_index_shifts[kBitsPerSizeT + 1] = {};
+ size_t order_sub_index_masks[kBitsPerSizeT + 1] = {};
// The bucket lookup table lets us map a size_t to a bucket quickly.
// The trailing +1 caters for the overflow case for very large allocation
// sizes. It is one flat array instead of a 2D array because in the 2D
// world, we'd need to index array[blah][max+1] which risks undefined
// behavior.
- PartitionBucket*
- bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1];
- PartitionBucket buckets[kGenericNumBuckets];
-};
+ internal::PartitionBucket*
+ bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] =
+ {};
+ internal::PartitionBucket buckets[kGenericNumBuckets] = {};
+
+ // Public API.
+ void Init();
+
+ ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+ ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
+ ALWAYS_INLINE void Free(void* ptr);
-// Flags for PartitionAllocGenericFlags.
-enum PartitionAllocFlags {
- PartitionAllocReturnNull = 1 << 0,
+ NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
+ // Overload that may return nullptr if reallocation isn't possible. In this
+ // case, |ptr| remains valid.
+ NOINLINE void* TryRealloc(void* ptr, size_t new_size, const char* type_name);
+
+ ALWAYS_INLINE size_t ActualSize(size_t size);
+
+ void PurgeMemory(int flags);
+
+ void DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* partition_stats_dumper);
};
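A sketch of the Realloc()/TryRealloc() contract noted above, assuming an initialized PartitionAllocatorGeneric named generic_alloc:

void* buffer = generic_alloc.root()->Alloc(256, "Buffer");
void* grown = generic_alloc.root()->TryRealloc(buffer, 1 << 20, "Buffer");
if (grown) {
  buffer = grown;  // Resized (possibly moved).
} else {
  // Reallocation failed; |buffer| is still valid and still 256 bytes usable.
  generic_alloc.root()->Free(buffer);
}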
// Struct used to retrieve total memory usage of a partition. Used by
@@ -409,58 +211,24 @@ class BASE_EXPORT PartitionStatsDumper {
};
BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
-BASE_EXPORT void PartitionAllocInit(PartitionRoot*,
- size_t num_buckets,
- size_t max_allocation);
-BASE_EXPORT void PartitionAllocGenericInit(PartitionRootGeneric*);
-
-enum PartitionPurgeFlags {
- // Decommitting the ring list of empty pages is reasonably fast.
- PartitionPurgeDecommitEmptyPages = 1 << 0,
- // Discarding unused system pages is slower, because it involves walking all
- // freelists in all active partition pages of all buckets >= system page
- // size. It often frees a similar amount of memory to decommitting the empty
- // pages, though.
- PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
-};
-
-BASE_EXPORT void PartitionPurgeMemory(PartitionRoot*, int);
-BASE_EXPORT void PartitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
-
-BASE_EXPORT NOINLINE void* PartitionAllocSlowPath(PartitionRootBase*,
- int,
- size_t,
- PartitionBucket*);
-BASE_EXPORT NOINLINE void PartitionFreeSlowPath(PartitionPage*);
-BASE_EXPORT NOINLINE void* PartitionReallocGenericFlags(
- PartitionRootGeneric* root,
- int flags,
- void* ptr,
- size_t new_size,
- const char* type_name);
-BASE_EXPORT NOINLINE void* PartitionReallocGeneric(PartitionRootGeneric* root,
- void* ptr,
- size_t new_size,
- const char* type_name);
-
-BASE_EXPORT void PartitionDumpStats(PartitionRoot*,
- const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper*);
-BASE_EXPORT void PartitionDumpStatsGeneric(PartitionRootGeneric*,
- const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper*);
class BASE_EXPORT PartitionAllocHooks {
public:
typedef void AllocationHook(void* address, size_t, const char* type_name);
typedef void FreeHook(void* address);
+ // To unhook, call Set*Hook with nullptr.
static void SetAllocationHook(AllocationHook* hook) {
+ // Chained allocation hooks are not supported. Registering a non-null
+ // hook when a non-null hook is already registered indicates somebody is
+ // trying to overwrite a hook.
+ CHECK(!hook || !allocation_hook_);
allocation_hook_ = hook;
}
- static void SetFreeHook(FreeHook* hook) { free_hook_ = hook; }
+ static void SetFreeHook(FreeHook* hook) {
+ CHECK(!hook || !free_hook_);
+ free_hook_ = hook;
+ }
static void AllocationHookIfEnabled(void* address,
size_t size,
@@ -496,282 +264,69 @@ class BASE_EXPORT PartitionAllocHooks {
static FreeHook* free_hook_;
};
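A registration sketch for the checks added above; a hook must be cleared with nullptr before a different one may be installed:

void CountingAllocHook(void* address, size_t size, const char* type_name) {
  // e.g. accumulate |size| per |type_name| for profiling.
}
void CountingFreeHook(void* address) {}

void InstallHooks() {
  PartitionAllocHooks::SetAllocationHook(&CountingAllocHook);
  PartitionAllocHooks::SetFreeHook(&CountingFreeHook);
}

void RemoveHooks() {
  // Passing nullptr unhooks; setting a second non-null hook would CHECK.
  PartitionAllocHooks::SetAllocationHook(nullptr);
  PartitionAllocHooks::SetFreeHook(nullptr);
}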
-ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistMask(
- PartitionFreelistEntry* ptr) {
-// We use bswap on little endian as a fast mask for two reasons:
-// 1) If an object is freed and its vtable used where the attacker doesn't
-// get the chance to run allocations between the free and use, the vtable
-// dereference is likely to fault.
-// 2) If the attacker has a linear buffer overflow and elects to try and
-// corrupt a freelist pointer, partial pointer overwrite attacks are
-// thwarted.
-// For big endian, similar guarantees are arrived at with a negation.
-#if defined(ARCH_CPU_BIG_ENDIAN)
- uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
-#else
- uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
-#endif
- return reinterpret_cast<PartitionFreelistEntry*>(masked);
-}
-
-ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
-#if DCHECK_IS_ON()
- // Add space for cookies, checking for integer overflow. TODO(palmer):
- // Investigate the performance and code size implications of using
- // CheckedNumeric throughout PA.
- DCHECK(size + (2 * kCookieSize) > size);
- size += 2 * kCookieSize;
-#endif
- return size;
-}
-
-ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
-#if DCHECK_IS_ON()
- // Remove space for cookies.
- DCHECK(size >= 2 * kCookieSize);
- size -= 2 * kCookieSize;
-#endif
- return size;
-}
-
-ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
-#if DCHECK_IS_ON()
- // The value given to the application is actually just after the cookie.
- ptr = static_cast<char*>(ptr) - kCookieSize;
-#endif
- return ptr;
-}
-
-ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
-#if DCHECK_IS_ON()
- auto* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
- for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
- *cookie_ptr = kCookieValue[i];
-#endif
-}
-
-ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
-#if DCHECK_IS_ON()
- auto* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
- for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
- DCHECK(*cookie_ptr == kCookieValue[i]);
-#endif
-}
-
-ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
- auto pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
- DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
- // The metadata area is exactly one system page (the guard page) into the
- // super page.
- return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
-}
-
-ALWAYS_INLINE PartitionPage* PartitionPointerToPageNoAlignmentCheck(void* ptr) {
- auto pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
- auto* super_page_ptr =
- reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
- uintptr_t partition_page_index =
- (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
- // Index 0 is invalid because it is the metadata and guard area and
- // the last index is invalid because it is a guard page.
- DCHECK(partition_page_index);
- DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
- auto* page = reinterpret_cast<PartitionPage*>(
- PartitionSuperPageToMetadataArea(super_page_ptr) +
- (partition_page_index << kPageMetadataShift));
- // Partition pages in the same slot span can share the same page object.
- // Adjust for that.
- size_t delta = page->page_offset << kPageMetadataShift;
- page =
- reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
- return page;
-}
-
-ALWAYS_INLINE void* PartitionPageToPointer(const PartitionPage* page) {
- auto pointer_as_uint = reinterpret_cast<uintptr_t>(page);
- uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
- DCHECK(super_page_offset > kSystemPageSize);
- DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
- kPageMetadataSize));
- uintptr_t partition_page_index =
- (super_page_offset - kSystemPageSize) >> kPageMetadataShift;
- // Index 0 is invalid because it is the metadata area and the last index is
- // invalid because it is a guard page.
- DCHECK(partition_page_index);
- DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
- uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
- auto* ret = reinterpret_cast<void*>(
- super_page_base + (partition_page_index << kPartitionPageShift));
- return ret;
-}
-
-ALWAYS_INLINE PartitionPage* PartitionPointerToPage(void* ptr) {
- PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(ptr);
- // Checks that the pointer is a multiple of bucket size.
- DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
- reinterpret_cast<uintptr_t>(PartitionPageToPointer(page))) %
- page->bucket->slot_size));
- return page;
-}
-
-ALWAYS_INLINE bool PartitionBucketIsDirectMapped(
- const PartitionBucket* bucket) {
- return !bucket->num_system_pages_per_slot_span;
-}
-
-ALWAYS_INLINE size_t PartitionBucketBytes(const PartitionBucket* bucket) {
- return bucket->num_system_pages_per_slot_span * kSystemPageSize;
-}
-
-ALWAYS_INLINE uint16_t PartitionBucketSlots(const PartitionBucket* bucket) {
- return static_cast<uint16_t>(PartitionBucketBytes(bucket) /
- bucket->slot_size);
-}
-
-ALWAYS_INLINE size_t* PartitionPageGetRawSizePtr(PartitionPage* page) {
- // For single-slot buckets which span more than one partition page, we
- // have some spare metadata space to store the raw allocation size. We
- // can use this to report better statistics.
- PartitionBucket* bucket = page->bucket;
- if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
- return nullptr;
-
- DCHECK((bucket->slot_size % kSystemPageSize) == 0);
- DCHECK(PartitionBucketIsDirectMapped(bucket) ||
- PartitionBucketSlots(bucket) == 1);
- page++;
- return reinterpret_cast<size_t*>(&page->freelist_head);
-}
-
-ALWAYS_INLINE size_t PartitionPageGetRawSize(PartitionPage* page) {
- size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page);
- if (UNLIKELY(raw_size_ptr != nullptr))
- return *raw_size_ptr;
- return 0;
-}
-
-ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) {
- auto* extent_entry = reinterpret_cast<PartitionSuperPageExtentEntry*>(
- reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
- return extent_entry->root;
-}
-
-ALWAYS_INLINE bool PartitionPointerIsValid(void* ptr) {
- PartitionPage* page = PartitionPointerToPage(ptr);
- PartitionRootBase* root = PartitionPageToRoot(page);
- return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
-}
-
-ALWAYS_INLINE void* PartitionBucketAlloc(PartitionRootBase* root,
- int flags,
- size_t size,
- PartitionBucket* bucket) {
- PartitionPage* page = bucket->active_pages_head;
- // Check that this page is neither full nor freed.
- DCHECK(page->num_allocated_slots >= 0);
- void* ret = page->freelist_head;
- if (LIKELY(ret)) {
- // If these asserts fire, you probably corrupted memory.
- DCHECK(PartitionPointerIsValid(ret));
- // All large allocations must go through the slow path to correctly
- // update the size metadata.
- DCHECK(PartitionPageGetRawSize(page) == 0);
- PartitionFreelistEntry* new_head =
- PartitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
- page->freelist_head = new_head;
- page->num_allocated_slots++;
- } else {
- ret = PartitionAllocSlowPath(root, flags, size, bucket);
- DCHECK(!ret || PartitionPointerIsValid(ret));
- }
-#if DCHECK_IS_ON()
- if (!ret)
- return nullptr;
- // Fill the uninitialized pattern, and write the cookies.
- page = PartitionPointerToPage(ret);
- size_t slot_size = page->bucket->slot_size;
- size_t raw_size = PartitionPageGetRawSize(page);
- if (raw_size) {
- DCHECK(raw_size == size);
- slot_size = raw_size;
- }
- size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(slot_size);
- auto* char_ret = static_cast<char*>(ret);
- // The value given to the application is actually just after the cookie.
- ret = char_ret + kCookieSize;
- memset(ret, kUninitializedByte, no_cookie_size);
- PartitionCookieWriteValue(char_ret);
- PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
-#endif
- return ret;
+ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
+ return AllocFlags(0, size, type_name);
}
-ALWAYS_INLINE void* PartitionAlloc(PartitionRoot* root,
- size_t size,
- const char* type_name) {
+ALWAYS_INLINE void* PartitionRoot::AllocFlags(int flags,
+ size_t size,
+ const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
void* result = malloc(size);
CHECK(result);
return result;
#else
size_t requested_size = size;
- size = PartitionCookieSizeAdjustAdd(size);
- DCHECK(root->initialized);
+ size = internal::PartitionCookieSizeAdjustAdd(size);
+ DCHECK(this->initialized);
size_t index = size >> kBucketShift;
- DCHECK(index < root->num_buckets);
+ DCHECK(index < this->num_buckets);
DCHECK(size == index << kBucketShift);
- PartitionBucket* bucket = &root->buckets()[index];
- void* result = PartitionBucketAlloc(root, 0, size, bucket);
+ internal::PartitionBucket* bucket = &this->buckets()[index];
+ void* result = AllocFromBucket(bucket, flags, size);
PartitionAllocHooks::AllocationHookIfEnabled(result, requested_size,
type_name);
return result;
#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
}
-ALWAYS_INLINE void PartitionFreeWithPage(void* ptr, PartitionPage* page) {
-// If these asserts fire, you probably corrupted memory.
-#if DCHECK_IS_ON()
- size_t slot_size = page->bucket->slot_size;
- size_t raw_size = PartitionPageGetRawSize(page);
- if (raw_size)
- slot_size = raw_size;
- PartitionCookieCheckValue(ptr);
- PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
- kCookieSize);
- memset(ptr, kFreedByte, slot_size);
+ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ return false;
+#else
+ return true;
#endif
- DCHECK(page->num_allocated_slots);
- PartitionFreelistEntry* freelist_head = page->freelist_head;
- DCHECK(!freelist_head || PartitionPointerIsValid(freelist_head));
- CHECK(ptr != freelist_head); // Catches an immediate double free.
- // Look for double free one level deeper in debug.
- DCHECK(!freelist_head || ptr != PartitionFreelistMask(freelist_head->next));
- auto* entry = static_cast<PartitionFreelistEntry*>(ptr);
- entry->next = PartitionFreelistMask(freelist_head);
- page->freelist_head = entry;
- --page->num_allocated_slots;
- if (UNLIKELY(page->num_allocated_slots <= 0)) {
- PartitionFreeSlowPath(page);
- } else {
- // All single-slot allocations must go through the slow path to
- // correctly update the size metadata.
- DCHECK(PartitionPageGetRawSize(page) == 0);
- }
+}
+
+ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
+ // No need to lock here. Only |ptr| being freed by another thread could
+ // cause trouble, and the caller is responsible for that not happening.
+ DCHECK(PartitionAllocSupportsGetSize());
+ ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+ internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ DCHECK(internal::PartitionRootBase::IsValidPage(page));
+ size_t size = page->bucket->slot_size;
+ return internal::PartitionCookieSizeAdjustSubtract(size);
}
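Usage sketch: the reported size is the slot size minus any cookie adjustment, so it can exceed the requested size. Assuming an initialized generic allocator as above:

void* p = generic_alloc.root()->Alloc(300, "Example");
if (PartitionAllocSupportsGetSize()) {
  size_t usable = PartitionAllocGetSize(p);  // e.g. 320 in a release build:
                                             // the 300-byte request rounds up
                                             // to its bucket's slot size.
}
generic_alloc.root()->Free(p);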
ALWAYS_INLINE void PartitionFree(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
free(ptr);
#else
- PartitionAllocHooks::FreeHookIfEnabled(ptr);
- ptr = PartitionCookieFreePointerAdjust(ptr);
- DCHECK(PartitionPointerIsValid(ptr));
- PartitionPage* page = PartitionPointerToPage(ptr);
- PartitionFreeWithPage(ptr, page);
+ void* original_ptr = ptr;
+ // TODO(palmer): Check ptr alignment before continuing. Shall we do the check
+ // inside PartitionCookieFreePointerAdjust?
+ PartitionAllocHooks::FreeHookIfEnabled(original_ptr);
+ ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+ internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ DCHECK(internal::PartitionRootBase::IsValidPage(page));
+ page->Free(ptr);
#endif
}
-ALWAYS_INLINE PartitionBucket* PartitionGenericSizeToBucket(
+ALWAYS_INLINE internal::PartitionBucket* PartitionGenericSizeToBucket(
PartitionRootGeneric* root,
size_t size) {
size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
@@ -780,9 +335,10 @@ ALWAYS_INLINE PartitionBucket* PartitionGenericSizeToBucket(
(kGenericNumBucketsPerOrder - 1);
// And if the remaining bits are non-zero we must bump the bucket up.
size_t sub_order_index = size & root->order_sub_index_masks[order];
- PartitionBucket* bucket =
+ internal::PartitionBucket* bucket =
root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
order_index + !!sub_order_index];
+ CHECK(bucket);
DCHECK(!bucket->slot_size || bucket->slot_size >= size);
DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
return bucket;
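A worked example of this lookup, assuming 64-bit size_t and the usual kGenericNumBucketsPerOrderBits == 3, for size == 300:

// order           = 64 - clz(300) = 9  (the MSB of 300 is bit 9, 1-indexed)
// order_index     = (300 >> order_index_shifts[9]) & 7 = (300 >> 5) & 7 = 1
// sub_order_index = 300 & 31 = 12, non-zero, so !!sub_order_index bumps by 1
// lookup slot     = (9 << 3) + 1 + 1 = 74
// Order-9 buckets are 256, 288, 320, ..., 480, so 300 lands in the 320-byte
// bucket, the smallest one that can hold it.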
@@ -792,119 +348,109 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
int flags,
size_t size,
const char* type_name) {
+ DCHECK(flags < PartitionAllocLastFlag << 1);
+
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- void* result = malloc(size);
+ const bool zero_fill = flags & PartitionAllocZeroFill;
+ void* result = zero_fill ? calloc(1, size) : malloc(size);
CHECK(result || flags & PartitionAllocReturnNull);
return result;
#else
DCHECK(root->initialized);
size_t requested_size = size;
- size = PartitionCookieSizeAdjustAdd(size);
- PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
+ size = internal::PartitionCookieSizeAdjustAdd(size);
+ internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
void* ret = nullptr;
{
subtle::SpinLock::Guard guard(root->lock);
- ret = PartitionBucketAlloc(root, flags, size, bucket);
+ ret = root->AllocFromBucket(bucket, flags, size);
}
PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name);
+
return ret;
#endif
}
-ALWAYS_INLINE void* PartitionAllocGeneric(PartitionRootGeneric* root,
- size_t size,
- const char* type_name) {
- return PartitionAllocGenericFlags(root, 0, size, type_name);
+ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size,
+ const char* type_name) {
+ return PartitionAllocGenericFlags(this, 0, size, type_name);
}
-ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
+ALWAYS_INLINE void* PartitionRootGeneric::AllocFlags(int flags,
+ size_t size,
+ const char* type_name) {
+ return PartitionAllocGenericFlags(this, flags, size, type_name);
+}
+
+ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
free(ptr);
#else
- DCHECK(root->initialized);
+ DCHECK(this->initialized);
if (UNLIKELY(!ptr))
return;
PartitionAllocHooks::FreeHookIfEnabled(ptr);
- ptr = PartitionCookieFreePointerAdjust(ptr);
- DCHECK(PartitionPointerIsValid(ptr));
- PartitionPage* page = PartitionPointerToPage(ptr);
+ ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+ internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ DCHECK(IsValidPage(page));
{
- subtle::SpinLock::Guard guard(root->lock);
- PartitionFreeWithPage(ptr, page);
+ subtle::SpinLock::Guard guard(this->lock);
+ page->Free(ptr);
}
#endif
}
-ALWAYS_INLINE size_t PartitionDirectMapSize(size_t size) {
- // Caller must check that the size is not above the kGenericMaxDirectMapped
- // limit before calling. This also guards against integer overflow in the
- // calculation here.
- DCHECK(size <= kGenericMaxDirectMapped);
- return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
-}
+BASE_EXPORT void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
+ int flags,
+ void* ptr,
+ size_t new_size,
+ const char* type_name);
-ALWAYS_INLINE size_t PartitionAllocActualSize(PartitionRootGeneric* root,
- size_t size) {
+ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return size;
#else
- DCHECK(root->initialized);
- size = PartitionCookieSizeAdjustAdd(size);
- PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
- if (LIKELY(!PartitionBucketIsDirectMapped(bucket))) {
+ DCHECK(this->initialized);
+ size = internal::PartitionCookieSizeAdjustAdd(size);
+ internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
+ if (LIKELY(!bucket->is_direct_mapped())) {
size = bucket->slot_size;
} else if (size > kGenericMaxDirectMapped) {
// Too large to allocate => return the size unchanged.
} else {
- DCHECK(bucket == &PartitionRootBase::gPagedBucket);
- size = PartitionDirectMapSize(size);
+ size = internal::PartitionBucket::get_direct_map_size(size);
}
- return PartitionCookieSizeAdjustSubtract(size);
+ return internal::PartitionCookieSizeAdjustSubtract(size);
#endif
}
-ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
-#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- return false;
-#else
- return true;
-#endif
-}
-
-ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
- // No need to lock here. Only |ptr| being freed by another thread could
- // cause trouble, and the caller is responsible for that not happening.
- DCHECK(PartitionAllocSupportsGetSize());
- ptr = PartitionCookieFreePointerAdjust(ptr);
- DCHECK(PartitionPointerIsValid(ptr));
- PartitionPage* page = PartitionPointerToPage(ptr);
- size_t size = page->bucket->slot_size;
- return PartitionCookieSizeAdjustSubtract(size);
-}
-
-// N (or more accurately, N - sizeof(void*)) represents the largest size in
-// bytes that will be handled by a SizeSpecificPartitionAllocator.
-// Attempts to partitionAlloc() more than this amount will fail.
template <size_t N>
class SizeSpecificPartitionAllocator {
public:
+ SizeSpecificPartitionAllocator() {
+ memset(actual_buckets_, 0,
+ sizeof(internal::PartitionBucket) * pdfium::size(actual_buckets_));
+ }
+ ~SizeSpecificPartitionAllocator() = default;
static const size_t kMaxAllocation = N - kAllocationGranularity;
static const size_t kNumBuckets = N / kAllocationGranularity;
- void init() {
- PartitionAllocInit(&partition_root_, kNumBuckets, kMaxAllocation);
- }
+ void init() { partition_root_.Init(kNumBuckets, kMaxAllocation); }
ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
private:
PartitionRoot partition_root_;
- PartitionBucket actual_buckets_[kNumBuckets];
+ internal::PartitionBucket actual_buckets_[kNumBuckets];
};
-class PartitionAllocatorGeneric {
+class BASE_EXPORT PartitionAllocatorGeneric {
public:
- void init() { PartitionAllocGenericInit(&partition_root_); }
+ PartitionAllocatorGeneric();
+ ~PartitionAllocatorGeneric();
+
+ void init() { partition_root_.Init(); }
ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
private:
@@ -914,4 +460,4 @@ class PartitionAllocatorGeneric {
} // namespace base
} // namespace pdfium
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc_constants.h b/third_party/base/allocator/partition_allocator/partition_alloc_constants.h
new file mode 100644
index 0000000000..cd9108cec2
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_alloc_constants.h
@@ -0,0 +1,169 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
+
+#include <limits.h>
+
+#include "third_party/base/allocator/partition_allocator/page_allocator_constants.h"
+#include "third_party/base/logging.h"
+
+namespace pdfium {
+namespace base {
+
+// Allocation granularity of sizeof(void*) bytes.
+static const size_t kAllocationGranularity = sizeof(void*);
+static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
+static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
+
+// Underlying partition storage pages are a power-of-two size. It is typical
+// for a partition page to be based on multiple system pages. Most references to
+// "page" refer to partition pages.
+// We also have the concept of "super pages" -- these are the underlying system
+// allocations we make. Super pages contain multiple partition pages inside them
+// and include space for a small amount of metadata per partition page.
+// Inside super pages, we store "slot spans". A slot span is a contiguous range
+// of one or more partition pages that stores allocations of the same size.
+// Slot span sizes are adjusted depending on the allocation size, to make sure
+// the packing does not lead to unused (wasted) space at the end of the last
+// system page of the span. For our current max slot span size of 64k and other
+// constant values, we pack _all_ PartitionRootGeneric::Alloc() sizes perfectly
+// up against the end of a system page.
+#if defined(_MIPS_ARCH_LOONGSON)
+static const size_t kPartitionPageShift = 16; // 64KB
+#else
+static const size_t kPartitionPageShift = 14; // 16KB
+#endif
+static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
+static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
+static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
+static const size_t kMaxPartitionPagesPerSlotSpan = 4;
+
+// To avoid fragmentation via never-used freelist entries, we hand out partition
+// freelist sections gradually, in units of the dominant system page size.
+// What we're actually doing is avoiding filling the full partition page (16 KB)
+// with freelist pointers right away. Writing freelist pointers will fault and
+// dirty a private page, which is very wasteful if we never actually store
+// objects there.
+static const size_t kNumSystemPagesPerPartitionPage =
+ kPartitionPageSize / kSystemPageSize;
+static const size_t kMaxSystemPagesPerSlotSpan =
+ kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
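+
+// For example (illustrative, assuming 4KB system pages and 16KB partition
+// pages): kNumSystemPagesPerPartitionPage is 4 and kMaxSystemPagesPerSlotSpan
+// is 16, i.e. a slot span spans at most 64KB.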
+
+// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
+// These chunks are called "super pages". We do this so that we can store
+// metadata in the first few pages of each 2MB aligned section. This leads to
+// a very fast free(). We specifically choose 2MB because this virtual address
+// block represents a full but single PTE allocation on ARM, ia32 and x64.
+//
+// The layout of the super page is as follows. The sizes below are the same
+// for 32 bit and 64 bit.
+//
+// | Guard page (4KB) |
+// | Metadata page (4KB) |
+// | Guard pages (8KB) |
+// | Slot span |
+// | Slot span |
+// | ... |
+// | Slot span |
+// | Guard page (4KB) |
+//
+// - Each slot span is a contiguous range of one or more PartitionPages.
+// - The metadata page has the following format. Note that a PartitionPage
+//   that is not at the head of a slot span is "unused". In other words,
+// the metadata for the slot span is stored only in the first PartitionPage
+// of the slot span. Metadata accesses to other PartitionPages are
+// redirected to the first PartitionPage.
+//
+// | SuperPageExtentEntry (32B) |
+// | PartitionPage of slot span 1 (32B, used) |
+// | PartitionPage of slot span 1 (32B, unused) |
+// | PartitionPage of slot span 1 (32B, unused) |
+// | PartitionPage of slot span 2 (32B, used) |
+// | PartitionPage of slot span 3 (32B, used) |
+// | ... |
+// | PartitionPage of slot span N (32B, unused) |
+//
+// A direct mapped page has a similar layout to fake it looking like a super
+// page:
+//
+// | Guard page (4KB) |
+// | Metadata page (4KB) |
+// | Guard pages (8KB) |
+// | Direct mapped object |
+// | Guard page (4KB) |
+//
+// - The metadata page has the following layout:
+//
+// | SuperPageExtentEntry (32B) |
+// | PartitionPage (32B) |
+// | PartitionBucket (32B) |
+// | PartitionDirectMapExtent (8B) |
+static const size_t kSuperPageShift = 21; // 2MB
+static const size_t kSuperPageSize = 1 << kSuperPageShift;
+static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
+static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
+static const size_t kNumPartitionPagesPerSuperPage =
+ kSuperPageSize / kPartitionPageSize;
+
+// The following kGeneric* constants apply to the generic variants of the API.
+// The "order" of an allocation is closely related to the power-of-two size of
+// the allocation. More precisely, the order is the bit index of the
+// most-significant-bit in the allocation size, where the bit numbers starts
+// at index 1 for the least-significant-bit.
+// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
+// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
+static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
+static const size_t kGenericMaxBucketedOrder =
+ 20; // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB)
+static const size_t kGenericNumBucketedOrders =
+ (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
+// Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
+// 160, ..., 240:
+static const size_t kGenericNumBucketsPerOrderBits = 3;
+static const size_t kGenericNumBucketsPerOrder =
+ 1 << kGenericNumBucketsPerOrderBits;
+static const size_t kGenericNumBuckets =
+ kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
+static const size_t kGenericSmallestBucket = 1
+ << (kGenericMinBucketedOrder - 1);
+static const size_t kGenericMaxBucketSpacing =
+ 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
+static const size_t kGenericMaxBucketed =
+ (1 << (kGenericMaxBucketedOrder - 1)) +
+ ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
+static const size_t kGenericMinDirectMappedDownsize =
+ kGenericMaxBucketed +
+ 1; // Limit when downsizing a direct mapping using realloc().
+static const size_t kGenericMaxDirectMapped =
+ (1UL << 31) + kPageAllocationGranularity; // 2 GB plus one more page.
+static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
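+
+// Worked example (illustrative): a request of 200 bytes has its
+// most-significant bit at index 8, so it falls in order 8, whose buckets are
+// spaced 1 << (8 - 1 - kGenericNumBucketsPerOrderBits) = 16 bytes apart
+// (128, 144, ..., 240); 200 therefore rounds up to the 208-byte bucket.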
+
+// Constant for the memory reclaim logic.
+static const size_t kMaxFreeableSpans = 16;
+
+// If the total size in bytes of allocated but not committed pages exceeds this
+// value (which probably indicates an "out of virtual address space" crash),
+// a special crash stack trace is generated at |PartitionOutOfMemory|.
+// This is to distinguish "out of virtual address space" from
+// "out of physical memory" in crash reports.
+static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GB
+
+// These two byte values match tcmalloc.
+static const unsigned char kUninitializedByte = 0xAB;
+static const unsigned char kFreedByte = 0xCD;
+
+// Flags for PartitionAllocGenericFlags.
+enum PartitionAllocFlags {
+ PartitionAllocReturnNull = 1 << 0,
+ PartitionAllocZeroFill = 1 << 1,
+
+ PartitionAllocLastFlag = PartitionAllocZeroFill
+};
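+
+// Example (illustrative): passing
+// PartitionAllocReturnNull | PartitionAllocZeroFill to
+// PartitionAllocGenericFlags() requests zeroed memory and a nullptr result
+// (rather than an OOM crash) when the allocation cannot be satisfied.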
+
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
diff --git a/third_party/base/allocator/partition_allocator/partition_bucket.cc b/third_party/base/allocator/partition_allocator/partition_bucket.cc
new file mode 100644
index 0000000000..b540adb14d
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_bucket.cc
@@ -0,0 +1,568 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/partition_bucket.h"
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/oom.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
+#include "third_party/base/allocator/partition_allocator/partition_oom.h"
+#include "third_party/base/allocator/partition_allocator/partition_page.h"
+#include "third_party/base/allocator/partition_allocator/partition_root_base.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+namespace {
+
+ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
+ int flags,
+ size_t raw_size) {
+ size_t size = PartitionBucket::get_direct_map_size(raw_size);
+
+ // Because we need to fake looking like a super page, we need to allocate
+ // a bunch of system pages more than "size":
+ // - The first few system pages are the partition page in which the super
+ // page metadata is stored. We fault just one system page out of a partition
+ // page sized clump.
+ // - We add a trailing guard page on 32-bit (on 64-bit we rely on the
+ // massive address space plus randomization instead).
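+  //
+  // Illustrative example (assuming 4KB system pages and 16KB partition pages
+  // on a 32-bit build): a 100KB |size| needs map_size =
+  // 100KB + 16KB (metadata) + 4KB (trailing guard) = 120KB, before being
+  // rounded up to the page allocation granularity.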
+ size_t map_size = size + kPartitionPageSize;
+#if !defined(ARCH_CPU_64_BITS)
+ map_size += kSystemPageSize;
+#endif
+ // Round up to the allocation granularity.
+ map_size += kPageAllocationGranularityOffsetMask;
+ map_size &= kPageAllocationGranularityBaseMask;
+
+ char* ptr = reinterpret_cast<char*>(
+ AllocPages(nullptr, map_size, kSuperPageSize, PageReadWrite));
+ if (UNLIKELY(!ptr))
+ return nullptr;
+
+ size_t committed_page_size = size + kSystemPageSize;
+ root->total_size_of_direct_mapped_pages += committed_page_size;
+ root->IncreaseCommittedPages(committed_page_size);
+
+ char* slot = ptr + kPartitionPageSize;
+ CHECK(SetSystemPagesAccess(ptr + (kSystemPageSize * 2),
+ kPartitionPageSize - (kSystemPageSize * 2),
+ PageInaccessible));
+#if !defined(ARCH_CPU_64_BITS)
+ CHECK(SetSystemPagesAccess(ptr, kSystemPageSize, PageInaccessible));
+ CHECK(SetSystemPagesAccess(slot + size, kSystemPageSize, PageInaccessible));
+#endif
+
+ PartitionSuperPageExtentEntry* extent =
+ reinterpret_cast<PartitionSuperPageExtentEntry*>(
+ PartitionSuperPageToMetadataArea(ptr));
+ extent->root = root;
+ // The new structures are all located inside a fresh system page so they
+ // will all be zeroed out. These DCHECKs are for documentation.
+ DCHECK(!extent->super_page_base);
+ DCHECK(!extent->super_pages_end);
+ DCHECK(!extent->next);
+ PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(slot);
+ PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(
+ reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
+ DCHECK(!page->next_page);
+ DCHECK(!page->num_allocated_slots);
+ DCHECK(!page->num_unprovisioned_slots);
+ DCHECK(!page->page_offset);
+ DCHECK(!page->empty_cache_index);
+ page->bucket = bucket;
+ page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
+ PartitionFreelistEntry* next_entry =
+ reinterpret_cast<PartitionFreelistEntry*>(slot);
+ next_entry->next = PartitionFreelistEntry::Transform(nullptr);
+
+ DCHECK(!bucket->active_pages_head);
+ DCHECK(!bucket->empty_pages_head);
+ DCHECK(!bucket->decommitted_pages_head);
+ DCHECK(!bucket->num_system_pages_per_slot_span);
+ DCHECK(!bucket->num_full_pages);
+ bucket->slot_size = size;
+
+ PartitionDirectMapExtent* map_extent =
+ PartitionDirectMapExtent::FromPage(page);
+ map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
+ map_extent->bucket = bucket;
+
+ // Maintain the doubly-linked list of all direct mappings.
+ map_extent->next_extent = root->direct_map_list;
+ if (map_extent->next_extent)
+ map_extent->next_extent->prev_extent = map_extent;
+ map_extent->prev_extent = nullptr;
+ root->direct_map_list = map_extent;
+
+ return page;
+}
+
+} // namespace
+
+// static
+PartitionBucket PartitionBucket::sentinel_bucket_;
+
+PartitionBucket* PartitionBucket::get_sentinel_bucket() {
+ return &sentinel_bucket_;
+}
+
+// TODO(ajwong): This seems to interact badly with
+// get_pages_per_slot_span() which rounds the value from this up to a
+// multiple of kNumSystemPagesPerPartitionPage (aka 4) anyways.
+// http://crbug.com/776537
+//
+// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
+// both used and unused pages.
+// http://crbug.com/776537
+uint8_t PartitionBucket::get_system_pages_per_slot_span() {
+ // This works out reasonably for the current bucket sizes of the generic
+ // allocator, and the current values of partition page size and constants.
+ // Specifically, we have enough room to always pack the slots perfectly into
+ // some number of system pages. The only waste is the waste associated with
+ // unfaulted pages (i.e. wasted address space).
+ // TODO: we end up using a lot of system pages for very small sizes. For
+ // example, we'll use 12 system pages for slot size 24. The slot size is
+ // so small that the waste would be tiny with just 4, or 1, system pages.
+ // Later, we can investigate whether there are anti-fragmentation benefits
+ // to using fewer system pages.
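+  // Illustrative walk-through (assuming 4KB system pages): for slot_size =
+  // 24, i = 3 gives 12288 / 24 = 512 slots with zero leftover bytes but a
+  // one-page unfaulted penalty, while i = 12 gives 49152 / 24 = 2048 slots
+  // with zero waste of either kind, which is why the TODO above notes that
+  // 12 pages are chosen for slot size 24.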
+ double best_waste_ratio = 1.0f;
+ uint16_t best_pages = 0;
+ if (this->slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+ // TODO(ajwong): Why is there a DCHECK here for this?
+ // http://crbug.com/776537
+ DCHECK(!(this->slot_size % kSystemPageSize));
+ best_pages = static_cast<uint16_t>(this->slot_size / kSystemPageSize);
+ // TODO(ajwong): Should this be checking against
+ // kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
+ // http://crbug.com/776537
+ CHECK(best_pages < (1 << 8));
+ return static_cast<uint8_t>(best_pages);
+ }
+ DCHECK(this->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+ for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
+ i <= kMaxSystemPagesPerSlotSpan; ++i) {
+ size_t page_size = kSystemPageSize * i;
+ size_t num_slots = page_size / this->slot_size;
+ size_t waste = page_size - (num_slots * this->slot_size);
+ // Leaving a page unfaulted is not free; the page will occupy an empty page
+ // table entry. Make a simple attempt to account for that.
+ //
+ // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
+ // regardless of whether or not they are wasted. Should it just
+ // be waste += i * sizeof(void*)?
+ // http://crbug.com/776537
+ size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1);
+ size_t num_unfaulted_pages =
+ num_remainder_pages
+ ? (kNumSystemPagesPerPartitionPage - num_remainder_pages)
+ : 0;
+ waste += sizeof(void*) * num_unfaulted_pages;
+ double waste_ratio =
+ static_cast<double>(waste) / static_cast<double>(page_size);
+ if (waste_ratio < best_waste_ratio) {
+ best_waste_ratio = waste_ratio;
+ best_pages = i;
+ }
+ }
+ DCHECK(best_pages > 0);
+ CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
+ return static_cast<uint8_t>(best_pages);
+}
+
+void PartitionBucket::Init(uint32_t new_slot_size) {
+ slot_size = new_slot_size;
+ active_pages_head = PartitionPage::get_sentinel_page();
+ empty_pages_head = nullptr;
+ decommitted_pages_head = nullptr;
+ num_full_pages = 0;
+ num_system_pages_per_slot_span = get_system_pages_per_slot_span();
+}
+
+NOINLINE void PartitionBucket::OnFull() {
+ OOM_CRASH();
+}
+
+ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
+ PartitionRootBase* root,
+ int flags,
+ uint16_t num_partition_pages) {
+ DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
+ kPartitionPageSize));
+ DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
+ kPartitionPageSize));
+ DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
+ size_t total_size = kPartitionPageSize * num_partition_pages;
+ size_t num_partition_pages_left =
+ (root->next_partition_page_end - root->next_partition_page) >>
+ kPartitionPageShift;
+ if (LIKELY(num_partition_pages_left >= num_partition_pages)) {
+ // In this case, we can still hand out pages from the current super page
+ // allocation.
+ char* ret = root->next_partition_page;
+
+    // Fresh System Pages in the SuperPages are decommitted. Commit them
+ // before vending them back.
+ CHECK(SetSystemPagesAccess(ret, total_size, PageReadWrite));
+
+ root->next_partition_page += total_size;
+ root->IncreaseCommittedPages(total_size);
+ return ret;
+ }
+
+  // Need a new super page. We want to allocate super pages in a contiguous
+  // address region as much as possible. This is important for not causing
+  // page table bloat and not fragmenting address spaces on 32-bit
+  // architectures.
+ char* requested_address = root->next_super_page;
+ char* super_page = reinterpret_cast<char*>(AllocPages(
+ requested_address, kSuperPageSize, kSuperPageSize, PageReadWrite));
+ if (UNLIKELY(!super_page))
+ return nullptr;
+
+ root->total_size_of_super_pages += kSuperPageSize;
+ root->IncreaseCommittedPages(total_size);
+
+ // |total_size| MUST be less than kSuperPageSize - (kPartitionPageSize*2).
+ // This is a trustworthy value because num_partition_pages is not user
+ // controlled.
+ //
+ // TODO(ajwong): Introduce a DCHECK.
+ root->next_super_page = super_page + kSuperPageSize;
+ char* ret = super_page + kPartitionPageSize;
+ root->next_partition_page = ret + total_size;
+ root->next_partition_page_end = root->next_super_page - kPartitionPageSize;
+ // Make the first partition page in the super page a guard page, but leave a
+ // hole in the middle.
+ // This is where we put page metadata and also a tiny amount of extent
+ // metadata.
+ CHECK(SetSystemPagesAccess(super_page, kSystemPageSize, PageInaccessible));
+ CHECK(SetSystemPagesAccess(super_page + (kSystemPageSize * 2),
+ kPartitionPageSize - (kSystemPageSize * 2),
+ PageInaccessible));
+ // CHECK(SetSystemPagesAccess(super_page + (kSuperPageSize -
+ // kPartitionPageSize),
+ // kPartitionPageSize, PageInaccessible));
+ // All remaining slotspans for the unallocated PartitionPages inside the
+ // SuperPage are conceptually decommitted. Correctly set the state here
+ // so they do not occupy resources.
+ //
+ // TODO(ajwong): Refactor Page Allocator API so the SuperPage comes in
+  // decommitted initially.
+ CHECK(SetSystemPagesAccess(super_page + kPartitionPageSize + total_size,
+ (kSuperPageSize - kPartitionPageSize - total_size),
+ PageInaccessible));
+
+ // If we were after a specific address, but didn't get it, assume that
+ // the system chose a lousy address. Here most OS'es have a default
+  // the system chose a lousy address. Here most OSes have a default
+ // distributions will allocate the mapping directly before the last
+ // successful mapping, which is far from random. So we just get fresh
+ // randomness for the next mapping attempt.
+ if (requested_address && requested_address != super_page)
+ root->next_super_page = nullptr;
+
+ // We allocated a new super page so update super page metadata.
+ // First check if this is a new extent or not.
+ PartitionSuperPageExtentEntry* latest_extent =
+ reinterpret_cast<PartitionSuperPageExtentEntry*>(
+ PartitionSuperPageToMetadataArea(super_page));
+ // By storing the root in every extent metadata object, we have a fast way
+ // to go from a pointer within the partition to the root object.
+ latest_extent->root = root;
+ // Most new extents will be part of a larger extent, and these three fields
+ // are unused, but we initialize them to 0 so that we get a clear signal
+ // in case they are accidentally used.
+ latest_extent->super_page_base = nullptr;
+ latest_extent->super_pages_end = nullptr;
+ latest_extent->next = nullptr;
+
+ PartitionSuperPageExtentEntry* current_extent = root->current_extent;
+ bool is_new_extent = (super_page != requested_address);
+ if (UNLIKELY(is_new_extent)) {
+ if (UNLIKELY(!current_extent)) {
+ DCHECK(!root->first_extent);
+ root->first_extent = latest_extent;
+ } else {
+ DCHECK(current_extent->super_page_base);
+ current_extent->next = latest_extent;
+ }
+ root->current_extent = latest_extent;
+ latest_extent->super_page_base = super_page;
+ latest_extent->super_pages_end = super_page + kSuperPageSize;
+ } else {
+ // We allocated next to an existing extent so just nudge the size up a
+ // little.
+ DCHECK(current_extent->super_pages_end);
+ current_extent->super_pages_end += kSuperPageSize;
+ DCHECK(ret >= current_extent->super_page_base &&
+ ret < current_extent->super_pages_end);
+ }
+ return ret;
+}
+
+ALWAYS_INLINE uint16_t PartitionBucket::get_pages_per_slot_span() {
+ // Rounds up to nearest multiple of kNumSystemPagesPerPartitionPage.
+ return (num_system_pages_per_slot_span +
+ (kNumSystemPagesPerPartitionPage - 1)) /
+ kNumSystemPagesPerPartitionPage;
+}
+
+ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) {
+ // The bucket never changes. We set it up once.
+ page->bucket = this;
+ page->empty_cache_index = -1;
+
+ page->Reset();
+
+ // If this page has just a single slot, do not set up page offsets for any
+ // page metadata other than the first one. This ensures that attempts to
+ // touch invalid page metadata fail.
+ if (page->num_unprovisioned_slots == 1)
+ return;
+
+ uint16_t num_partition_pages = get_pages_per_slot_span();
+ char* page_char_ptr = reinterpret_cast<char*>(page);
+ for (uint16_t i = 1; i < num_partition_pages; ++i) {
+ page_char_ptr += kPageMetadataSize;
+ PartitionPage* secondary_page =
+ reinterpret_cast<PartitionPage*>(page_char_ptr);
+ secondary_page->page_offset = i;
+ }
+}
+
+ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) {
+ DCHECK(page != PartitionPage::get_sentinel_page());
+ uint16_t num_slots = page->num_unprovisioned_slots;
+ DCHECK(num_slots);
+ // We should only get here when _every_ slot is either used or unprovisioned.
+ // (The third state is "on the freelist". If we have a non-empty freelist, we
+ // should not get here.)
+ DCHECK(num_slots + page->num_allocated_slots == this->get_slots_per_span());
+  // Similarly, explicitly make sure that the freelist is empty.
+ DCHECK(!page->freelist_head);
+ DCHECK(page->num_allocated_slots >= 0);
+
+ size_t size = this->slot_size;
+ char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
+ char* return_object = base + (size * page->num_allocated_slots);
+ char* first_freelist_pointer = return_object + size;
+ char* first_freelist_pointer_extent =
+ first_freelist_pointer + sizeof(PartitionFreelistEntry*);
+ // Our goal is to fault as few system pages as possible. We calculate the
+ // page containing the "end" of the returned slot, and then allow freelist
+ // pointers to be written up to the end of that page.
+ char* sub_page_limit = reinterpret_cast<char*>(
+ RoundUpToSystemPage(reinterpret_cast<size_t>(first_freelist_pointer)));
+ char* slots_limit = return_object + (size * num_slots);
+ char* freelist_limit = sub_page_limit;
+ if (UNLIKELY(slots_limit < freelist_limit))
+ freelist_limit = slots_limit;
+
+ uint16_t num_new_freelist_entries = 0;
+ if (LIKELY(first_freelist_pointer_extent <= freelist_limit)) {
+ // Only consider used space in the slot span. If we consider wasted
+ // space, we may get an off-by-one when a freelist pointer fits in the
+ // wasted space, but a slot does not.
+ // We know we can fit at least one freelist pointer.
+ num_new_freelist_entries = 1;
+ // Any further entries require space for the whole slot span.
+ num_new_freelist_entries += static_cast<uint16_t>(
+ (freelist_limit - first_freelist_pointer_extent) / size);
+ }
+
+ // We always return an object slot -- that's the +1 below.
+  // We do not necessarily create any new freelist entries, because we cross
+ // sub page boundaries frequently for large bucket sizes.
+ DCHECK(num_new_freelist_entries + 1 <= num_slots);
+ num_slots -= (num_new_freelist_entries + 1);
+ page->num_unprovisioned_slots = num_slots;
+ page->num_allocated_slots++;
+
+ if (LIKELY(num_new_freelist_entries)) {
+ char* freelist_pointer = first_freelist_pointer;
+ PartitionFreelistEntry* entry =
+ reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
+ page->freelist_head = entry;
+ while (--num_new_freelist_entries) {
+ freelist_pointer += size;
+ PartitionFreelistEntry* next_entry =
+ reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
+ entry->next = PartitionFreelistEntry::Transform(next_entry);
+ entry = next_entry;
+ }
+ entry->next = PartitionFreelistEntry::Transform(nullptr);
+ } else {
+ page->freelist_head = nullptr;
+ }
+ return return_object;
+}
+
+bool PartitionBucket::SetNewActivePage() {
+ PartitionPage* page = this->active_pages_head;
+ if (page == PartitionPage::get_sentinel_page())
+ return false;
+
+ PartitionPage* next_page;
+
+ for (; page; page = next_page) {
+ next_page = page->next_page;
+ DCHECK(page->bucket == this);
+ DCHECK(page != this->empty_pages_head);
+ DCHECK(page != this->decommitted_pages_head);
+
+ if (LIKELY(page->is_active())) {
+ // This page is usable because it has freelist entries, or has
+ // unprovisioned slots we can create freelist entries from.
+ this->active_pages_head = page;
+ return true;
+ }
+
+ // Deal with empty and decommitted pages.
+ if (LIKELY(page->is_empty())) {
+ page->next_page = this->empty_pages_head;
+ this->empty_pages_head = page;
+ } else if (LIKELY(page->is_decommitted())) {
+ page->next_page = this->decommitted_pages_head;
+ this->decommitted_pages_head = page;
+ } else {
+ DCHECK(page->is_full());
+ // If we get here, we found a full page. Skip over it too, and also
+ // tag it as full (via a negative value). We need it tagged so that
+ // free'ing can tell, and move it back into the active page list.
+ page->num_allocated_slots = -page->num_allocated_slots;
+ ++this->num_full_pages;
+      // num_full_pages is a 24-bit bitfield for efficient packing, so guard
+      // against overflow to be safe.
+ if (UNLIKELY(!this->num_full_pages))
+ OnFull();
+ // Not necessary but might help stop accidents.
+ page->next_page = nullptr;
+ }
+ }
+
+ this->active_pages_head = PartitionPage::get_sentinel_page();
+ return false;
+}
+
+void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
+ int flags,
+ size_t size,
+ bool* is_already_zeroed) {
+ // The slow path is called when the freelist is empty.
+ DCHECK(!this->active_pages_head->freelist_head);
+
+ PartitionPage* new_page = nullptr;
+ *is_already_zeroed = false;
+
+ // For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets
+ // marked as special cases. We bounce them through to the slow path so that
+ // we can still have a blazing fast hot path due to lack of corner-case
+ // branches.
+ //
+  // Note: The ordering of the conditionals matters! In particular,
+ // SetNewActivePage() has a side-effect even when returning
+ // false where it sweeps the active page list and may move things into
+ // the empty or decommitted lists which affects the subsequent conditional.
+ bool return_null = flags & PartitionAllocReturnNull;
+ if (UNLIKELY(this->is_direct_mapped())) {
+ DCHECK(size > kGenericMaxBucketed);
+ DCHECK(this == get_sentinel_bucket());
+ DCHECK(this->active_pages_head == PartitionPage::get_sentinel_page());
+ if (size > kGenericMaxDirectMapped) {
+ if (return_null)
+ return nullptr;
+ PartitionExcessiveAllocationSize();
+ }
+ new_page = PartitionDirectMap(root, flags, size);
+#if !defined(OS_MACOSX)
+ // Turn off the optimization to see if it helps https://crbug.com/892550.
+ *is_already_zeroed = true;
+#endif
+ } else if (LIKELY(this->SetNewActivePage())) {
+ // First, did we find an active page in the active pages list?
+ new_page = this->active_pages_head;
+ DCHECK(new_page->is_active());
+ } else if (LIKELY(this->empty_pages_head != nullptr) ||
+ LIKELY(this->decommitted_pages_head != nullptr)) {
+ // Second, look in our lists of empty and decommitted pages.
+ // Check empty pages first, which are preferred, but beware that an
+ // empty page might have been decommitted.
+ while (LIKELY((new_page = this->empty_pages_head) != nullptr)) {
+ DCHECK(new_page->bucket == this);
+ DCHECK(new_page->is_empty() || new_page->is_decommitted());
+ this->empty_pages_head = new_page->next_page;
+ // Accept the empty page unless it got decommitted.
+ if (new_page->freelist_head) {
+ new_page->next_page = nullptr;
+ break;
+ }
+ DCHECK(new_page->is_decommitted());
+ new_page->next_page = this->decommitted_pages_head;
+ this->decommitted_pages_head = new_page;
+ }
+ if (UNLIKELY(!new_page) &&
+ LIKELY(this->decommitted_pages_head != nullptr)) {
+ new_page = this->decommitted_pages_head;
+ DCHECK(new_page->bucket == this);
+ DCHECK(new_page->is_decommitted());
+ this->decommitted_pages_head = new_page->next_page;
+ void* addr = PartitionPage::ToPointer(new_page);
+ root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
+ new_page->Reset();
+ // TODO(https://crbug.com/890752): Optimizing here might cause pages to
+ // not be zeroed.
+ // *is_already_zeroed = true;
+ }
+ DCHECK(new_page);
+ } else {
+ // Third. If we get here, we need a brand new page.
+ uint16_t num_partition_pages = this->get_pages_per_slot_span();
+ void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages);
+ if (LIKELY(raw_pages != nullptr)) {
+ new_page = PartitionPage::FromPointerNoAlignmentCheck(raw_pages);
+ InitializeSlotSpan(new_page);
+ // TODO(https://crbug.com/890752): Optimizing here causes pages to not be
+ // zeroed on at least macOS.
+ // *is_already_zeroed = true;
+ }
+ }
+
+ // Bail if we had a memory allocation failure.
+ if (UNLIKELY(!new_page)) {
+ DCHECK(this->active_pages_head == PartitionPage::get_sentinel_page());
+ if (return_null)
+ return nullptr;
+ root->OutOfMemory();
+ }
+
+ // TODO(ajwong): Is there a way to avoid the reading of bucket here?
+ // It seems like in many of the conditional branches above, |this| ==
+ // |new_page->bucket|. Maybe pull this into another function?
+ PartitionBucket* bucket = new_page->bucket;
+ DCHECK(bucket != get_sentinel_bucket());
+ bucket->active_pages_head = new_page;
+ new_page->set_raw_size(size);
+
+ // If we found an active page with free slots, or an empty page, we have a
+ // usable freelist head.
+ if (LIKELY(new_page->freelist_head != nullptr)) {
+ PartitionFreelistEntry* entry = new_page->freelist_head;
+ PartitionFreelistEntry* new_head =
+ PartitionFreelistEntry::Transform(entry->next);
+ new_page->freelist_head = new_head;
+ new_page->num_allocated_slots++;
+ return entry;
+ }
+ // Otherwise, we need to build the freelist.
+ DCHECK(new_page->num_unprovisioned_slots);
+ return AllocAndFillFreelist(new_page);
+}
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/partition_bucket.h b/third_party/base/allocator/partition_allocator/partition_bucket.h
new file mode 100644
index 0000000000..a89099b8e8
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_bucket.h
@@ -0,0 +1,130 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "third_party/base/base_export.h"
+#include "third_party/base/compiler_specific.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+struct PartitionPage;
+struct PartitionRootBase;
+
+struct PartitionBucket {
+ // Accessed most in hot path => goes first.
+ PartitionPage* active_pages_head;
+
+ PartitionPage* empty_pages_head;
+ PartitionPage* decommitted_pages_head;
+ uint32_t slot_size;
+ uint32_t num_system_pages_per_slot_span : 8;
+ uint32_t num_full_pages : 24;
+
+ // Public API.
+ void Init(uint32_t new_slot_size);
+
+ // Sets |is_already_zeroed| to true if the allocation was satisfied by
+ // requesting (a) new page(s) from the operating system, or false otherwise.
+ // This enables an optimization for when callers use |PartitionAllocZeroFill|:
+ // there is no need to call memset on fresh pages; the OS has already zeroed
+ // them. (See |PartitionRootBase::AllocFromBucket|.)
+ //
+ // Note the matching Free() functions are in PartitionPage.
+ BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase* root,
+ int flags,
+ size_t size,
+ bool* is_already_zeroed);
+
+ ALWAYS_INLINE bool is_direct_mapped() const {
+ return !num_system_pages_per_slot_span;
+ }
+ ALWAYS_INLINE size_t get_bytes_per_span() const {
+ // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
+ // https://crbug.com/680657
+ return num_system_pages_per_slot_span * kSystemPageSize;
+ }
+ ALWAYS_INLINE uint16_t get_slots_per_span() const {
+ // TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
+ // https://crbug.com/680657
+ return static_cast<uint16_t>(get_bytes_per_span() / slot_size);
+ }
+
+ static ALWAYS_INLINE size_t get_direct_map_size(size_t size) {
+ // Caller must check that the size is not above the kGenericMaxDirectMapped
+ // limit before calling. This also guards against integer overflow in the
+ // calculation here.
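+    // For example (illustrative, assuming 4KB system pages): a |size| of
+    // 100000 rounds up to 102400, the next system page boundary.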
+ DCHECK(size <= kGenericMaxDirectMapped);
+ return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
+ }
+
+ // TODO(ajwong): Can this be made private? https://crbug.com/787153
+ static PartitionBucket* get_sentinel_bucket();
+
+ // This helper function scans a bucket's active page list for a suitable new
+ // active page. When it finds a suitable new active page (one that has
+ // free slots and is not empty), it is set as the new active page. If there
+ // is no suitable new active page, the current active page is set to
+ // PartitionPage::get_sentinel_page(). As potential pages are scanned, they
+  // are tidied up according to their state. Empty pages are swept onto the
+  // empty page list, decommitted pages onto the decommitted page list, and
+  // full pages are unlinked from any list.
+ //
+ // This is where the guts of the bucket maintenance is done!
+ bool SetNewActivePage();
+
+ private:
+ static void OutOfMemory(const PartitionRootBase* root);
+ static void OutOfMemoryWithLotsOfUncommitedPages();
+
+ static NOINLINE void OnFull();
+
+ // Returns a natural number of PartitionPages (calculated by
+ // get_system_pages_per_slot_span()) to allocate from the current
+ // SuperPage when the bucket runs out of slots.
+ ALWAYS_INLINE uint16_t get_pages_per_slot_span();
+
+ // Returns the number of system pages in a slot span.
+ //
+  // The calculation attempts to find the best number of System Pages to
+ // allocate for the given slot_size to minimize wasted space. It uses a
+ // heuristic that looks at number of bytes wasted after the last slot and
+ // attempts to account for the PTE usage of each System Page.
+ uint8_t get_system_pages_per_slot_span();
+
+ // Allocates a new slot span with size |num_partition_pages| from the
+ // current extent. Metadata within this slot span will be uninitialized.
+ // Returns nullptr on error.
+ ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase* root,
+ int flags,
+ uint16_t num_partition_pages);
+
+ // Each bucket allocates a slot span when it runs out of slots.
+ // A slot span's size is equal to get_pages_per_slot_span() number of
+ // PartitionPages. This function initializes all PartitionPage within the
+ // span to point to the first PartitionPage which holds all the metadata
+ // for the span and registers this bucket as the owner of the span. It does
+ // NOT put the slots into the bucket's freelist.
+ ALWAYS_INLINE void InitializeSlotSpan(PartitionPage* page);
+
+ // Allocates one slot from the given |page| and then adds the remainder to
+ // the current bucket. If the |page| was freshly allocated, it must have been
+ // passed through InitializeSlotSpan() first.
+ ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage* page);
+
+ static PartitionBucket sentinel_bucket_;
+};
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
diff --git a/third_party/base/allocator/partition_allocator/partition_cookie.h b/third_party/base/allocator/partition_allocator/partition_cookie.h
new file mode 100644
index 0000000000..7cf4e84e05
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_cookie.h
@@ -0,0 +1,72 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
+
+#include "third_party/base/compiler_specific.h"
+#include "third_party/base/logging.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+#if DCHECK_IS_ON()
+// Handles alignment up to XMM instructions on Intel.
+static constexpr size_t kCookieSize = 16;
+
+static constexpr unsigned char kCookieValue[kCookieSize] = {
+ 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
+ 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
+#endif
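+
+// In DCHECK builds an allocation is laid out as (illustrative):
+//
+//   | cookie (16B) | user data | cookie (16B) |
+//
+// so a 16-byte request consumes 48 bytes of slot space, and the pointer
+// returned to the caller points just past the leading cookie.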
+
+ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
+#if DCHECK_IS_ON()
+ unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
+ for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+ DCHECK(*cookie_ptr == kCookieValue[i]);
+#endif
+}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
+#if DCHECK_IS_ON()
+ // Add space for cookies, checking for integer overflow. TODO(palmer):
+ // Investigate the performance and code size implications of using
+ // CheckedNumeric throughout PA.
+ DCHECK(size + (2 * kCookieSize) > size);
+ size += 2 * kCookieSize;
+#endif
+ return size;
+}
+
+ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
+#if DCHECK_IS_ON()
+ // The value given to the application is actually just after the cookie.
+ ptr = static_cast<char*>(ptr) - kCookieSize;
+#endif
+ return ptr;
+}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
+#if DCHECK_IS_ON()
+ // Remove space for cookies.
+ DCHECK(size >= 2 * kCookieSize);
+ size -= 2 * kCookieSize;
+#endif
+ return size;
+}
+
+ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
+#if DCHECK_IS_ON()
+ unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
+ for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+ *cookie_ptr = kCookieValue[i];
+#endif
+}
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
diff --git a/third_party/base/allocator/partition_allocator/partition_direct_map_extent.h b/third_party/base/allocator/partition_allocator/partition_direct_map_extent.h
new file mode 100644
index 0000000000..192c5b4b3d
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_direct_map_extent.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
+
+#include "third_party/base/allocator/partition_allocator/partition_bucket.h"
+#include "third_party/base/allocator/partition_allocator/partition_page.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+struct PartitionDirectMapExtent {
+ PartitionDirectMapExtent* next_extent;
+ PartitionDirectMapExtent* prev_extent;
+ PartitionBucket* bucket;
+  size_t map_size;  // Mapped size, not including guard pages and metadata.
+
+ ALWAYS_INLINE static PartitionDirectMapExtent* FromPage(PartitionPage* page);
+};
+
+ALWAYS_INLINE PartitionDirectMapExtent* PartitionDirectMapExtent::FromPage(
+ PartitionPage* page) {
+ DCHECK(page->bucket->is_direct_mapped());
+ return reinterpret_cast<PartitionDirectMapExtent*>(
+ reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
+}
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
diff --git a/third_party/base/allocator/partition_allocator/partition_freelist_entry.h b/third_party/base/allocator/partition_allocator/partition_freelist_entry.h
new file mode 100644
index 0000000000..e9f22842af
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_freelist_entry.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
+
+#include <stdint.h>
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "third_party/base/compiler_specific.h"
+#include "third_party/base/sys_byteorder.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+// TODO(ajwong): Introduce an EncodedFreelistEntry type and then replace
+// Transform() with Encode()/Decode() such that the API provides some static
+// type safety.
+//
+// https://crbug.com/787153
+struct PartitionFreelistEntry {
+ PartitionFreelistEntry* next;
+
+ static ALWAYS_INLINE PartitionFreelistEntry* Transform(
+ PartitionFreelistEntry* ptr) {
+// We use bswap on little endian as a fast mask for two reasons:
+// 1) If an object is freed and its vtable used where the attacker doesn't
+// get the chance to run allocations between the free and use, the vtable
+// dereference is likely to fault.
+// 2) If the attacker has a linear buffer overflow and elects to try and
+// corrupt a freelist pointer, partial pointer overwrite attacks are
+// thwarted.
+// For big endian, similar guarantees are arrived at with a negation.
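+//
+// Example (illustrative, 64-bit little endian): 0x0000561234567890 encodes
+// to 0x9078563412560000, which is a non-canonical address on x64, so
+// dereferencing an encoded entry by mistake is likely to fault immediately.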
+#if defined(ARCH_CPU_BIG_ENDIAN)
+ uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
+#else
+ uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
+#endif
+ return reinterpret_cast<PartitionFreelistEntry*>(masked);
+ }
+};
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
diff --git a/third_party/base/allocator/partition_allocator/partition_oom.cc b/third_party/base/allocator/partition_allocator/partition_oom.cc
new file mode 100644
index 0000000000..a4052d1a3d
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_oom.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/partition_oom.h"
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/oom.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+void NOINLINE PartitionExcessiveAllocationSize() {
+ OOM_CRASH();
+}
+
+#if !defined(ARCH_CPU_64_BITS)
+NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages() {
+ OOM_CRASH();
+}
+#endif
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/partition_oom.h b/third_party/base/allocator/partition_allocator/partition_oom.h
new file mode 100644
index 0000000000..be43ff365f
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_oom.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Holds functions for generating OOM errors from PartitionAlloc. This is
+// distinct from oom.h in that it is meant only for use in PartitionAlloc.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
+
+#include "build/build_config.h"
+#include "third_party/base/compiler_specific.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+NOINLINE void PartitionExcessiveAllocationSize();
+
+#if !defined(ARCH_CPU_64_BITS)
+NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages();
+#endif
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
diff --git a/third_party/base/allocator/partition_allocator/partition_page.cc b/third_party/base/allocator/partition_allocator/partition_page.cc
new file mode 100644
index 0000000000..3f70048269
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_page.cc
@@ -0,0 +1,165 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/partition_page.h"
+
+#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
+#include "third_party/base/allocator/partition_allocator/partition_root_base.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+namespace {
+
+ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
+ PartitionRootBase* root = PartitionRootBase::FromPage(page);
+ const PartitionDirectMapExtent* extent =
+ PartitionDirectMapExtent::FromPage(page);
+ size_t unmap_size = extent->map_size;
+
+ // Maintain the doubly-linked list of all direct mappings.
+ if (extent->prev_extent) {
+ DCHECK(extent->prev_extent->next_extent == extent);
+ extent->prev_extent->next_extent = extent->next_extent;
+ } else {
+ root->direct_map_list = extent->next_extent;
+ }
+ if (extent->next_extent) {
+ DCHECK(extent->next_extent->prev_extent == extent);
+ extent->next_extent->prev_extent = extent->prev_extent;
+ }
+
+  // Add on the size of the trailing guard page and preceding partition
+  // page.
+ unmap_size += kPartitionPageSize + kSystemPageSize;
+
+ size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
+ root->DecreaseCommittedPages(uncommitted_page_size);
+ DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
+ root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
+
+ DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
+
+ char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
+ // Account for the mapping starting a partition page before the actual
+ // allocation address.
+ ptr -= kPartitionPageSize;
+
+ FreePages(ptr, unmap_size);
+}
+
+ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
+ DCHECK(page->is_empty());
+ PartitionRootBase* root = PartitionRootBase::FromPage(page);
+
+ // If the page is already registered as empty, give it another life.
+ if (page->empty_cache_index != -1) {
+ DCHECK(page->empty_cache_index >= 0);
+ DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
+ DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
+ root->global_empty_page_ring[page->empty_cache_index] = nullptr;
+ }
+
+ int16_t current_index = root->global_empty_page_ring_index;
+ PartitionPage* page_to_decommit = root->global_empty_page_ring[current_index];
+ // The page might well have been re-activated, filled up, etc. before we get
+ // around to looking at it here.
+ if (page_to_decommit)
+ page_to_decommit->DecommitIfPossible(root);
+
+  // We put the empty slot span on our global list of "pages that were once
+  // empty", thus giving it a bit of breathing room to get reused before we
+  // really free it. This improves performance, particularly on Mac OS X
+  // which has subpar memory management performance.
+ root->global_empty_page_ring[current_index] = page;
+ page->empty_cache_index = current_index;
+ ++current_index;
+ if (current_index == kMaxFreeableSpans)
+ current_index = 0;
+ root->global_empty_page_ring_index = current_index;
+}
+
+} // namespace
+
+// static
+PartitionPage PartitionPage::sentinel_page_;
+
+PartitionPage* PartitionPage::get_sentinel_page() {
+ return &sentinel_page_;
+}
+
+void PartitionPage::FreeSlowPath() {
+ DCHECK(this != get_sentinel_page());
+ if (LIKELY(this->num_allocated_slots == 0)) {
+ // Page became fully unused.
+ if (UNLIKELY(bucket->is_direct_mapped())) {
+ PartitionDirectUnmap(this);
+ return;
+ }
+ // If it's the current active page, change it. We bounce the page to
+ // the empty list as a force towards defragmentation.
+ if (LIKELY(this == bucket->active_pages_head))
+ bucket->SetNewActivePage();
+ DCHECK(bucket->active_pages_head != this);
+
+ set_raw_size(0);
+ DCHECK(!get_raw_size());
+
+ PartitionRegisterEmptyPage(this);
+ } else {
+ DCHECK(!bucket->is_direct_mapped());
+ // Ensure that the page is full. That's the only valid case if we
+ // arrive here.
+ DCHECK(this->num_allocated_slots < 0);
+ // A transition of num_allocated_slots from 0 to -1 is not legal, and
+ // likely indicates a double-free.
+ CHECK(this->num_allocated_slots != -1);
+ this->num_allocated_slots = -this->num_allocated_slots - 2;
+ DCHECK(this->num_allocated_slots == bucket->get_slots_per_span() - 1);
+ // Fully used page became partially used. It must be put back on the
+ // non-full page list. Also make it the current page to increase the
+ // chances of it being filled up again. The old current page will be
+ // the next page.
+ DCHECK(!this->next_page);
+ if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
+ this->next_page = bucket->active_pages_head;
+ bucket->active_pages_head = this;
+ --bucket->num_full_pages;
+ // Special case: for a partition page with just a single slot, it may
+ // now be empty and we want to run it through the empty logic.
+ if (UNLIKELY(this->num_allocated_slots == 0))
+ FreeSlowPath();
+ }
+}
+
+void PartitionPage::Decommit(PartitionRootBase* root) {
+ DCHECK(is_empty());
+ DCHECK(!bucket->is_direct_mapped());
+ void* addr = PartitionPage::ToPointer(this);
+ root->DecommitSystemPages(addr, bucket->get_bytes_per_span());
+
+ // We actually leave the decommitted page in the active list. We'll sweep
+ // it on to the decommitted page list when we next walk the active page
+ // list.
+ // Pulling this trick enables us to use a singly-linked page list for all
+ // cases, which is critical in keeping the page metadata structure down to
+ // 32 bytes in size.
+ freelist_head = nullptr;
+ num_unprovisioned_slots = 0;
+ DCHECK(is_decommitted());
+}
+
+void PartitionPage::DecommitIfPossible(PartitionRootBase* root) {
+ DCHECK(empty_cache_index >= 0);
+ DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
+ DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
+ empty_cache_index = -1;
+ if (is_empty())
+ Decommit(root);
+}
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/partition_page.h b/third_party/base/allocator/partition_allocator/partition_page.h
new file mode 100644
index 0000000000..a40ff8e039
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_page.h
@@ -0,0 +1,296 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
+
+#include <string.h>
+
+#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "third_party/base/allocator/partition_allocator/partition_bucket.h"
+#include "third_party/base/allocator/partition_allocator/partition_cookie.h"
+#include "third_party/base/allocator/partition_allocator/partition_freelist_entry.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+struct PartitionRootBase;
+
+// Some notes on page states. A page can be in one of four major states:
+// 1) Active.
+// 2) Full.
+// 3) Empty.
+// 4) Decommitted.
+// An active page has available free slots. A full page has no free slots. An
+// empty page has no free slots, and a decommitted page is an empty page that
+// had its backing memory released back to the system.
+// There are two linked lists tracking the pages. The "active page" list is an
+// approximation of a list of active pages. It is an approximation because
+// full, empty and decommitted pages may briefly be present in the list until
+// we next do a scan over it.
+// The "empty page" list is an accurate list of pages which are either empty
+// or decommitted.
+//
+// The significant page transitions are:
+// - free() will detect when a full page has a slot free()'d and immediately
+// return the page to the head of the active list.
+// - free() will detect when a page is fully emptied. It _may_ add it to the
+// empty list or it _may_ leave it on the active list until a future list scan.
+// - malloc() _may_ scan the active page list in order to fulfill the request.
+// If it does this, full, empty and decommitted pages encountered will be
+// booted out of the active list. If there are no suitable active pages found,
+// an empty or decommitted page (if one exists) will be pulled from the empty
+// list on to the active list.
+//
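+// For example (illustrative): when a page fills up, SetNewActivePage() tags
+// it by negating num_allocated_slots and unlinks it; the next free() on that
+// page flips the count back to a positive value and reinserts the page at
+// the head of the active list.
+//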
+// TODO(ajwong): Evaluate if this should be named PartitionSlotSpanMetadata or
+// similar. If so, all uses of the term "page" in comments, member variables,
+// local variables, and documentation that refer to this concept should be
+// updated.
+struct PartitionPage {
+ PartitionFreelistEntry* freelist_head;
+ PartitionPage* next_page;
+ PartitionBucket* bucket;
+ // Deliberately signed, 0 for empty or decommitted page, -n for full pages:
+ int16_t num_allocated_slots;
+ uint16_t num_unprovisioned_slots;
+ uint16_t page_offset;
+ int16_t empty_cache_index; // -1 if not in the empty cache.
+
+ // Public API
+
+  // Note the matching Alloc() functions are in PartitionBucket.
+ BASE_EXPORT NOINLINE void FreeSlowPath();
+ ALWAYS_INLINE void Free(void* ptr);
+
+ void Decommit(PartitionRootBase* root);
+ void DecommitIfPossible(PartitionRootBase* root);
+
+ // Pointer manipulation functions. These must be static as the input |page|
+ // pointer may be the result of an offset calculation and therefore cannot
+ // be trusted. The objective of these functions is to sanitize this input.
+ ALWAYS_INLINE static void* ToPointer(const PartitionPage* page);
+ ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
+ ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);
+
+ ALWAYS_INLINE const size_t* get_raw_size_ptr() const;
+ ALWAYS_INLINE size_t* get_raw_size_ptr() {
+ return const_cast<size_t*>(
+ const_cast<const PartitionPage*>(this)->get_raw_size_ptr());
+ }
+
+ ALWAYS_INLINE size_t get_raw_size() const;
+ ALWAYS_INLINE void set_raw_size(size_t size);
+
+ ALWAYS_INLINE void Reset();
+
+ // TODO(ajwong): Can this be made private? https://crbug.com/787153
+ BASE_EXPORT static PartitionPage* get_sentinel_page();
+
+ // Page State accessors.
+ // Note that it's only valid to call these functions on pages found on one of
+ // the page lists. Specifically, you can't call these functions on full pages
+ // that were detached from the active list.
+ //
+  // This restriction provides the flexibility for some of the status fields to
+ // be repurposed when a page is taken off a list. See the negation of
+ // |num_allocated_slots| when a full page is removed from the active list
+ // for an example of such repurposing.
+ ALWAYS_INLINE bool is_active() const;
+ ALWAYS_INLINE bool is_full() const;
+ ALWAYS_INLINE bool is_empty() const;
+ ALWAYS_INLINE bool is_decommitted() const;
+
+ private:
+  // |sentinel_page_| is used as a sentinel to indicate that there is no page
+  // in the active page list. We could use nullptr, but then we would need a
+  // null-check branch on the hot allocation path, and we want to avoid
+  // that.
+ //
+ // Note, this declaration is kept in the header as opposed to an anonymous
+ // namespace so the getter can be fully inlined.
+ static PartitionPage sentinel_page_;
+};
+static_assert(sizeof(PartitionPage) <= kPageMetadataSize,
+ "PartitionPage must be able to fit in a metadata slot");
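+
+// Illustrative field values for each state (a sketch; N denotes
+// bucket->get_slots_per_span(), cf. the accessors declared above):
+//   Active:      0 < num_allocated_slots < N, and freelist_head or
+//                num_unprovisioned_slots is nonzero.
+//   Full:        num_allocated_slots == N, no freelist, nothing
+//                unprovisioned (negated to -N once detached from the
+//                active list).
+//   Empty:       num_allocated_slots == 0, freelist_head != nullptr.
+//   Decommitted: num_allocated_slots == 0, freelist_head == nullptr.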
+
+ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
+ uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
+ DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
+ // The metadata area is exactly one system page (the guard page) into the
+ // super page.
+ return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
+}
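+
+// A hypothetical example (assuming 2 MiB super pages and 4 KiB system
+// pages): for a super page based at 0x40000000,
+//   char* metadata = PartitionSuperPageToMetadataArea(
+//       reinterpret_cast<char*>(0x40000000));  // == 0x40001000
+// i.e. one system page past the base, just after the leading guard page.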
+
+ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck(
+ void* ptr) {
+ uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
+ char* super_page_ptr =
+ reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
+ uintptr_t partition_page_index =
+ (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
+  // Index 0 is invalid because it is the metadata and guard area, and the
+  // last index is invalid because it is a guard page.
+ DCHECK(partition_page_index);
+ DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ PartitionPage* page = reinterpret_cast<PartitionPage*>(
+ PartitionSuperPageToMetadataArea(super_page_ptr) +
+ (partition_page_index << kPageMetadataShift));
+ // Partition pages in the same slot span can share the same page object.
+ // Adjust for that.
+ size_t delta = page->page_offset << kPageMetadataShift;
+ page =
+ reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
+ return page;
+}
+
+// Returns the start of the slot span for the PartitionPage.
+ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) {
+ uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
+
+ uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
+
+  // A valid |page| must be past the first guard system page and within
+  // the following metadata region.
+ DCHECK(super_page_offset > kSystemPageSize);
+ // Must be less than total metadata region.
+ DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
+ kPageMetadataSize));
+ uintptr_t partition_page_index =
+ (super_page_offset - kSystemPageSize) >> kPageMetadataShift;
+  // Index 0 is invalid because it is the superpage extent metadata, and the
+  // last index is invalid because the last partition page of the super page
+  // consists entirely of guard pages.
+ DCHECK(partition_page_index);
+ DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
+ void* ret = reinterpret_cast<void*>(
+ super_page_base + (partition_page_index << kPartitionPageShift));
+ return ret;
+}
+
+ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) {
+ PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
+ // Checks that the pointer is a multiple of bucket size.
+ DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
+ reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
+ page->bucket->slot_size));
+ return page;
+}
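+
+// Round-trip sketch (assumes |ptr| came from this allocator):
+//   PartitionPage* page = PartitionPage::FromPointer(ptr);
+//   void* start = PartitionPage::ToPointer(page);
+//   // |ptr| is |start| plus a whole number of bucket->slot_size slots,
+//   // which is exactly what the DCHECK above verifies.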
+
+ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const {
+ // For single-slot buckets which span more than one partition page, we
+ // have some spare metadata space to store the raw allocation size. We
+ // can use this to report better statistics.
+ if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
+ return nullptr;
+
+ DCHECK((bucket->slot_size % kSystemPageSize) == 0);
+ DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
+
+ const PartitionPage* the_next_page = this + 1;
+ return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
+}
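+
+// For example, a single-slot span backing a large (e.g. direct-mapped)
+// allocation stores its exact adjusted request size in the spare metadata
+// slot of the following partition page, so statistics can report that size
+// instead of the rounded slot size. (Illustrative; the threshold depends on
+// the constants above.)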
+
+ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
+ const size_t* ptr = get_raw_size_ptr();
+ if (UNLIKELY(ptr != nullptr))
+ return *ptr;
+ return 0;
+}
+
+ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
+ size_t slot_size = this->bucket->slot_size;
+ const size_t raw_size = get_raw_size();
+ if (raw_size) {
+ slot_size = raw_size;
+ }
+
+#if DCHECK_IS_ON()
+ // If these asserts fire, you probably corrupted memory.
+ PartitionCookieCheckValue(ptr);
+ PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
+ kCookieSize);
+
+ memset(ptr, kFreedByte, slot_size);
+#endif
+
+ DCHECK(this->num_allocated_slots);
+ // TODO(palmer): See if we can afford to make this a CHECK.
+  // FIX FIX FIX: disabled because PartitionRootBase::IsValidPage() cannot be
+  // called from this header without a cyclic include.
+ // DCHECK(!freelist_head || PartitionRootBase::IsValidPage(
+ // PartitionPage::FromPointer(freelist_head)));
+ CHECK(ptr != freelist_head); // Catches an immediate double free.
+ // Look for double free one level deeper in debug.
+ DCHECK(!freelist_head || ptr != internal::PartitionFreelistEntry::Transform(
+ freelist_head->next));
+ internal::PartitionFreelistEntry* entry =
+ static_cast<internal::PartitionFreelistEntry*>(ptr);
+ entry->next = internal::PartitionFreelistEntry::Transform(freelist_head);
+ freelist_head = entry;
+ --this->num_allocated_slots;
+ if (UNLIKELY(this->num_allocated_slots <= 0)) {
+ FreeSlowPath();
+ } else {
+ // All single-slot allocations must go through the slow path to
+ // correctly update the size metadata.
+ DCHECK(get_raw_size() == 0);
+ }
+}
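+
+// Typical call site (sketch): map the user pointer back to its metadata,
+// then free through it:
+//   PartitionPage* page = PartitionPage::FromPointer(ptr);
+//   page->Free(ptr);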
+
+ALWAYS_INLINE bool PartitionPage::is_active() const {
+ DCHECK(this != get_sentinel_page());
+ DCHECK(!page_offset);
+ return (num_allocated_slots > 0 &&
+ (freelist_head || num_unprovisioned_slots));
+}
+
+ALWAYS_INLINE bool PartitionPage::is_full() const {
+ DCHECK(this != get_sentinel_page());
+ DCHECK(!page_offset);
+ bool ret = (num_allocated_slots == bucket->get_slots_per_span());
+ if (ret) {
+ DCHECK(!freelist_head);
+ DCHECK(!num_unprovisioned_slots);
+ }
+ return ret;
+}
+
+ALWAYS_INLINE bool PartitionPage::is_empty() const {
+ DCHECK(this != get_sentinel_page());
+ DCHECK(!page_offset);
+ return (!num_allocated_slots && freelist_head);
+}
+
+ALWAYS_INLINE bool PartitionPage::is_decommitted() const {
+ DCHECK(this != get_sentinel_page());
+ DCHECK(!page_offset);
+ bool ret = (!num_allocated_slots && !freelist_head);
+ if (ret) {
+ DCHECK(!num_unprovisioned_slots);
+ DCHECK(empty_cache_index == -1);
+ }
+ return ret;
+}
+
+ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) {
+ size_t* raw_size_ptr = get_raw_size_ptr();
+ if (UNLIKELY(raw_size_ptr != nullptr))
+ *raw_size_ptr = size;
+}
+
+ALWAYS_INLINE void PartitionPage::Reset() {
+ DCHECK(this->is_decommitted());
+
+ num_unprovisioned_slots = bucket->get_slots_per_span();
+ DCHECK(num_unprovisioned_slots);
+
+ next_page = nullptr;
+}
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
diff --git a/third_party/base/allocator/partition_allocator/partition_root_base.cc b/third_party/base/allocator/partition_allocator/partition_root_base.cc
new file mode 100644
index 0000000000..313658419a
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_root_base.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/partition_root_base.h"
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/oom.h"
+#include "third_party/base/allocator/partition_allocator/partition_oom.h"
+#include "third_party/base/allocator/partition_allocator/partition_page.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+NOINLINE void PartitionRootBase::OutOfMemory() {
+#if !defined(ARCH_CPU_64_BITS)
+ // Check whether this OOM is due to a lot of super pages that are allocated
+ // but not committed, probably due to http://crbug.com/421387.
+ if (total_size_of_super_pages + total_size_of_direct_mapped_pages -
+ total_size_of_committed_pages >
+ kReasonableSizeOfUnusedPages) {
+ PartitionOutOfMemoryWithLotsOfUncommitedPages();
+ }
+#endif
+ if (PartitionRootBase::gOomHandlingFunction)
+ (*PartitionRootBase::gOomHandlingFunction)();
+ OOM_CRASH();
+}
+
+void PartitionRootBase::DecommitEmptyPages() {
+ for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
+ internal::PartitionPage* page = global_empty_page_ring[i];
+ if (page)
+ page->DecommitIfPossible(this);
+ global_empty_page_ring[i] = nullptr;
+ }
+}
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/partition_root_base.h b/third_party/base/allocator/partition_allocator/partition_root_base.h
new file mode 100644
index 0000000000..e4f72286d5
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_root_base.h
@@ -0,0 +1,195 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "third_party/base/allocator/partition_allocator/partition_bucket.h"
+#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
+#include "third_party/base/allocator/partition_allocator/partition_page.h"
+
+namespace pdfium {
+namespace base {
+namespace internal {
+
+struct PartitionPage;
+struct PartitionRootBase;
+
+// An "extent" is a span of consecutive superpages. We link to the partition's
+// next extent (if there is one) to the very start of a superpage's metadata
+// area.
+struct PartitionSuperPageExtentEntry {
+ PartitionRootBase* root;
+ char* super_page_base;
+ char* super_pages_end;
+ PartitionSuperPageExtentEntry* next;
+};
+static_assert(
+ sizeof(PartitionSuperPageExtentEntry) <= kPageMetadataSize,
+ "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
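+
+// Walking a partition's extents (sketch, assuming an initialized root):
+//   for (PartitionSuperPageExtentEntry* entry = root->first_extent; entry;
+//        entry = entry->next) {
+//     // [entry->super_page_base, entry->super_pages_end) is a run of
+//     // consecutive super pages owned by |root|.
+//   }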
+
+struct BASE_EXPORT PartitionRootBase {
+ PartitionRootBase();
+ virtual ~PartitionRootBase();
+ size_t total_size_of_committed_pages = 0;
+ size_t total_size_of_super_pages = 0;
+ size_t total_size_of_direct_mapped_pages = 0;
+ // Invariant: total_size_of_committed_pages <=
+ // total_size_of_super_pages +
+ // total_size_of_direct_mapped_pages.
+ unsigned num_buckets = 0;
+ unsigned max_allocation = 0;
+ bool initialized = false;
+ char* next_super_page = nullptr;
+ char* next_partition_page = nullptr;
+ char* next_partition_page_end = nullptr;
+ PartitionSuperPageExtentEntry* current_extent = nullptr;
+ PartitionSuperPageExtentEntry* first_extent = nullptr;
+ PartitionDirectMapExtent* direct_map_list = nullptr;
+ PartitionPage* global_empty_page_ring[kMaxFreeableSpans] = {};
+ int16_t global_empty_page_ring_index = 0;
+ uintptr_t inverted_self = 0;
+
+ // Public API
+
+  // Allocates out of the given bucket. This function arguably belongs in
+  // PartitionBucket, but it must be inlined for performance and must inspect
+  // PartitionPage, so placing it in PartitionBucket would create a cyclical
+  // dependency on PartitionPage function implementations.
+ //
+ // Moving it a layer lower couples PartitionRootBase and PartitionBucket, but
+ // preserves the layering of the includes.
+ //
+ // Note the matching Free() functions are in PartitionPage.
+ ALWAYS_INLINE void* AllocFromBucket(PartitionBucket* bucket,
+ int flags,
+ size_t size);
+
+ ALWAYS_INLINE static bool IsValidPage(PartitionPage* page);
+ ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page);
+
+ // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
+ static void (*gOomHandlingFunction)();
+ NOINLINE void OutOfMemory();
+
+ ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
+ ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
+ ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length);
+ ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length);
+
+ void DecommitEmptyPages();
+};
+
+ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket,
+ int flags,
+ size_t size) {
+ bool zero_fill = flags & PartitionAllocZeroFill;
+ bool is_already_zeroed = false;
+
+ PartitionPage* page = bucket->active_pages_head;
+ // Check that this page is neither full nor freed.
+ DCHECK(page->num_allocated_slots >= 0);
+ void* ret = page->freelist_head;
+ if (LIKELY(ret != 0)) {
+ // If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See
+ // if we can afford to make these CHECKs.
+ DCHECK(PartitionRootBase::IsValidPage(page));
+
+ // All large allocations must go through the slow path to correctly update
+ // the size metadata.
+ DCHECK(page->get_raw_size() == 0);
+ internal::PartitionFreelistEntry* new_head =
+ internal::PartitionFreelistEntry::Transform(
+ static_cast<internal::PartitionFreelistEntry*>(ret)->next);
+ page->freelist_head = new_head;
+ page->num_allocated_slots++;
+ } else {
+ ret = bucket->SlowPathAlloc(this, flags, size, &is_already_zeroed);
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ DCHECK(!ret ||
+ PartitionRootBase::IsValidPage(PartitionPage::FromPointer(ret)));
+ }
+
+#if DCHECK_IS_ON()
+ if (!ret) {
+ return nullptr;
+ }
+
+ page = PartitionPage::FromPointer(ret);
+  // TODO(ajwong): Can |page->bucket| ever not be |bucket|? If not, can this
+  // just be bucket->slot_size?
+ size_t new_slot_size = page->bucket->slot_size;
+ size_t raw_size = page->get_raw_size();
+ if (raw_size) {
+ DCHECK(raw_size == size);
+ new_slot_size = raw_size;
+ }
+ size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(new_slot_size);
+ char* char_ret = static_cast<char*>(ret);
+ // The value given to the application is actually just after the cookie.
+ ret = char_ret + kCookieSize;
+
+  // Fill the region with kUninitializedByte or 0, and surround it with two
+  // cookies.
+ PartitionCookieWriteValue(char_ret);
+ if (!zero_fill) {
+ memset(ret, kUninitializedByte, no_cookie_size);
+ } else if (!is_already_zeroed) {
+ memset(ret, 0, no_cookie_size);
+ }
+ PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
+#else
+ if (ret && zero_fill && !is_already_zeroed) {
+ memset(ret, 0, size);
+ }
+#endif
+
+ return ret;
+}
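+
+// Usage sketch (hypothetical; |root| and |bucket| stand for an initialized
+// root and one of its buckets):
+//   void* p = root->AllocFromBucket(bucket, 0 /* flags */, size);
+//   if (p)
+//     PartitionPage::FromPointer(p)->Free(p);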
+
+ALWAYS_INLINE bool PartitionRootBase::IsValidPage(PartitionPage* page) {
+ PartitionRootBase* root = PartitionRootBase::FromPage(page);
+ return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
+}
+
+ALWAYS_INLINE PartitionRootBase* PartitionRootBase::FromPage(
+ PartitionPage* page) {
+ PartitionSuperPageExtentEntry* extent_entry =
+ reinterpret_cast<PartitionSuperPageExtentEntry*>(
+ reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
+ return extent_entry->root;
+}
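+
+// |inverted_self| is set by the root to the bitwise complement of its own
+// address, e.g. at initialization (sketch):
+//   inverted_self = ~reinterpret_cast<uintptr_t>(this);
+// IsValidPage() recomputes the complement and compares, so a page whose
+// metadata points at a bogus root is unlikely to pass the check by accident.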
+
+ALWAYS_INLINE void PartitionRootBase::IncreaseCommittedPages(size_t len) {
+ total_size_of_committed_pages += len;
+ DCHECK(total_size_of_committed_pages <=
+ total_size_of_super_pages + total_size_of_direct_mapped_pages);
+}
+
+ALWAYS_INLINE void PartitionRootBase::DecreaseCommittedPages(size_t len) {
+ total_size_of_committed_pages -= len;
+ DCHECK(total_size_of_committed_pages <=
+ total_size_of_super_pages + total_size_of_direct_mapped_pages);
+}
+
+ALWAYS_INLINE void PartitionRootBase::DecommitSystemPages(void* address,
+ size_t length) {
+ ::pdfium::base::DecommitSystemPages(address, length);
+ DecreaseCommittedPages(length);
+}
+
+ALWAYS_INLINE void PartitionRootBase::RecommitSystemPages(void* address,
+ size_t length) {
+ CHECK(::pdfium::base::RecommitSystemPages(address, length, PageReadWrite));
+ IncreaseCommittedPages(length);
+}
+
+} // namespace internal
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
diff --git a/third_party/base/allocator/partition_allocator/spin_lock.cc b/third_party/base/allocator/partition_allocator/spin_lock.cc
index 8d7151a8b7..42055836b8 100644
--- a/third_party/base/allocator/partition_allocator/spin_lock.cc
+++ b/third_party/base/allocator/partition_allocator/spin_lock.cc
@@ -4,9 +4,12 @@
#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+#include "build/build_config.h"
+#include "third_party/base/logging.h"
+
#if defined(OS_WIN)
#include <windows.h>
-#elif defined(OS_POSIX)
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
#include <sched.h>
#endif
@@ -21,9 +24,12 @@
// you really should be using a proper lock (such as |base::Lock|) rather than
// these spinlocks.
#if defined(OS_WIN)
+
#define YIELD_PROCESSOR YieldProcessor()
#define YIELD_THREAD SwitchToThread()
-#elif defined(COMPILER_GCC) || defined(__clang__)
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86)
#define YIELD_PROCESSOR __asm__ __volatile__("pause")
#elif (defined(ARCH_CPU_ARMEL) && __ARM_ARCH >= 6) || defined(ARCH_CPU_ARM64)
@@ -37,22 +43,26 @@
// Don't bother using .word here since r2 is the lowest mips64 revision
// that Chromium supports.
#define YIELD_PROCESSOR __asm__ __volatile__("pause")
-#endif
-#endif
+#elif defined(ARCH_CPU_PPC64_FAMILY)
+#define YIELD_PROCESSOR __asm__ __volatile__("or 31,31,31")
+#elif defined(ARCH_CPU_S390_FAMILY)
+// just do nothing
+#define YIELD_PROCESSOR ((void)0)
+#endif // ARCH
#ifndef YIELD_PROCESSOR
#warning "Processor yield not supported on this architecture."
#define YIELD_PROCESSOR ((void)0)
#endif
-#ifndef YIELD_THREAD
-#if defined(OS_POSIX)
#define YIELD_THREAD sched_yield()
-#else
+
+#else // Other OS
+
#warning "Thread yield not supported on this OS."
#define YIELD_THREAD ((void)0)
-#endif
-#endif
+
+#endif // OS_WIN
namespace pdfium {
namespace base {
@@ -63,6 +73,9 @@ void SpinLock::LockSlow() {
// critical section defaults, and various other recommendations.
// TODO(jschuh): Further tuning may be warranted.
static const int kYieldProcessorTries = 1000;
+ // The value of |kYieldThreadTries| is completely made up.
+ static const int kYieldThreadTries = 10;
+ int yield_thread_count = 0;
do {
do {
for (int count = 0; count < kYieldProcessorTries; ++count) {
@@ -73,8 +86,17 @@ void SpinLock::LockSlow() {
return;
}
- // Give the OS a chance to schedule something on this core.
- YIELD_THREAD;
+ if (yield_thread_count < kYieldThreadTries) {
+ ++yield_thread_count;
+ // Give the OS a chance to schedule something on this core.
+ YIELD_THREAD;
+ } else {
+ // At this point, it's likely that the lock is held by a lower priority
+ // thread that is unavailable to finish its work because of higher
+ // priority threads spinning here. Sleeping should ensure that they make
+ // progress.
+ NOTREACHED();
+ }
} while (lock_.load(std::memory_order_relaxed));
} while (UNLIKELY(lock_.exchange(true, std::memory_order_acquire)));
}
diff --git a/third_party/base/allocator/partition_allocator/spin_lock.h b/third_party/base/allocator/partition_allocator/spin_lock.h
index 7a42a29c4e..5613fd130c 100644
--- a/third_party/base/allocator/partition_allocator/spin_lock.h
+++ b/third_party/base/allocator/partition_allocator/spin_lock.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H_
#include <atomic>
#include <memory>
@@ -17,16 +17,14 @@
// intended only for very short duration locks and assume a system with multiple
// cores. For any potentially longer wait you should use a real lock, such as
// |base::Lock|.
-//
-// |SpinLock|s MUST be globals. Using them as (e.g.) struct/class members will
-// result in an uninitialized lock, which is dangerously incorrect.
-
namespace pdfium {
namespace base {
namespace subtle {
-class SpinLock {
+class BASE_EXPORT SpinLock {
public:
+ constexpr SpinLock() = default;
+ ~SpinLock() = default;
using Guard = std::lock_guard<SpinLock>;
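+
+  // Usage sketch (hypothetical caller):
+  //   base::subtle::SpinLock lock;
+  //   {
+  //     SpinLock::Guard guard(lock);
+  //     // ... very short critical section ...
+  //   }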
ALWAYS_INLINE void lock() {
@@ -42,13 +40,13 @@ class SpinLock {
private:
// This is called if the initial attempt to acquire the lock fails. It's
// slower, but has a much better scheduling and power consumption behavior.
- BASE_EXPORT void LockSlow();
+ void LockSlow();
- std::atomic_int lock_;
+ std::atomic_int lock_{0};
};
} // namespace subtle
} // namespace base
} // namespace pdfium
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H_
diff --git a/third_party/base/stl_util.h b/third_party/base/stl_util.h
index ab3ac60911..8163b73951 100644
--- a/third_party/base/stl_util.h
+++ b/third_party/base/stl_util.h
@@ -16,6 +16,18 @@
namespace pdfium {
+// C++11 implementation of C++17's std::size():
+// http://en.cppreference.com/w/cpp/iterator/size
+template <typename Container>
+constexpr auto size(const Container& c) -> decltype(c.size()) {
+ return c.size();
+}
+
+template <typename T, size_t N>
+constexpr size_t size(const T (&array)[N]) noexcept {
+ return N;
+}
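+
+// Usage sketch:
+//   const char* kNames[] = {"a", "b", "c"};
+//   for (size_t i = 0; i < pdfium::size(kNames); ++i) { /* use kNames[i] */ }
+//   std::vector<int> v = {1, 2};
+//   size_t n = pdfium::size(v);  // n == 2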
+
// Test to see if a set, map, hash_set or hash_map contains a particular key.
// Returns true if the key is in the collection.
template <typename Collection, typename Key>