Diffstat (limited to 'third_party/base/allocator/partition_allocator/page_allocator.cc')
 third_party/base/allocator/partition_allocator/page_allocator.cc | 338 ++++----
 1 file changed, 159 insertions(+), 179 deletions(-)
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.cc b/third_party/base/allocator/partition_allocator/page_allocator.cc
index 0869bdb769..a65fbaad80 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator.cc
+++ b/third_party/base/allocator/partition_allocator/page_allocator.cc
@@ -10,151 +10,163 @@
#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/address_space_randomization.h"
-#include "third_party/base/base_export.h"
+#include "third_party/base/allocator/partition_allocator/page_allocator_internal.h"
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
#include "third_party/base/logging.h"
+#include "third_party/base/numerics/safe_math.h"
-#if defined(OS_POSIX)
-
-#include <errno.h>
-#include <sys/mman.h>
-
-#ifndef MADV_FREE
-#define MADV_FREE MADV_DONTNEED
-#endif
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-// On POSIX |mmap| uses a nearby address if the hint address is blocked.
-static const bool kHintIsAdvisory = true;
-static std::atomic<int32_t> s_allocPageErrorCode{0};
-
-#elif defined(OS_WIN)
-
+#if defined(OS_WIN)
#include <windows.h>
+#endif
-// |VirtualAlloc| will fail if allocation at the hint address is blocked.
-static const bool kHintIsAdvisory = false;
-static std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
-
+#if defined(OS_WIN)
+#include "third_party/base/allocator/partition_allocator/page_allocator_internals_win.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include "third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h"
#else
-#error Unknown OS
-#endif // defined(OS_POSIX)
+#error Platform not supported.
+#endif
namespace pdfium {
namespace base {
-// This internal function wraps the OS-specific page allocation call:
-// |VirtualAlloc| on Windows, and |mmap| on POSIX.
-static void* SystemAllocPages(
- void* hint,
- size_t length,
- PageAccessibilityConfiguration page_accessibility) {
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
- DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
- kPageAllocationGranularityOffsetMask));
- void* ret;
-#if defined(OS_WIN)
- DWORD access_flag =
- page_accessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS;
- ret = VirtualAlloc(hint, length, MEM_RESERVE | MEM_COMMIT, access_flag);
- if (!ret)
- s_allocPageErrorCode = GetLastError();
-#else
- int access_flag = page_accessibility == PageAccessible
- ? (PROT_READ | PROT_WRITE)
- : PROT_NONE;
- ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- if (ret == MAP_FAILED) {
- s_allocPageErrorCode = errno;
- ret = 0;
+namespace {
+
+// We may reserve/release address space on different threads.
+subtle::SpinLock* GetReserveLock() {
+ static subtle::SpinLock* s_reserveLock = nullptr;
+ if (!s_reserveLock)
+ s_reserveLock = new subtle::SpinLock();
+ return s_reserveLock;
+}
+
+// We only support a single block of reserved address space.
+void* s_reservation_address = nullptr;
+size_t s_reservation_size = 0;
+
+void* AllocPagesIncludingReserved(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
+ void* ret =
+ SystemAllocPages(address, length, accessibility, page_tag, commit);
+ if (ret == nullptr) {
+ const bool cant_alloc_length = kHintIsAdvisory || address == nullptr;
+ if (cant_alloc_length) {
+ // The system cannot allocate |length| bytes. Release any reserved address
+ // space and try once more.
+ ReleaseReservation();
+ ret = SystemAllocPages(address, length, accessibility, page_tag, commit);
+ }
}
-#endif
return ret;
}
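The fallback above leans on the platform difference that kHintIsAdvisory
captures: POSIX mmap() treats the hint address as a suggestion and may place
the mapping elsewhere, while Windows VirtualAlloc() fails outright when the
hinted range is unavailable. A minimal POSIX illustration, separate from this
change:

  #include <sys/mman.h>

  // The kernel may honor |hint| or pick another address; only MAP_FAILED
  // signals an error. This is why a failed *unhinted* allocation means OOM.
  void* hint = reinterpret_cast<void*>(0x100000000ULL);
  void* got =
      mmap(hint, 4096, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);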
-// Trims base to given length and alignment. Windows returns null on failure and
-// frees base.
-static void* TrimMapping(void* base,
- size_t base_length,
- size_t trim_length,
- uintptr_t align,
- PageAccessibilityConfiguration page_accessibility) {
- size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (align - 1);
- if (pre_slack)
- pre_slack = align - pre_slack;
+// Trims |base| to given |trim_length| and |alignment|.
+//
+// On failure, on Windows, this function returns nullptr and frees |base|.
+void* TrimMapping(void* base,
+ size_t base_length,
+ size_t trim_length,
+ uintptr_t alignment,
+ PageAccessibilityConfiguration accessibility,
+ bool commit) {
+ size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (alignment - 1);
+ if (pre_slack) {
+ pre_slack = alignment - pre_slack;
+ }
size_t post_slack = base_length - pre_slack - trim_length;
DCHECK(base_length >= trim_length || pre_slack || post_slack);
DCHECK(pre_slack < base_length);
DCHECK(post_slack < base_length);
- void* ret = base;
+ return TrimMappingInternal(base, base_length, trim_length, accessibility,
+ commit, pre_slack, post_slack);
+}
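A worked example of the slack arithmetic above, with hypothetical values:

  // base = 0x5000, base_length = 0x9000, trim_length = 0x4000,
  // alignment = 0x4000 (so alignment - 1 == 0x3fff):
  //   pre_slack  = 0x5000 & 0x3fff          == 0x1000
  //   pre_slack  = 0x4000 - 0x1000          == 0x3000  // dropped in front
  //   post_slack = 0x9000 - 0x3000 - 0x4000 == 0x2000  // dropped behind
  // The surviving mapping starts at 0x5000 + 0x3000 == 0x8000, which is
  // 0x4000-aligned, and spans exactly trim_length bytes.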
-#if defined(OS_POSIX) // On POSIX we can resize the allocation run.
- (void)page_accessibility;
- if (pre_slack) {
- int res = munmap(base, pre_slack);
- CHECK(!res);
- ret = reinterpret_cast<char*>(base) + pre_slack;
- }
- if (post_slack) {
- int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
- CHECK(!res);
- }
-#else // On Windows we can't resize the allocation run.
- if (pre_slack || post_slack) {
- ret = reinterpret_cast<char*>(base) + pre_slack;
- FreePages(base, base_length);
- ret = SystemAllocPages(ret, trim_length, page_accessibility);
- }
-#endif
+} // namespace
- return ret;
+void* SystemAllocPages(void* hint,
+ size_t length,
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
+ DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
+ kPageAllocationGranularityOffsetMask));
+ DCHECK(commit || accessibility == PageInaccessible);
+ return SystemAllocPagesInternal(hint, length, accessibility, page_tag,
+ commit);
}
void* AllocPages(void* address,
size_t length,
size_t align,
- PageAccessibilityConfiguration page_accessibility) {
+ PageAccessibilityConfiguration accessibility,
+ PageTag page_tag,
+ bool commit) {
DCHECK(length >= kPageAllocationGranularity);
DCHECK(!(length & kPageAllocationGranularityOffsetMask));
DCHECK(align >= kPageAllocationGranularity);
- DCHECK(!(align & kPageAllocationGranularityOffsetMask));
+ // Alignment must be power of 2 for masking math to work.
+ DCHECK_EQ(align & (align - 1), 0UL);
DCHECK(!(reinterpret_cast<uintptr_t>(address) &
kPageAllocationGranularityOffsetMask));
uintptr_t align_offset_mask = align - 1;
uintptr_t align_base_mask = ~align_offset_mask;
DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
+#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
+ // On 64 bit Linux, we may need to adjust the address space limit for
+ // guarded allocations.
+ if (length >= kMinimumGuardedMemorySize) {
+ CHECK(PageInaccessible == accessibility);
+ CHECK(!commit);
+ if (!AdjustAddressSpaceLimit(base::checked_cast<int64_t>(length))) {
+ // Fall through. Try the allocation, since we may have a reserve.
+ }
+ }
+#endif
+
// If the client passed null as the address, choose a good one.
- if (!address) {
+ if (address == nullptr) {
address = GetRandomPageBase();
address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
align_base_mask);
}
// First try to force an exact-size, aligned allocation from our random base.
- for (int count = 0; count < 3; ++count) {
- void* ret = SystemAllocPages(address, length, page_accessibility);
- if (kHintIsAdvisory || ret) {
+#if defined(ARCH_CPU_32_BITS)
+ // On 32 bit systems, first try one random aligned address, and then try an
+ // aligned address derived from the value of |ret|.
+ constexpr int kExactSizeTries = 2;
+#else
+ // On 64 bit systems, try 3 random aligned addresses.
+ constexpr int kExactSizeTries = 3;
+#endif
+
+ for (int i = 0; i < kExactSizeTries; ++i) {
+ void* ret = AllocPagesIncludingReserved(address, length, accessibility,
+ page_tag, commit);
+ if (ret != nullptr) {
// If the alignment is to our liking, we're done.
if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
return ret;
+ // Free the memory and try again.
FreePages(ret, length);
-#if defined(ARCH_CPU_32_BITS)
- address = reinterpret_cast<void*>(
- (reinterpret_cast<uintptr_t>(ret) + align) & align_base_mask);
-#endif
- } else if (!address) { // We know we're OOM when an unhinted allocation
- // fails.
- return nullptr;
} else {
-#if defined(ARCH_CPU_32_BITS)
- address = reinterpret_cast<char*>(address) + align;
-#endif
+ // |ret| is null; if this try was unhinted, we're OOM.
+ if (kHintIsAdvisory || address == nullptr)
+ return nullptr;
}
-#if !defined(ARCH_CPU_32_BITS)
+#if defined(ARCH_CPU_32_BITS)
+ // For small address spaces, try the first aligned address >= |ret|. Note
+ // |ret| may be null, in which case |address| becomes null.
+ address = reinterpret_cast<void*>(
+ (reinterpret_cast<uintptr_t>(ret) + align_offset_mask) &
+ align_base_mask);
+#else // defined(ARCH_CPU_64_BITS)
// Keep trying random addresses on systems that have a large address space.
address = GetRandomPageBase();
address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
@@ -162,21 +174,21 @@ void* AllocPages(void* address,
#endif
}
- // Map a larger allocation so we can force alignment, but continue randomizing
- // only on 64-bit POSIX.
+ // Make a larger allocation so we can force alignment.
size_t try_length = length + (align - kPageAllocationGranularity);
CHECK(try_length >= length);
void* ret;
do {
- // Don't continue to burn cycles on mandatory hints (Windows).
+ // Continue randomizing only on POSIX.
address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
- ret = SystemAllocPages(address, try_length, page_accessibility);
+ ret = AllocPagesIncludingReserved(address, try_length, accessibility,
+ page_tag, commit);
// The retries are for Windows, where a race can steal our mapping on
// resize.
- } while (ret &&
- (ret = TrimMapping(ret, try_length, length, align,
- page_accessibility)) == nullptr);
+ } while (ret != nullptr &&
+ (ret = TrimMapping(ret, try_length, length, align, accessibility,
+ commit)) == nullptr);
return ret;
}
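The two masks are the standard power-of-two round-down/round-up tricks; a
self-contained sketch (the function names are ours, not from this file):

  #include <cassert>
  #include <cstdint>

  // Mirrors align_base_mask: round |p| down to a power-of-two |align|.
  uintptr_t RoundDown(uintptr_t p, uintptr_t align) {
    return p & ~(align - 1);
  }

  // Mirrors the 32-bit retry step: first aligned address >= |p|.
  uintptr_t RoundUp(uintptr_t p, uintptr_t align) {
    return (p + (align - 1)) & ~(align - 1);
  }

  int main() {
    assert(RoundDown(0x12345678u, 0x10000u) == 0x12340000u);
    assert(RoundUp(0x12345678u, 0x10000u) == 0x12350000u);
  }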
@@ -185,92 +197,60 @@ void FreePages(void* address, size_t length) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) &
kPageAllocationGranularityOffsetMask));
DCHECK(!(length & kPageAllocationGranularityOffsetMask));
-#if defined(OS_POSIX)
- int ret = munmap(address, length);
- CHECK(!ret);
-#else
- BOOL ret = VirtualFree(address, 0, MEM_RELEASE);
- CHECK(ret);
-#endif
+ FreePagesInternal(address, length);
}
-void SetSystemPagesInaccessible(void* address, size_t length) {
+bool SetSystemPagesAccess(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- int ret = mprotect(address, length, PROT_NONE);
- CHECK(!ret);
-#else
- BOOL ret = VirtualFree(address, length, MEM_DECOMMIT);
- CHECK(ret);
-#endif
-}
-
-bool SetSystemPagesAccessible(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- return !mprotect(address, length, PROT_READ | PROT_WRITE);
-#else
- return !!VirtualAlloc(address, length, MEM_COMMIT, PAGE_READWRITE);
-#endif
+ return SetSystemPagesAccessInternal(address, length, accessibility);
}
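One intended use of SetSystemPagesAccess() is the guard-page pattern. A
sketch, assuming a PageReadWrite enumerator exists alongside the
PageInaccessible one used in this diff:

  // Allocate one granule read-write, then revoke access to its last system
  // page so that any overrun faults immediately.
  char* base = static_cast<char*>(
      AllocPages(nullptr, kPageAllocationGranularity,
                 kPageAllocationGranularity, PageReadWrite,
                 PageTag::kChromium, /*commit=*/true));
  if (base) {
    SetSystemPagesAccess(base + kPageAllocationGranularity - kSystemPageSize,
                         kSystemPageSize, PageInaccessible);
  }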
void DecommitSystemPages(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- int ret = madvise(address, length, MADV_FREE);
- if (ret != 0 && errno == EINVAL) {
- // MADV_FREE only works on Linux 4.5+ . If request failed,
- // retry with older MADV_DONTNEED . Note that MADV_FREE
- // being defined at compile time doesn't imply runtime support.
- ret = madvise(address, length, MADV_DONTNEED);
- }
- CHECK(!ret);
-#else
- SetSystemPagesInaccessible(address, length);
-#endif
+ DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ DecommitSystemPagesInternal(address, length);
}
-void RecommitSystemPages(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- (void)address;
-#else
- CHECK(SetSystemPagesAccessible(address, length));
-#endif
+bool RecommitSystemPages(void* address,
+ size_t length,
+ PageAccessibilityConfiguration accessibility) {
+ DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ DCHECK(PageInaccessible != accessibility);
+ return RecommitSystemPagesInternal(address, length, accessibility);
}
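Decommit and recommit are meant to pair around idle periods. A sketch under
the same PageReadWrite assumption, with |scratch| and |scratch_len| standing
in for some system-page-aligned region:

  // Give the physical pages back to the OS while the region is idle; the
  // contents must be assumed lost after this call.
  DecommitSystemPages(scratch, scratch_len);
  // ...later, before reusing the region...
  if (!RecommitSystemPages(scratch, scratch_len, PageReadWrite)) {
    // Commit charge exhausted: treat like an allocation failure.
  }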
void DiscardSystemPages(void* address, size_t length) {
- DCHECK(!(length & kSystemPageOffsetMask));
-#if defined(OS_POSIX)
- // On POSIX, the implementation detail is that discard and decommit are the
- // same, and lead to pages that are returned to the system immediately and
- // get replaced with zeroed pages when touched. So we just call
- // DecommitSystemPages() here to avoid code duplication.
- DecommitSystemPages(address, length);
-#else
- // On Windows discarded pages are not returned to the system immediately and
- // not guaranteed to be zeroed when returned to the application.
- using DiscardVirtualMemoryFunction =
- DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
- static DiscardVirtualMemoryFunction discard_virtual_memory =
- reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
- if (discard_virtual_memory ==
- reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
- discard_virtual_memory =
- reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
- GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
- // Use DiscardVirtualMemory when available because it releases faster than
- // MEM_RESET.
- DWORD ret = 1;
- if (discard_virtual_memory)
- ret = discard_virtual_memory(address, length);
- // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
- // failure.
- if (ret) {
- void* ret = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
- CHECK(ret);
+ DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ DiscardSystemPagesInternal(address, length);
+}
+
+bool ReserveAddressSpace(size_t size) {
+ // To avoid deadlock, call only SystemAllocPages.
+ subtle::SpinLock::Guard guard(*GetReserveLock());
+ if (s_reservation_address == nullptr) {
+ void* mem = SystemAllocPages(nullptr, size, PageInaccessible,
+ PageTag::kChromium, false);
+ if (mem != nullptr) {
+ // We guarantee this alignment when reserving address space.
+ DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
+ kPageAllocationGranularityOffsetMask));
+ s_reservation_address = mem;
+ s_reservation_size = size;
+ return true;
+ }
+ }
+ return false;
+}
+
+void ReleaseReservation() {
+ // To avoid deadlock, call only FreePages.
+ subtle::SpinLock::Guard guard(*GetReserveLock());
+ if (s_reservation_address != nullptr) {
+ FreePages(s_reservation_address, s_reservation_size);
+ s_reservation_address = nullptr;
+ s_reservation_size = 0;
}
-#endif
}
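The reservation pair is meant to be used as a safety margin: park some
address space early, and let the OOM path in AllocPagesIncludingReserved()
above release it when a real allocation needs the room. A sketch with a
hypothetical reserve size:

  // Best-effort: failure just means no reserve is held.
  ReserveAddressSpace(64 * 1024 * 1024);
  // ...normal operation; an allocation failure silently drops the reserve...
  ReleaseReservation();  // idempotent explicit release, e.g. at shutdown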
uint32_t GetAllocPageErrorCode() {