diff --git a/base/allocator/partition_allocator/address_pool_manager.cc b/base/allocator/partition_allocator/address_pool_manager.cc
index b42e571263495b..64b2deba576cf3 100644
--- a/base/allocator/partition_allocator/address_pool_manager.cc
+++ b/base/allocator/partition_allocator/address_pool_manager.cc
@@ -4,25 +4,24 @@
 #include "base/allocator/partition_allocator/address_pool_manager.h"
 
-#include "build/build_config.h"
-
-#if BUILDFLAG(IS_APPLE)
-#include <sys/mman.h>
-#endif
-
 #include <algorithm>
+#include <cstdint>
 #include <limits>
 
 #include "base/allocator/buildflags.h"
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/page_allocator_internal.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
 #include "base/allocator/partition_allocator/reservation_offset_table.h"
 #include "base/cxx17_backports.h"
 #include "base/lazy_instance.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_APPLE)
+#include <sys/mman.h>
+#endif
 
 namespace base {
 namespace internal {
@@ -48,7 +47,7 @@ void DecommitPages(uintptr_t address, size_t size) {
   // Callers rely on the pages being zero-initialized when recommitting them.
   // |DecommitSystemPages| doesn't guarantee this on all operating systems, in
   // particular on macOS, but |DecommitAndZeroSystemPages| does.
-  DecommitAndZeroSystemPages(reinterpret_cast<void*>(address), size);
+  DecommitAndZeroSystemPages(address, size);
 }
 
 }  // namespace
 
@@ -303,9 +302,8 @@ uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                       uintptr_t requested_address,
                                       size_t length) {
   PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
-  uintptr_t address = reinterpret_cast<uintptr_t>(
-      AllocPages(reinterpret_cast<void*>(requested_address), length,
-                 kSuperPageSize, PageInaccessible, PageTag::kPartitionAlloc));
+  uintptr_t address = AllocPages(requested_address, length, kSuperPageSize,
+                                 PageInaccessible, PageTag::kPartitionAlloc);
   return address;
 }
 
@@ -314,7 +312,7 @@ void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                               size_t length) {
   PA_DCHECK(!(address & kSuperPageOffsetMask));
   PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
-  FreePages(reinterpret_cast<void*>(address), length);
+  FreePages(address, length);
 }
 
 void AddressPoolManager::MarkUsed(pool_handle handle,
diff --git a/base/allocator/partition_allocator/address_pool_manager_unittest.cc b/base/allocator/partition_allocator/address_pool_manager_unittest.cc
index 1336e27f583bba..d992421e1d3478 100644
--- a/base/allocator/partition_allocator/address_pool_manager_unittest.cc
+++ b/base/allocator/partition_allocator/address_pool_manager_unittest.cc
@@ -3,10 +3,10 @@
 // found in the LICENSE file.
#include "base/allocator/partition_allocator/address_pool_manager.h" + #include #include "base/allocator/partition_allocator/page_allocator.h" -#include "base/allocator/partition_allocator/page_allocator_internal.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/bits.h" #include "build/build_config.h" @@ -30,16 +30,16 @@ class PartitionAllocAddressPoolManagerTest : public testing::Test { void SetUp() override { manager_ = std::make_unique(); - base_ptr_ = AllocPages(nullptr, kPoolSize, kSuperPageSize, - base::PageInaccessible, PageTag::kPartitionAlloc); - base_address_ = reinterpret_cast(base_ptr_); + base_address_ = + AllocPages(kPoolSize, kSuperPageSize, base::PageInaccessible, + PageTag::kPartitionAlloc); ASSERT_TRUE(base_address_); pool_ = manager_->Add(base_address_, kPoolSize); } void TearDown() override { manager_->Remove(pool_); - FreePages(base_ptr_, kPoolSize); + FreePages(base_address_, kPoolSize); manager_.reset(); } @@ -49,7 +49,6 @@ class PartitionAllocAddressPoolManagerTest : public testing::Test { static constexpr size_t kPoolSize = kSuperPageSize * kPageCnt; std::unique_ptr manager_; - void* base_ptr_; uintptr_t base_address_; pool_handle pool_; }; @@ -196,23 +195,21 @@ TEST_F(PartitionAllocAddressPoolManagerTest, DecommittedDataIsErased) { uintptr_t address = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize); ASSERT_TRUE(address); - void* ptr = reinterpret_cast(address); - RecommitSystemPages(ptr, kSuperPageSize, PageReadWrite, + RecommitSystemPages(address, kSuperPageSize, PageReadWrite, PageUpdatePermissions); - memset(ptr, 42, kSuperPageSize); + memset(reinterpret_cast(address), 42, kSuperPageSize); GetAddressPoolManager()->UnreserveAndDecommit(pool_, address, kSuperPageSize); uintptr_t address2 = GetAddressPoolManager()->Reserve(pool_, 0, kSuperPageSize); ASSERT_EQ(address, address2); - uint8_t* ptr2 = reinterpret_cast(address2); - RecommitSystemPages(ptr2, kSuperPageSize, PageReadWrite, + RecommitSystemPages(address2, kSuperPageSize, PageReadWrite, PageUpdatePermissions); uint32_t sum = 0; for (size_t i = 0; i < kSuperPageSize; i++) { - sum += ptr2[i]; + sum += reinterpret_cast(address2)[i]; } EXPECT_EQ(0u, sum) << sum / 42 << " bytes were not zeroed"; diff --git a/base/allocator/partition_allocator/address_space_randomization.cc b/base/allocator/partition_allocator/address_space_randomization.cc index b14912360317ae..661fab395dfcf2 100644 --- a/base/allocator/partition_allocator/address_space_randomization.cc +++ b/base/allocator/partition_allocator/address_space_randomization.cc @@ -4,6 +4,8 @@ #include "base/allocator/partition_allocator/address_space_randomization.h" +#include + #include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/random.h" @@ -18,7 +20,7 @@ namespace base { -void* GetRandomPageBase() { +uintptr_t GetRandomPageBase() { uintptr_t random = static_cast(RandomValue()); #if defined(ARCH_CPU_64_BITS) @@ -55,14 +57,14 @@ void* GetRandomPageBase() { if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64)) is_wow64 = FALSE; if (!is_wow64) - return nullptr; + return 0; #endif // BUILDFLAG(IS_WIN) random &= internal::ASLRMask(); random += internal::ASLROffset(); #endif // defined(ARCH_CPU_32_BITS) PA_DCHECK(!(random & PageAllocationGranularityOffsetMask())); - return reinterpret_cast(random); + return random; } } // namespace base diff --git 
diff --git a/base/allocator/partition_allocator/address_space_randomization.h b/base/allocator/partition_allocator/address_space_randomization.h
index 5de435448ae390..2c4fdd5786c229 100644
--- a/base/allocator/partition_allocator/address_space_randomization.h
+++ b/base/allocator/partition_allocator/address_space_randomization.h
@@ -5,6 +5,8 @@
 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION_H_
 
+#include <cstdint>
+
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
@@ -14,7 +16,7 @@ namespace base {
 
 // Calculates a random preferred mapping address. In calculating an address, we
 // balance good ASLR against not fragmenting the address space too badly.
-BASE_EXPORT void* GetRandomPageBase();
+BASE_EXPORT uintptr_t GetRandomPageBase();
 
 namespace internal {
 
diff --git a/base/allocator/partition_allocator/address_space_randomization_unittest.cc b/base/allocator/partition_allocator/address_space_randomization_unittest.cc
index 8944289a6229c5..1274723a1254b4 100644
--- a/base/allocator/partition_allocator/address_space_randomization_unittest.cc
+++ b/base/allocator/partition_allocator/address_space_randomization_unittest.cc
@@ -4,6 +4,7 @@
 
 #include "base/allocator/partition_allocator/address_space_randomization.h"
 
+#include <cstdint>
 #include <vector>
 
 #include "base/allocator/partition_allocator/page_allocator.h"
@@ -48,7 +49,7 @@ uintptr_t GetMask() {
 const size_t kSamples = 100;
 
 uintptr_t GetAddressBits() {
-  return reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
+  return base::GetRandomPageBase();
 }
 
 uintptr_t GetRandomBits() {
@@ -63,10 +64,10 @@ TEST(PartitionAllocAddressSpaceRandomizationTest, DisabledASLR) {
   if (!mask) {
 #if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_32_BITS)
     // ASLR should be turned off on 32-bit Windows.
-    EXPECT_EQ(nullptr, base::GetRandomPageBase());
+    EXPECT_EQ(0u, base::GetRandomPageBase());
 #else
-    // Otherwise, nullptr is very unexpected.
-    EXPECT_NE(nullptr, base::GetRandomPageBase());
+    // Otherwise, 0 is very unexpected.
+    EXPECT_NE(0u, base::GetRandomPageBase());
 #endif
   }
 }
@@ -106,15 +107,13 @@ TEST(PartitionAllocAddressSpaceRandomizationTest, Predictable) {
 
   std::vector<uintptr_t> sequence;
   for (size_t i = 0; i < kSamples; ++i) {
-    uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
-    sequence.push_back(address);
+    sequence.push_back(GetRandomPageBase());
  }
 
   base::SetMmapSeedForTesting(kInitialSeed);
 
   for (size_t i = 0; i < kSamples; ++i) {
-    uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
-    EXPECT_EQ(address, sequence[i]);
+    EXPECT_EQ(GetRandomPageBase(), sequence[i]);
   }
 }
 
diff --git a/base/allocator/partition_allocator/page_allocator.cc b/base/allocator/partition_allocator/page_allocator.cc
index 64b7c8d3067479..5f3093969fa3df 100644
--- a/base/allocator/partition_allocator/page_allocator.cc
+++ b/base/allocator/partition_allocator/page_allocator.cc
@@ -7,8 +7,10 @@
 
 #include <atomic>
 #include <cstddef>
+#include <cstdint>
 
 #include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/page_allocator_internal.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_lock.h"
@@ -43,16 +45,17 @@ internal::PartitionLock& GetReserveLock() {
 
 std::atomic<size_t> g_total_mapped_address_space;
 
 // We only support a single block of reserved address space.
-void* s_reservation_address GUARDED_BY(GetReserveLock()) = nullptr;
+uintptr_t s_reservation_address GUARDED_BY(GetReserveLock()) = 0;
 size_t s_reservation_size GUARDED_BY(GetReserveLock()) = 0;
 
-void* AllocPagesIncludingReserved(void* address,
-                                  size_t length,
-                                  PageAccessibilityConfiguration accessibility,
-                                  PageTag page_tag) {
-  void* ret = SystemAllocPages(address, length, accessibility, page_tag);
-  if (ret == nullptr) {
-    const bool cant_alloc_length = kHintIsAdvisory || address == nullptr;
+uintptr_t AllocPagesIncludingReserved(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration accessibility,
+    PageTag page_tag) {
+  uintptr_t ret = SystemAllocPages(address, length, accessibility, page_tag);
+  if (!ret) {
+    const bool cant_alloc_length = kHintIsAdvisory || !address;
     if (cant_alloc_length) {
       // The system cannot allocate |length| bytes. Release any reserved address
       // space and try once more.
@@ -63,29 +66,29 @@ void* AllocPagesIncludingReserved(void* address,
   return ret;
 }
 
-// Trims |base| to given |trim_length| and |alignment|.
+// Trims memory at |base_address| to given |trim_length| and |alignment|.
 //
-// On failure, on Windows, this function returns nullptr and frees |base|.
-void* TrimMapping(void* base,
-                  size_t base_length,
-                  size_t trim_length,
-                  uintptr_t alignment,
-                  uintptr_t alignment_offset,
-                  PageAccessibilityConfiguration accessibility) {
+// On failure, on Windows, this function returns 0 and frees memory at
+// |base_address|.
+uintptr_t TrimMapping(uintptr_t base_address,
+                      size_t base_length,
+                      size_t trim_length,
+                      uintptr_t alignment,
+                      uintptr_t alignment_offset,
+                      PageAccessibilityConfiguration accessibility) {
   PA_DCHECK(base_length >= trim_length);
   PA_DCHECK(bits::IsPowerOfTwo(alignment));
   PA_DCHECK(alignment_offset < alignment);
-  uintptr_t base_as_uintptr = reinterpret_cast<uintptr_t>(base);
   uintptr_t new_base =
-      NextAlignedWithOffset(base_as_uintptr, alignment, alignment_offset);
-  PA_DCHECK(new_base >= base_as_uintptr);
-  size_t pre_slack = new_base - base_as_uintptr;
+      NextAlignedWithOffset(base_address, alignment, alignment_offset);
+  PA_DCHECK(new_base >= base_address);
+  size_t pre_slack = new_base - base_address;
   size_t post_slack = base_length - pre_slack - trim_length;
   PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
   PA_DCHECK(pre_slack < base_length);
   PA_DCHECK(post_slack < base_length);
-  return TrimMappingInternal(base, base_length, trim_length, accessibility,
-                             pre_slack, post_slack);
+  return TrimMappingInternal(base_address, base_length, trim_length,
+                             accessibility, pre_slack, post_slack);
 }
 
 }  // namespace
 
@@ -119,35 +122,52 @@ uintptr_t NextAlignedWithOffset(uintptr_t address,
   return new_address;
 }
 
-void* SystemAllocPages(void* hint,
-                       size_t length,
-                       PageAccessibilityConfiguration accessibility,
-                       PageTag page_tag) {
+uintptr_t SystemAllocPages(uintptr_t hint,
+                           size_t length,
+                           PageAccessibilityConfiguration accessibility,
+                           PageTag page_tag) {
   PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
-  PA_DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
-              PageAllocationGranularityOffsetMask()));
-  void* ptr = SystemAllocPagesInternal(hint, length, accessibility, page_tag);
-  if (ptr)
+  PA_DCHECK(!(hint & PageAllocationGranularityOffsetMask()));
+  uintptr_t ret =
+      SystemAllocPagesInternal(hint, length, accessibility, page_tag);
+  if (ret)
     g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
 
-  return ptr;
+  return ret;
 }
 
+uintptr_t AllocPages(size_t length,
+                     size_t align,
+                     PageAccessibilityConfiguration accessibility,
+                     PageTag page_tag) {
+  return AllocPagesWithAlignOffset(0, length, align, 0, accessibility,
+                                   page_tag);
+}
+uintptr_t AllocPages(uintptr_t address,
+                     size_t length,
+                     size_t align,
+                     PageAccessibilityConfiguration accessibility,
+                     PageTag page_tag) {
+  return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
+                                   page_tag);
+}
 void* AllocPages(void* address,
                  size_t length,
                  size_t align,
                  PageAccessibilityConfiguration accessibility,
                  PageTag page_tag) {
-  return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
-                                   page_tag);
+  return reinterpret_cast<void*>(
+      AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
+                 accessibility, page_tag));
 }
 
-void* AllocPagesWithAlignOffset(void* address,
-                                size_t length,
-                                size_t align,
-                                size_t align_offset,
-                                PageAccessibilityConfiguration accessibility,
-                                PageTag page_tag) {
+uintptr_t AllocPagesWithAlignOffset(
+    uintptr_t address,
+    size_t length,
+    size_t align,
+    size_t align_offset,
+    PageAccessibilityConfiguration accessibility,
+    PageTag page_tag) {
   PA_DCHECK(length >= PageAllocationGranularity());
   PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
   PA_DCHECK(align >= PageAllocationGranularity());
@@ -155,19 +175,14 @@ void* AllocPagesWithAlignOffset(void* address,
   PA_DCHECK(base::bits::IsPowerOfTwo(align));
   PA_DCHECK(align_offset < align);
   PA_DCHECK(!(align_offset & PageAllocationGranularityOffsetMask()));
-  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
-              PageAllocationGranularityOffsetMask()));
+  PA_DCHECK(!(address & PageAllocationGranularityOffsetMask()));
   uintptr_t align_offset_mask = align - 1;
   uintptr_t align_base_mask = ~align_offset_mask;
-  PA_DCHECK(address == nullptr || (reinterpret_cast<uintptr_t>(address) &
-                                   align_offset_mask) == align_offset);
+  PA_DCHECK(!address || (address & align_offset_mask) == align_offset);
 
   // If the client passed null as the address, choose a good one.
-  if (address == nullptr) {
-    address = GetRandomPageBase();
-    address = reinterpret_cast<void*>(
-        (reinterpret_cast<uintptr_t>(address) & align_base_mask) +
-        align_offset);
+  if (!address) {
+    address = (GetRandomPageBase() & align_base_mask) + align_offset;
   }
 
   // First try to force an exact-size, aligned allocation from our random base.
@@ -181,19 +196,18 @@ void* AllocPagesWithAlignOffset(void* address,
 #endif
 
   for (int i = 0; i < kExactSizeTries; ++i) {
-    void* ret =
+    uintptr_t ret =
         AllocPagesIncludingReserved(address, length, accessibility, page_tag);
-    if (ret != nullptr) {
+    if (ret) {
       // If the alignment is to our liking, we're done.
-      if ((reinterpret_cast<uintptr_t>(ret) & align_offset_mask) ==
-          align_offset)
+      if ((ret & align_offset_mask) == align_offset)
        return ret;
       // Free the memory and try again.
       FreePages(ret, length);
     } else {
       // |ret| is null; if this try was unhinted, we're OOM.
-      if (kHintIsAdvisory || address == nullptr)
-        return nullptr;
+      if (kHintIsAdvisory || !address)
+        return 0;
     }
 
 #if defined(ARCH_CPU_32_BITS)
@@ -201,54 +215,56 @@
     // |ret| may be null, in which case |address| becomes null. If
     // |align_offset| is non-zero, this calculation may get us not the first,
     // but the next matching address.
-    address = reinterpret_cast<void*>(
-        ((reinterpret_cast<uintptr_t>(ret) + align_offset_mask) &
-         align_base_mask) +
-        align_offset);
+    address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
 #else  // defined(ARCH_CPU_64_BITS)
    // Keep trying random addresses on systems that have a large address space.
-    address = GetRandomPageBase();
-    address = reinterpret_cast<void*>(NextAlignedWithOffset(
-        reinterpret_cast<uintptr_t>(address), align, align_offset));
+    address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
 #endif
   }
 
   // Make a larger allocation so we can force alignment.
   size_t try_length = length + (align - PageAllocationGranularity());
   PA_CHECK(try_length >= length);
-  void* ret;
+  uintptr_t ret;
 
   do {
     // Continue randomizing only on POSIX.
-    address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
+    address = kHintIsAdvisory ? GetRandomPageBase() : 0;
     ret = AllocPagesIncludingReserved(address, try_length, accessibility,
                                       page_tag);
     // The retries are for Windows, where a race can steal our mapping on
     // resize.
-  } while (ret != nullptr &&
-           (ret = TrimMapping(ret, try_length, length, align, align_offset,
-                              accessibility)) == nullptr);
+  } while (ret && (ret = TrimMapping(ret, try_length, length, align,
                                      align_offset, accessibility)) == 0);
 
   return ret;
 }
 
-void FreePages(void* address, size_t length) {
-  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
-              PageAllocationGranularityOffsetMask()));
+void FreePages(uintptr_t address, size_t length) {
+  PA_DCHECK(!(address & PageAllocationGranularityOffsetMask()));
   PA_DCHECK(!(length & PageAllocationGranularityOffsetMask()));
   FreePagesInternal(address, length);
   PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
   g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
 }
+void FreePages(void* address, size_t length) {
+  FreePages(reinterpret_cast<uintptr_t>(address), length);
+}
 
-bool TrySetSystemPagesAccess(void* address,
+bool TrySetSystemPagesAccess(uintptr_t address,
                              size_t length,
                              PageAccessibilityConfiguration accessibility) {
   PA_DCHECK(!(length & SystemPageOffsetMask()));
   return TrySetSystemPagesAccessInternal(address, length, accessibility);
 }
+bool TrySetSystemPagesAccess(void* address,
+                             size_t length,
+                             PageAccessibilityConfiguration accessibility) {
+  return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
+                                 accessibility);
+}
 
-void SetSystemPagesAccess(void* address,
+void SetSystemPagesAccess(uintptr_t address,
                           size_t length,
                           PageAccessibilityConfiguration accessibility) {
   PA_DCHECK(!(length & SystemPageOffsetMask()));
@@ -256,26 +272,36 @@ void SetSystemPagesAccess(void* address,
 }
 
 void DecommitSystemPages(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityDisposition accessibility_disposition) {
-  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
+  PA_DCHECK(!(address & SystemPageOffsetMask()));
   PA_DCHECK(!(length & SystemPageOffsetMask()));
   DecommitSystemPagesInternal(address, length, accessibility_disposition);
 }
+void DecommitSystemPages(
+    void* address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition) {
+  DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
+                      accessibility_disposition);
+}
 
-void DecommitAndZeroSystemPages(void* address, size_t length) {
-  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
+void DecommitAndZeroSystemPages(uintptr_t address, size_t length) {
+  PA_DCHECK(!(address & SystemPageOffsetMask()));
   PA_DCHECK(!(length & SystemPageOffsetMask()));
   DecommitAndZeroSystemPagesInternal(address, length);
 }
+void DecommitAndZeroSystemPages(void* address, size_t length) {
+  DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address), length);
+}
 
 void RecommitSystemPages(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility,
     PageAccessibilityDisposition accessibility_disposition) {
-  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
+  PA_DCHECK(!(address & SystemPageOffsetMask()));
   PA_DCHECK(!(length & SystemPageOffsetMask()));
   PA_DCHECK(accessibility != PageInaccessible);
   RecommitSystemPagesInternal(address, length, accessibility,
@@ -283,34 +309,36 @@ void RecommitSystemPages(
 }
 
 bool TryRecommitSystemPages(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility,
     PageAccessibilityDisposition accessibility_disposition) {
   // Duplicated because we want errors to be reported at a lower level in the
   // crashing case.
-  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & SystemPageOffsetMask()));
+  PA_DCHECK(!(address & SystemPageOffsetMask()));
   PA_DCHECK(!(length & SystemPageOffsetMask()));
   PA_DCHECK(accessibility != PageInaccessible);
   return TryRecommitSystemPagesInternal(address, length, accessibility,
                                         accessibility_disposition);
 }
 
-void DiscardSystemPages(void* address, size_t length) {
+void DiscardSystemPages(uintptr_t address, size_t length) {
   PA_DCHECK(!(length & SystemPageOffsetMask()));
   DiscardSystemPagesInternal(address, length);
 }
+void DiscardSystemPages(void* address, size_t length) {
+  DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
+}
 
 bool ReserveAddressSpace(size_t size) {
   // To avoid deadlock, call only SystemAllocPages.
   internal::PartitionAutoLock guard(GetReserveLock());
-  if (s_reservation_address == nullptr) {
-    void* mem =
-        SystemAllocPages(nullptr, size, PageInaccessible, PageTag::kChromium);
-    if (mem != nullptr) {
+  if (!s_reservation_address) {
+    uintptr_t mem =
+        SystemAllocPages(0, size, PageInaccessible, PageTag::kChromium);
+    if (mem) {
       // We guarantee this alignment when reserving address space.
-      PA_DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
-                  PageAllocationGranularityOffsetMask()));
+      PA_DCHECK(!(mem & PageAllocationGranularityOffsetMask()));
       s_reservation_address = mem;
       s_reservation_size = size;
       return true;
@@ -326,14 +354,14 @@ bool ReleaseReservation() {
     return false;
 
   FreePages(s_reservation_address, s_reservation_size);
-  s_reservation_address = nullptr;
+  s_reservation_address = 0;
   s_reservation_size = 0;
   return true;
 }
 
 bool HasReservationForTesting() {
   internal::PartitionAutoLock guard(GetReserveLock());
-  return s_reservation_address != nullptr;
+  return s_reservation_address;
 }
 
 uint32_t GetAllocPageErrorCode() {
diff --git a/base/allocator/partition_allocator/page_allocator.h b/base/allocator/partition_allocator/page_allocator.h
index a39a9093666d13..a5134cb39a5b85 100644
--- a/base/allocator/partition_allocator/page_allocator.h
+++ b/base/allocator/partition_allocator/page_allocator.h
@@ -8,6 +8,7 @@
 
 #include <stddef.h>
 #include <stdint.h>
+#include <cstdint>
 
 #include "base/allocator/partition_allocator/page_allocator_constants.h"
 #include "base/base_export.h"
@@ -67,8 +68,8 @@ BASE_EXPORT uintptr_t NextAlignedWithOffset(uintptr_t ptr,
 // |PageAllocationGranularity()|. |length| and |align| must be non-zero.
 // |align_offset| must be less than |align|. |align| must be a power of two.
 //
-// If |address| is null, then a suitable and randomized address will be chosen
-// automatically.
+// If |address| is 0/nullptr, then a suitable and randomized address will be
+// chosen automatically.
 //
 // |accessibility| controls the permission of the allocated pages.
 // PageInaccessible means uncommitted.
 //
 // |page_tag| is used on some platforms to identify the source of the
 // allocation. Use PageTag::kChromium as a catch-all category.
 //
-// This call will return null if the allocation cannot be satisfied.
+// This call will return 0/nullptr if the allocation cannot be satisfied.
+BASE_EXPORT uintptr_t AllocPages(size_t length,
+                                 size_t align,
+                                 PageAccessibilityConfiguration accessibility,
+                                 PageTag page_tag);
+BASE_EXPORT uintptr_t AllocPages(uintptr_t address,
+                                 size_t length,
+                                 size_t align,
+                                 PageAccessibilityConfiguration accessibility,
+                                 PageTag page_tag);
 BASE_EXPORT void* AllocPages(void* address,
                              size_t length,
                              size_t align,
                              PageAccessibilityConfiguration accessibility,
                              PageTag page_tag);
-BASE_EXPORT void* AllocPagesWithAlignOffset(
-    void* address,
-    size_t length,
-    size_t align,
-    size_t align_offset,
-    PageAccessibilityConfiguration page_accessibility,
-    PageTag page_tag);
+BASE_EXPORT uintptr_t
+AllocPagesWithAlignOffset(uintptr_t address,
+                          size_t length,
+                          size_t align,
+                          size_t align_offset,
+                          PageAccessibilityConfiguration page_accessibility,
+                          PageTag page_tag);
 
 // Free one or more pages starting at |address| and continuing for |length|
 // bytes.
@@ -96,6 +106,7 @@ BASE_EXPORT void* AllocPagesWithAlignOffset(
 // |address| and |length| must match a previous call to |AllocPages|. Therefore,
 // |address| must be aligned to |PageAllocationGranularity()| bytes, and
 // |length| must be a multiple of |PageAllocationGranularity()|.
+BASE_EXPORT void FreePages(uintptr_t address, size_t length);
 BASE_EXPORT void FreePages(void* address, size_t length);
 
 // Mark one or more system pages, starting at |address| with the given
@@ -104,6 +115,10 @@ BASE_EXPORT void FreePages(void* address, size_t length);
 //
 // Returns true if the permission change succeeded. In most cases you must
 // |CHECK| the result.
+[[nodiscard]] BASE_EXPORT bool TrySetSystemPagesAccess(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration page_accessibility);
 [[nodiscard]] BASE_EXPORT bool TrySetSystemPagesAccess(
     void* address,
     size_t length,
@@ -114,6 +129,10 @@
 // bytes.
 //
 // Performs a CHECK that the operation succeeds.
+BASE_EXPORT void SetSystemPagesAccess(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityConfiguration page_accessibility);
 BASE_EXPORT void SetSystemPagesAccess(
     void* address,
     size_t length,
@@ -151,6 +170,10 @@ BASE_EXPORT void SetSystemPagesAccess(
 // protections so accesses fault.
 //
 // This API will crash if the operation cannot be performed.
+BASE_EXPORT void DecommitSystemPages(
+    uintptr_t address,
+    size_t length,
+    PageAccessibilityDisposition accessibility_disposition);
 BASE_EXPORT void DecommitSystemPages(
     void* address,
     size_t length,
@@ -165,6 +188,7 @@ BASE_EXPORT void DecommitSystemPages(
 // setting them to PageInaccessible).
 //
 // This API will crash if the operation cannot be performed.
+BASE_EXPORT void DecommitAndZeroSystemPages(uintptr_t address, size_t length);
 BASE_EXPORT void DecommitAndZeroSystemPages(void* address, size_t length);
 
 // Whether decommitted memory is guaranteed to be zeroed when it is
@@ -194,14 +218,14 @@ constexpr BASE_EXPORT bool DecommittedMemoryIsAlwaysZeroed() {
 //
 // This API will crash if the operation cannot be performed.
 BASE_EXPORT void RecommitSystemPages(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration page_accessibility,
     PageAccessibilityDisposition accessibility_disposition);
 
 // Like RecommitSystemPages(), but returns false instead of crashing.
 [[nodiscard]] BASE_EXPORT bool TryRecommitSystemPages(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration page_accessibility,
     PageAccessibilityDisposition accessibility_disposition);
@@ -227,6 +251,7 @@ BASE_EXPORT void RecommitSystemPages(
 // that the page is required again. Once written to, the content of the page is
 // guaranteed stable once more. After being written to, the page content may be
 // based on the original page content, or a page of zeroes.
+BASE_EXPORT void DiscardSystemPages(uintptr_t address, size_t length);
 BASE_EXPORT void DiscardSystemPages(void* address, size_t length);
 
 // Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
diff --git a/base/allocator/partition_allocator/page_allocator_internal.h b/base/allocator/partition_allocator/page_allocator_internal.h
index 060d4f3d661705..2fc9e0962d3911 100644
--- a/base/allocator/partition_allocator/page_allocator_internal.h
+++ b/base/allocator/partition_allocator/page_allocator_internal.h
@@ -5,12 +5,17 @@
 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNAL_H_
 
+#include <cstddef>
+#include <cstdint>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+
 namespace base {
 
-void* SystemAllocPages(void* hint,
-                       size_t length,
-                       PageAccessibilityConfiguration accessibility,
-                       PageTag page_tag);
+uintptr_t SystemAllocPages(uintptr_t hint,
+                           size_t length,
+                           PageAccessibilityConfiguration accessibility,
+                           PageTag page_tag);
 
 }  // namespace base
 
diff --git a/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h b/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
index 20b86758775e6f..c6077ecbcca60d 100644
--- a/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
+++ b/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
@@ -15,6 +15,8 @@
 #include <lib/zx/vmar.h>
 #include <lib/zx/vmo.h>
 
+#include <cstdint>
+
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
@@ -70,15 +72,15 @@ constexpr bool kHintIsAdvisory = false;
 
 std::atomic<int32_t> s_allocPageErrorCode{0};
 
-void* SystemAllocPagesInternal(void* hint,
-                               size_t length,
-                               PageAccessibilityConfiguration accessibility,
-                               PageTag page_tag) {
+uintptr_t SystemAllocPagesInternal(uintptr_t hint,
+                                   size_t length,
+                                   PageAccessibilityConfiguration accessibility,
+                                   PageTag page_tag) {
   zx::vmo vmo;
   zx_status_t status = zx::vmo::create(length, 0, &vmo);
   if (status != ZX_OK) {
     ZX_DLOG(INFO, status) << "zx_vmo_create";
-    return nullptr;
+    return 0;
   }
 
   const char* vmo_name = PageTagToName(page_tag);
@@ -94,7 +96,7 @@ void* SystemAllocPagesInternal(void* hint,
     status = vmo.replace_as_executable(zx::resource(), &vmo);
     if (status != ZX_OK) {
       ZX_DLOG(INFO, status) << "zx_vmo_replace_as_executable";
-      return nullptr;
+      return 0;
     }
   }
 
@@ -102,7 +104,7 @@ void* SystemAllocPagesInternal(void* hint,
   uint64_t vmar_offset = 0;
   if (hint) {
-    vmar_offset = reinterpret_cast<uint64_t>(hint);
+    vmar_offset = hint;
     options |= ZX_VM_SPECIFIC;
   }
 
@@ -115,22 +117,20 @@ void* SystemAllocPagesInternal(void* hint,
     if (!hint) {
       ZX_DLOG(ERROR, status) << "zx_vmar_map";
     }
-    return nullptr;
+    return 0;
   }
 
-  return reinterpret_cast<void*>(address);
+  return address;
 }
 
-void* TrimMappingInternal(void* base,
-                          size_t base_length,
-                          size_t trim_length,
-                          PageAccessibilityConfiguration accessibility,
-                          size_t pre_slack,
-                          size_t post_slack) {
+uintptr_t TrimMappingInternal(uintptr_t base_address,
+                              size_t base_length,
+                              size_t trim_length,
+                              PageAccessibilityConfiguration accessibility,
+                              size_t pre_slack,
+                              size_t post_slack) {
   PA_DCHECK(base_length == trim_length + pre_slack + post_slack);
 
-  uint64_t base_address = reinterpret_cast<uint64_t>(base);
-
   // Unmap head if necessary.
   if (pre_slack) {
     zx_status_t status = zx::vmar::root_self()->unmap(base_address, pre_slack);
@@ -144,46 +144,42 @@ void* TrimMappingInternal(void* base,
     ZX_CHECK(status == ZX_OK, status);
   }
 
-  return reinterpret_cast<void*>(base_address + pre_slack);
+  return base_address + pre_slack;
 }
 
 bool TrySetSystemPagesAccessInternal(
-    void* address,
+    uint64_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility) {
   zx_status_t status = zx::vmar::root_self()->protect(
-      PageAccessibilityToZxVmOptions(accessibility),
-      reinterpret_cast<uint64_t>(address), length);
+      PageAccessibilityToZxVmOptions(accessibility), address, length);
   return status == ZX_OK;
 }
 
 void SetSystemPagesAccessInternal(
-    void* address,
+    uint64_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility) {
   zx_status_t status = zx::vmar::root_self()->protect(
-      PageAccessibilityToZxVmOptions(accessibility),
-      reinterpret_cast<uint64_t>(address), length);
+      PageAccessibilityToZxVmOptions(accessibility), address, length);
   ZX_CHECK(status == ZX_OK, status);
 }
 
-void FreePagesInternal(void* address, size_t length) {
-  uint64_t address_int = reinterpret_cast<uint64_t>(address);
-  zx_status_t status = zx::vmar::root_self()->unmap(address_int, length);
+void FreePagesInternal(uint64_t address, size_t length) {
+  zx_status_t status = zx::vmar::root_self()->unmap(address, length);
   ZX_CHECK(status == ZX_OK, status);
 }
 
-void DiscardSystemPagesInternal(void* address, size_t length) {
+void DiscardSystemPagesInternal(uint64_t address, size_t length) {
   // TODO(https://crbug.com/1022062): Mark pages as discardable, rather than
   // forcibly de-committing them immediately, when Fuchsia supports it.
-  uint64_t address_int = reinterpret_cast<uint64_t>(address);
   zx_status_t status = zx::vmar::root_self()->op_range(
-      ZX_VMO_OP_DECOMMIT, address_int, length, nullptr, 0);
+      ZX_VMO_OP_DECOMMIT, address, length, nullptr, 0);
   ZX_CHECK(status == ZX_OK, status);
 }
 
 void DecommitSystemPagesInternal(
-    void* address,
+    uint64_t address,
     size_t length,
     PageAccessibilityDisposition accessibility_disposition) {
   if (accessibility_disposition == PageUpdatePermissions) {
@@ -196,7 +192,7 @@ void DecommitSystemPagesInternal(
   DiscardSystemPagesInternal(address, length);
 }
 
-void DecommitAndZeroSystemPagesInternal(void* address, size_t length) {
+void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
   SetSystemPagesAccess(address, length, PageInaccessible);
 
   // TODO(https://crbug.com/1022062): this implementation will likely no longer
@@ -206,7 +202,7 @@
 }
 
 void RecommitSystemPagesInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility,
     PageAccessibilityDisposition accessibility_disposition) {
@@ -219,7 +215,7 @@
 }
 
 bool TryRecommitSystemPagesInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility,
     PageAccessibilityDisposition accessibility_disposition) {
diff --git a/base/allocator/partition_allocator/page_allocator_internals_posix.h b/base/allocator/partition_allocator/page_allocator_internals_posix.h
index e016ff7ce5db43..778a03919e407b 100644
--- a/base/allocator/partition_allocator/page_allocator_internals_posix.h
+++ b/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -8,7 +8,9 @@
 #include <errno.h>
 #include <sys/mman.h>
 #include <sys/syscall.h>
+
 #include <algorithm>
+#include <cstdint>
 
 #include "base/allocator/partition_allocator/oom.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
@@ -141,10 +143,10 @@
 
 std::atomic<int32_t> s_allocPageErrorCode{0};
 
 int GetAccessFlags(PageAccessibilityConfiguration accessibility);
 
-void* SystemAllocPagesInternal(void* hint,
-                               size_t length,
-                               PageAccessibilityConfiguration accessibility,
-                               PageTag page_tag) {
+uintptr_t SystemAllocPagesInternal(uintptr_t hint,
+                                   size_t length,
+                                   PageAccessibilityConfiguration accessibility,
+                                   PageTag page_tag) {
 #if BUILDFLAG(IS_APPLE)
   // Use a custom tag to make it easier to distinguish Partition Alloc regions
   // in vmmap(1). Tags between 240-255 are supported.
@@ -169,7 +171,8 @@ void* SystemAllocPagesInternal(void* hint,
   }
 #endif
 
-  void* ret = mmap(hint, length, access_flag, map_flags, fd, 0);
+  void* ret = mmap(reinterpret_cast<void*>(hint), length, access_flag,
+                   map_flags, fd, 0);
   if (ret == MAP_FAILED) {
     s_allocPageErrorCode = errno;
     ret = nullptr;
   }
@@ -185,23 +188,24 @@ void* SystemAllocPagesInternal(void* hint,
   }
 #endif
 
-  return ret;
+  return reinterpret_cast<uintptr_t>(ret);
 }
 
 bool TrySetSystemPagesAccessInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility) {
-  return 0 ==
-         HANDLE_EINTR(mprotect(address, length, GetAccessFlags(accessibility)));
+  return 0 == HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
+                                    GetAccessFlags(accessibility)));
 }
 
 void SetSystemPagesAccessInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility) {
   int access_flags = GetAccessFlags(accessibility);
-  const int ret = HANDLE_EINTR(mprotect(address, length, access_flags));
+  const int ret = HANDLE_EINTR(
+      mprotect(reinterpret_cast<void*>(address), length, access_flags));
 
   // On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal
   // kernel data structures cannot be allocated, (2) the address range is
@@ -222,31 +226,31 @@
   PA_PCHECK(0 == ret);
 }
 
-void FreePagesInternal(void* address, size_t length) {
-  PA_PCHECK(0 == munmap(address, length));
+void FreePagesInternal(uintptr_t address, size_t length) {
+  PA_PCHECK(0 == munmap(reinterpret_cast<void*>(address), length));
 }
 
-void* TrimMappingInternal(void* base,
-                          size_t base_length,
-                          size_t trim_length,
-                          PageAccessibilityConfiguration accessibility,
-                          size_t pre_slack,
-                          size_t post_slack) {
-  void* ret = base;
+uintptr_t TrimMappingInternal(uintptr_t base_address,
+                              size_t base_length,
+                              size_t trim_length,
+                              PageAccessibilityConfiguration accessibility,
+                              size_t pre_slack,
+                              size_t post_slack) {
+  uintptr_t ret = base_address;
   // We can resize the allocation run. Release unneeded memory before and after
   // the aligned range.
   if (pre_slack) {
-    FreePages(base, pre_slack);
-    ret = reinterpret_cast<char*>(base) + pre_slack;
+    FreePages(base_address, pre_slack);
+    ret = base_address + pre_slack;
   }
   if (post_slack) {
-    FreePages(reinterpret_cast<char*>(ret) + trim_length, post_slack);
+    FreePages(ret + trim_length, post_slack);
   }
 
   return ret;
 }
 
 void DecommitSystemPagesInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityDisposition accessibility_disposition) {
   // In POSIX, there is no decommit concept. Discarding is an effective way of
@@ -268,8 +272,9 @@
   if (!DecommittedMemoryIsAlwaysZeroed() && change_permissions) {
     // Memory may not be writable.
     size_t size = std::min(length, 2 * SystemPageSize());
-    PA_CHECK(mprotect(address, size, PROT_WRITE) == 0);
-    memset(address, 0xcc, size);
+    void* ptr = reinterpret_cast<void*>(address);
+    PA_CHECK(mprotect(ptr, size, PROT_WRITE) == 0);
+    memset(ptr, 0xcc, size);
   }
 #endif
 
@@ -284,20 +289,21 @@
   }
 }
 
-void DecommitAndZeroSystemPagesInternal(void* address, size_t length) {
+void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
   // https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html: "If
   // a MAP_FIXED request is successful, then any previous mappings [...] for
   // those whole pages containing any part of the address range [pa,pa+len)
   // shall be removed, as if by an appropriate call to munmap(), before the
   // new mapping is established."
   // As a consequence, the memory will be zero-initialized on next access.
-  void* ptr = mmap(address, length, PROT_NONE,
+  void* ptr = reinterpret_cast<void*>(address);
+  void* ret = mmap(ptr, length, PROT_NONE,
                    MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-  PA_CHECK(ptr == address);
+  PA_CHECK(ptr == ret);
 }
 
 void RecommitSystemPagesInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility,
     PageAccessibilityDisposition accessibility_disposition) {
@@ -311,12 +317,12 @@
 #if BUILDFLAG(IS_APPLE)
   // On macOS, to update accounting, we need to make another syscall. For more
   // details, see https://crbug.com/823915.
-  madvise(address, length, MADV_FREE_REUSE);
+  madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
 #endif
 }
 
 bool TryRecommitSystemPagesInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility,
     PageAccessibilityDisposition accessibility_disposition) {
@@ -332,18 +338,19 @@
 #if BUILDFLAG(IS_APPLE)
   // On macOS, to update accounting, we need to make another syscall. For more
   // details, see https://crbug.com/823915.
-  madvise(address, length, MADV_FREE_REUSE);
+  madvise(reinterpret_cast<void*>(address), length, MADV_FREE_REUSE);
 #endif
 
   return true;
 }
 
-void DiscardSystemPagesInternal(void* address, size_t length) {
+void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
+  void* ptr = reinterpret_cast<void*>(address);
 #if BUILDFLAG(IS_APPLE)
-  int ret = madvise(address, length, MADV_FREE_REUSABLE);
+  int ret = madvise(ptr, length, MADV_FREE_REUSABLE);
   if (ret) {
     // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
-    ret = madvise(address, length, MADV_DONTNEED);
+    ret = madvise(ptr, length, MADV_DONTNEED);
   }
   PA_PCHECK(ret == 0);
 #else
@@ -353,7 +360,7 @@
   // performance benefits unclear.
   //
   // Therefore, we just do the simple thing: MADV_DONTNEED.
-  PA_PCHECK(0 == madvise(address, length, MADV_DONTNEED));
+  PA_PCHECK(0 == madvise(ptr, length, MADV_DONTNEED));
 #endif
 }
 
diff --git a/base/allocator/partition_allocator/page_allocator_internals_win.h b/base/allocator/partition_allocator/page_allocator_internals_win.h
index 384367dff8bb1d..32413792bacf7c 100644
--- a/base/allocator/partition_allocator/page_allocator_internals_win.h
+++ b/base/allocator/partition_allocator/page_allocator_internals_win.h
@@ -5,13 +5,15 @@
 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_INTERNALS_WIN_H_
 
+#include <windows.h>
+
+#include <cstdint>
+
 #include "base/allocator/partition_allocator/oom.h"
 #include "base/allocator/partition_allocator/page_allocator_internal.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
 
-#include <windows.h>
-
 namespace base {
 
 namespace {
@@ -49,61 +51,63 @@ int GetAccessFlags(PageAccessibilityConfiguration accessibility) {
   }
 }
 
-void* SystemAllocPagesInternal(void* hint,
-                               size_t length,
-                               PageAccessibilityConfiguration accessibility,
-                               PageTag page_tag) {
+uintptr_t SystemAllocPagesInternal(uintptr_t hint,
+                                   size_t length,
+                                   PageAccessibilityConfiguration accessibility,
+                                   PageTag page_tag) {
   DWORD access_flag = GetAccessFlags(accessibility);
   const DWORD type_flags = (accessibility != PageInaccessible) ?
                                (MEM_RESERVE | MEM_COMMIT) : MEM_RESERVE;
-  void* ret = VirtualAlloc(hint, length, type_flags, access_flag);
+  void* ret = VirtualAlloc(reinterpret_cast<void*>(hint), length, type_flags,
+                           access_flag);
   if (ret == nullptr) {
     s_allocPageErrorCode = GetLastError();
   }
-  return ret;
+  return reinterpret_cast<uintptr_t>(ret);
 }
 
-void* TrimMappingInternal(void* base,
-                          size_t base_length,
-                          size_t trim_length,
-                          PageAccessibilityConfiguration accessibility,
-                          size_t pre_slack,
-                          size_t post_slack) {
-  void* ret = base;
+uintptr_t TrimMappingInternal(uintptr_t base_address,
+                              size_t base_length,
+                              size_t trim_length,
+                              PageAccessibilityConfiguration accessibility,
+                              size_t pre_slack,
+                              size_t post_slack) {
+  uintptr_t ret = base_address;
   if (pre_slack || post_slack) {
     // We cannot resize the allocation run. Free it and retry at the aligned
     // address within the freed range.
-    ret = reinterpret_cast<char*>(base) + pre_slack;
-    FreePages(base, base_length);
+    ret = base_address + pre_slack;
+    FreePages(base_address, base_length);
     ret = SystemAllocPages(ret, trim_length, accessibility,
                            PageTag::kChromium);
   }
   return ret;
 }
 
 bool TrySetSystemPagesAccessInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility) {
+  void* ptr = reinterpret_cast<void*>(address);
   if (accessibility == PageInaccessible)
-    return VirtualFree(address, length, MEM_DECOMMIT) != 0;
-  return nullptr != VirtualAlloc(address, length, MEM_COMMIT,
-                                 GetAccessFlags(accessibility));
+    return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
+  return nullptr !=
+         VirtualAlloc(ptr, length, MEM_COMMIT, GetAccessFlags(accessibility));
 }
 
 void SetSystemPagesAccessInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility) {
+  void* ptr = reinterpret_cast<void*>(address);
   if (accessibility == PageInaccessible) {
-    if (!VirtualFree(address, length, MEM_DECOMMIT)) {
+    if (!VirtualFree(ptr, length, MEM_DECOMMIT)) {
       // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
       // report we get the error number.
       PA_CHECK(static_cast<DWORD>(ERROR_SUCCESS) == GetLastError());
     }
   } else {
-    if (!VirtualAlloc(address, length, MEM_COMMIT,
-                      GetAccessFlags(accessibility))) {
+    if (!VirtualAlloc(ptr, length, MEM_COMMIT, GetAccessFlags(accessibility))) {
       int32_t error = GetLastError();
       if (error == ERROR_COMMITMENT_LIMIT)
         OOM_CRASH(length);
@@ -114,12 +118,12 @@
   }
 }
 
-void FreePagesInternal(void* address, size_t length) {
-  PA_CHECK(VirtualFree(address, 0, MEM_RELEASE));
+void FreePagesInternal(uintptr_t address, size_t length) {
+  PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), 0, MEM_RELEASE));
 }
 
 void DecommitSystemPagesInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityDisposition accessibility_disposition) {
   // Ignore accessibility_disposition, because decommitting is equivalent to
@@ -127,7 +131,7 @@
   SetSystemPagesAccess(address, length, PageInaccessible);
 }
 
-void DecommitAndZeroSystemPagesInternal(void* address, size_t length) {
+void DecommitAndZeroSystemPagesInternal(uintptr_t address, size_t length) {
   // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
   // "If a page is decommitted but not released, its state changes to reserved.
   // Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
@@ -136,11 +140,11 @@ void DecommitAndZeroSystemPagesInternal(void* address, size_t length) {
   // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
   // for MEM_COMMIT: "The function also guarantees that when the caller later
   // initially accesses the memory, the contents will be zero."
-  PA_CHECK(VirtualFree(address, length, MEM_DECOMMIT));
+  PA_CHECK(VirtualFree(reinterpret_cast<void*>(address), length, MEM_DECOMMIT));
 }
 
 void RecommitSystemPagesInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility,
     PageAccessibilityDisposition accessibility_disposition) {
@@ -150,7 +154,7 @@
 }
 
 bool TryRecommitSystemPagesInternal(
-    void* address,
+    uintptr_t address,
     size_t length,
     PageAccessibilityConfiguration accessibility,
     PageAccessibilityDisposition accessibility_disposition) {
@@ -159,7 +163,7 @@
   return TrySetSystemPagesAccess(address, length, accessibility);
 }
 
-void DiscardSystemPagesInternal(void* address, size_t length) {
+void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
   if (s_discard_virtual_memory ==
       reinterpret_cast<DiscardVirtualMemoryFunction>(-1)) {
     // DiscardVirtualMemory's minimum supported client is Windows 8.1 Update.
@@ -174,17 +178,17 @@
   }
 
+  void* ptr = reinterpret_cast<void*>(address);
   // Use DiscardVirtualMemory when available because it releases faster than
   // MEM_RESET.
   DWORD ret = 1;
   if (s_discard_virtual_memory) {
-    ret = s_discard_virtual_memory(address, length);
+    ret = s_discard_virtual_memory(ptr, length);
   }
   // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
   // failure.
   if (ret) {
-    void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
-    PA_CHECK(ptr);
+    PA_CHECK(VirtualAlloc(ptr, length, MEM_RESET, PAGE_READWRITE));
   }
 }
 
diff --git a/base/allocator/partition_allocator/page_allocator_unittest.cc b/base/allocator/partition_allocator/page_allocator_unittest.cc
index c0ba022aab0769..c2a1bd6366e96a 100644
--- a/base/allocator/partition_allocator/page_allocator_unittest.cc
+++ b/base/allocator/partition_allocator/page_allocator_unittest.cc
@@ -6,17 +6,19 @@
 
 #include <stdlib.h>
 #include <string.h>
+
 #include <atomic>
 #include <cstdint>
 #include <string>
 #include <vector>
 
-#include "base/cpu.h"
-#include "base/logging.h"
-#include "base/memory/tagging.h"
 #include "base/allocator/partition_allocator/address_space_randomization.h"
 #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
+#include "base/cpu.h"
+#include "base/logging.h"
+#include "base/memory/tagging.h"
 #include "build/build_config.h"
+
 #if BUILDFLAG(IS_ANDROID)
 #include "base/debug/proc_maps_linux.h"
 #endif  // BUILDFLAG(IS_ANDROID)
@@ -116,9 +118,9 @@ TEST(PartitionAllocPageAllocatorTest, AllocFailure) {
   if (size == 0)
     return;
 
-  void* result = AllocPages(nullptr, size, PageAllocationGranularity(),
-                            PageInaccessible, PageTag::kChromium);
-  if (result == nullptr) {
+  uintptr_t result = AllocPages(size, PageAllocationGranularity(),
+                                PageInaccessible, PageTag::kChromium);
+  if (!result) {
     // We triggered allocation failure. Our reservation should have been
     // released, and we should be able to make a new reservation.
     EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
   }
@@ -156,9 +158,9 @@ TEST(PartitionAllocPageAllocatorTest, MAYBE_ReserveAddressSpace) {
 }
 
 TEST(PartitionAllocPageAllocatorTest, AllocAndFreePages) {
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWrite,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWrite, PageTag::kChromium);
   EXPECT_TRUE(buffer);
   int* buffer0 = reinterpret_cast<int*>(buffer);
   *buffer0 = 42;
@@ -175,10 +177,10 @@ TEST(PartitionAllocPageAllocatorTest, AllocPagesAligned) {
                       alignment - PageAllocationGranularity()};
   for (size_t size : sizes) {
     for (size_t offset : offsets) {
-      void* buffer = AllocPagesWithAlignOffset(
-          nullptr, size, alignment, offset, PageReadWrite, PageTag::kChromium);
+      uintptr_t buffer = AllocPagesWithAlignOffset(
+          0, size, alignment, offset, PageReadWrite, PageTag::kChromium);
       EXPECT_TRUE(buffer);
-      EXPECT_EQ(reinterpret_cast<uintptr_t>(buffer) % alignment, offset);
+      EXPECT_EQ(buffer % alignment, offset);
       FreePages(buffer, size);
     }
   }
@@ -188,9 +190,9 @@ TEST(PartitionAllocPageAllocatorTest,
      AllocAndFreePagesWithPageReadWriteTagged) {
   // This test checks that a page allocated with PageReadWriteTagged is
   // safe to use on all systems (even those which don't support MTE).
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
   int* buffer0 = reinterpret_cast<int*>(buffer);
   *buffer0 = 42;
@@ -215,17 +217,17 @@ TEST(PartitionAllocPageAllocatorTest,
   }
 #if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
   // Next, map some read-write memory and copy the BTI-enabled function there.
-  char* const buffer = reinterpret_cast<char*>(AllocPages(
-      nullptr, PageAllocationGranularity(), PageAllocationGranularity(),
-      PageReadWrite, PageTag::kChromium));
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWrite, PageTag::kChromium);
   ptrdiff_t function_range =
       reinterpret_cast<char*>(arm_bti_test_function_end) -
       reinterpret_cast<char*>(arm_bti_test_function);
   ptrdiff_t invalid_offset =
       reinterpret_cast<char*>(arm_bti_test_function_invalid_offset) -
       reinterpret_cast<char*>(arm_bti_test_function);
-  memcpy(buffer, reinterpret_cast<void*>(arm_bti_test_function),
-         function_range);
+  memcpy(reinterpret_cast<void*>(buffer),
+         reinterpret_cast<void*>(arm_bti_test_function), function_range);
 
   // Next re-protect the page.
   SetSystemPagesAccess(buffer, PageAllocationGranularity(),
@@ -264,17 +266,17 @@ TEST(PartitionAllocPageAllocatorTest,
   }
 #if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
   int* buffer0 = reinterpret_cast<int*>(buffer);
   // Assign an 0x1 tag to the first granule of buffer.
-  int* buffer1 = reinterpret_cast<int*>(__arm_mte_increment_tag(buffer, 0x1));
+  int* buffer1 = __arm_mte_increment_tag(buffer0, 0x1);
   EXPECT_NE(buffer0, buffer1);
   __arm_mte_set_tag(buffer1);
   // Retrieve the tag to ensure that it's set.
-  buffer1 = reinterpret_cast<int*>(__arm_mte_get_tag(buffer));
+  buffer1 = __arm_mte_get_tag(buffer0);
   // Prove that the tag is different (if they're the same, the test won't work).
   ASSERT_NE(buffer0, buffer1);
   memory::TagViolationReportingMode parent_tagging_mode =
@@ -319,13 +321,13 @@ TEST(PartitionAllocPageAllocatorTest,
   }
 #if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
   int* buffer0 = reinterpret_cast<int*>(buffer);
-  __arm_mte_set_tag(__arm_mte_increment_tag(buffer, 0x1));
-  int* buffer1 = reinterpret_cast<int*>(__arm_mte_get_tag(buffer));
+  __arm_mte_set_tag(__arm_mte_increment_tag(buffer0, 0x1));
+  int* buffer1 = __arm_mte_get_tag(buffer0);
   EXPECT_NE(buffer0, buffer1);
   memory::TagViolationReportingMode parent_tagging_mode =
       memory::GetMemoryTaggingModeForCurrentThread();
@@ -401,9 +403,9 @@ void SignalHandler(int signal, siginfo_t* info, void*) {
 }
 
 TEST(PartitionAllocPageAllocatorTest, InaccessiblePages) {
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageInaccessible,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageInaccessible, PageTag::kChromium);
   EXPECT_TRUE(buffer);
 
   FAULT_TEST_BEGIN()
@@ -420,9 +422,9 @@ TEST(PartitionAllocPageAllocatorTest, InaccessiblePages) {
 }
 
 TEST(PartitionAllocPageAllocatorTest, ReadExecutePages) {
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadExecute,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadExecute, PageTag::kChromium);
   EXPECT_TRUE(buffer);
   int* buffer0 = reinterpret_cast<int*>(buffer);
   // Reading from buffer should succeed.
@@ -445,9 +447,9 @@ TEST(PartitionAllocPageAllocatorTest, ReadExecutePages) {
 
 #if BUILDFLAG(IS_ANDROID)
 TEST(PartitionAllocPageAllocatorTest, PageTagging) {
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageInaccessible,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageInaccessible, PageTag::kChromium);
   EXPECT_TRUE(buffer);
 
   std::string proc_maps;
@@ -457,7 +459,7 @@ TEST(PartitionAllocPageAllocatorTest, PageTagging) {
 
   bool found = false;
   for (const auto& region : regions) {
-    if (region.start == reinterpret_cast<uintptr_t>(buffer)) {
+    if (region.start == buffer) {
       found = true;
       EXPECT_EQ("[anon:chromium]", region.path);
       break;
@@ -474,11 +476,11 @@ TEST(PartitionAllocPageAllocatorTest, DecommitErasesMemory) {
     return;
 
   size_t size = PageAllocationGranularity();
-  void* buffer = AllocPages(nullptr, size, PageAllocationGranularity(),
-                            PageReadWrite, PageTag::kChromium);
+  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
+                                PageReadWrite, PageTag::kChromium);
   ASSERT_TRUE(buffer);
 
-  memset(buffer, 42, size);
+  memset(reinterpret_cast<void*>(buffer), 42, size);
 
   DecommitSystemPages(buffer, size, PageKeepPermissionsIfPossible);
   RecommitSystemPages(buffer, size, PageReadWrite,
@@ -496,11 +498,11 @@ TEST(PartitionAllocPageAllocatorTest, DecommitAndZero) {
   size_t size = PageAllocationGranularity();
-  void* buffer = AllocPages(nullptr, size, PageAllocationGranularity(),
-                            PageReadWrite, PageTag::kChromium);
+  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
+                                PageReadWrite, PageTag::kChromium);
   ASSERT_TRUE(buffer);
 
-  memset(buffer, 42, size);
+  memset(reinterpret_cast<void*>(buffer), 42, size);
 
   DecommitAndZeroSystemPages(buffer, size);
 
@@ -545,8 +547,8 @@ TEST(PartitionAllocPageAllocatorTest, MappedPagesAccounting) {
   size_t mapped_size_before = GetTotalMappedSize();
 
   for (size_t offset : offsets) {
-    void* data = AllocPagesWithAlignOffset(
-        nullptr, size, alignment, offset, PageInaccessible, PageTag::kChromium);
+    uintptr_t data = AllocPagesWithAlignOffset(
+        0, size, alignment, offset, PageInaccessible, PageTag::kChromium);
     ASSERT_TRUE(data);
 
     EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());
diff --git a/base/allocator/partition_allocator/partition_address_space.cc b/base/allocator/partition_allocator/partition_address_space.cc
index 526ed47725cee5..db863594e23f63 100644
--- a/base/allocator/partition_allocator/partition_address_space.cc
+++ b/base/allocator/partition_allocator/partition_address_space.cc
@@ -10,7 +10,6 @@
 
 #include "base/allocator/partition_allocator/address_pool_manager.h"
 #include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/page_allocator_internal.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/bits.h"
@@ -74,9 +73,9 @@ void PartitionAddressSpace::Init() {
   if (IsInitialized())
     return;
 
-  setup_.regular_pool_base_address_ = reinterpret_cast<uintptr_t>(
-      AllocPages(nullptr, kRegularPoolSize, kRegularPoolSize,
-                 base::PageInaccessible, PageTag::kPartitionAlloc));
+  setup_.regular_pool_base_address_ =
+      AllocPages(kRegularPoolSize, kRegularPoolSize, base::PageInaccessible,
+                 PageTag::kPartitionAlloc);
   if (!setup_.regular_pool_base_address_)
     HandleGigaCageAllocFailure();
   PA_DCHECK(!(setup_.regular_pool_base_address_ &
@@ -95,14 +94,13 @@ void PartitionAddressSpace::Init() {
   // is a valid pointer, and having a "forbidden zone" before the BRP pool
   // prevents such a pointer from "sneaking into" the pool.
   const size_t kForbiddenZoneSize = PageAllocationGranularity();
-  void* ptr = AllocPagesWithAlignOffset(
-      nullptr, kBRPPoolSize + kForbiddenZoneSize, kBRPPoolSize,
+  uintptr_t base_address = AllocPagesWithAlignOffset(
+      0, kBRPPoolSize + kForbiddenZoneSize, kBRPPoolSize,
       kBRPPoolSize - kForbiddenZoneSize, base::PageInaccessible,
       PageTag::kPartitionAlloc);
-  if (!ptr)
+  if (!base_address)
     HandleGigaCageAllocFailure();
-  setup_.brp_pool_base_address_ =
-      reinterpret_cast<uintptr_t>(ptr) + kForbiddenZoneSize;
+  setup_.brp_pool_base_address_ = base_address + kForbiddenZoneSize;
   PA_DCHECK(!(setup_.brp_pool_base_address_ & (kBRPPoolSize - 1)));
   setup_.brp_pool_ = internal::AddressPoolManager::GetInstance()->Add(
       setup_.brp_pool_base_address_, kBRPPoolSize);
@@ -147,13 +145,11 @@ void PartitionAddressSpace::InitConfigurablePool(uintptr_t pool_base,
 }
 
 void PartitionAddressSpace::UninitForTesting() {
-  FreePages(reinterpret_cast<void*>(setup_.regular_pool_base_address_),
-            kRegularPoolSize);
+  FreePages(setup_.regular_pool_base_address_, kRegularPoolSize);
   // For BRP pool, the allocation region includes a "forbidden zone" before the
   // pool.
   const size_t kForbiddenZoneSize = PageAllocationGranularity();
-  FreePages(reinterpret_cast<void*>(setup_.brp_pool_base_address_ -
-                                    kForbiddenZoneSize),
+  FreePages(setup_.brp_pool_base_address_ - kForbiddenZoneSize,
             kBRPPoolSize + kForbiddenZoneSize);
   // Do not free pages for the configurable pool, because its memory is owned
   // by someone else, but deinitialize it nonetheless.
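The align-offset arithmetic in the BRP-pool reservation above is easier to check with concrete numbers. The sketch below uses toy sizes (the real kBRPPoolSize and forbidden-zone size are platform-dependent) and assumes AllocPagesWithAlignOffset's contract that the returned address satisfies address % align == align_offset:

    #include <cstdint>

    constexpr uintptr_t kPool = uintptr_t{1} << 30;  // toy pool size
    constexpr uintptr_t kZone = uintptr_t{1} << 12;  // toy forbidden zone
    // One address the reservation may legally land on: kZone bytes below a
    // kPool boundary, i.e. reservation % kPool == kPool - kZone.
    constexpr uintptr_t kReservation = 7 * kPool - kZone;
    static_assert(kReservation % kPool == kPool - kZone, "matches align_offset");
    // Skipping the forbidden zone puts the pool base exactly on a kPool
    // boundary, which is what the PA_DCHECK above verifies.
    static_assert((kReservation + kZone) % kPool == 0, "pool base aligned");
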
diff --git a/base/allocator/partition_allocator/partition_alloc.cc b/base/allocator/partition_allocator/partition_alloc.cc
index 650a55671e620a..b6fc83af1bcf3b 100644
--- a/base/allocator/partition_allocator/partition_alloc.cc
+++ b/base/allocator/partition_allocator/partition_alloc.cc
@@ -6,12 +6,12 @@
 
 #include <string.h>
 
+#include <cstdint>
 #include <memory>
 
 #include "base/allocator/buildflags.h"
 #include "base/allocator/partition_allocator/address_pool_manager.h"
 #include "base/allocator/partition_allocator/memory_reclaimer.h"
-#include "base/allocator/partition_allocator/page_allocator_internal.h"
 #include "base/allocator/partition_allocator/partition_address_space.h"
 #include "base/allocator/partition_allocator/partition_alloc_hooks.h"
 #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
diff --git a/base/allocator/partition_allocator/partition_alloc_unittest.cc b/base/allocator/partition_allocator/partition_alloc_unittest.cc
index 3a4a4acb2cf3b6..258d78eb105d44 100644
--- a/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -1655,15 +1655,13 @@ TEST_F(PartitionAllocTest, MappingCollision) {
   uintptr_t super_page = slot_span_start - PartitionPageSize();
   // Map a single system page either side of the mapping for our allocations,
   // with the goal of tripping up alignment of the next mapping.
-  void* map1 = AllocPages(
-      reinterpret_cast<void*>(super_page - PageAllocationGranularity()),
-      PageAllocationGranularity(), PageAllocationGranularity(),
-      PageInaccessible, PageTag::kPartitionAlloc);
+  uintptr_t map1 = AllocPages(
+      super_page - PageAllocationGranularity(), PageAllocationGranularity(),
+      PageAllocationGranularity(), PageInaccessible, PageTag::kPartitionAlloc);
   EXPECT_TRUE(map1);
-  void* map2 =
-      AllocPages(reinterpret_cast<void*>(super_page + kSuperPageSize),
-                 PageAllocationGranularity(), PageAllocationGranularity(),
-                 PageInaccessible, PageTag::kPartitionAlloc);
+  uintptr_t map2 = AllocPages(
+      super_page + kSuperPageSize, PageAllocationGranularity(),
+      PageAllocationGranularity(), PageInaccessible, PageTag::kPartitionAlloc);
   EXPECT_TRUE(map2);
 
   for (i = 0; i < num_partition_pages_needed; ++i)
@@ -1678,14 +1676,13 @@ TEST_F(PartitionAllocTest, MappingCollision) {
   super_page -= PartitionPageSize();
   // Map a single system page either side of the mapping for our allocations,
   // with the goal of tripping up alignment of the next mapping.
-  map1 = AllocPages(
-      reinterpret_cast<void*>(super_page - PageAllocationGranularity()),
-      PageAllocationGranularity(), PageAllocationGranularity(),
-      PageReadWriteTagged, PageTag::kPartitionAlloc);
-  EXPECT_TRUE(map1);
-  map2 = AllocPages(reinterpret_cast<void*>(super_page + kSuperPageSize),
+  map1 = AllocPages(super_page - PageAllocationGranularity(),
                     PageAllocationGranularity(), PageAllocationGranularity(),
                     PageReadWriteTagged, PageTag::kPartitionAlloc);
+  EXPECT_TRUE(map1);
+  map2 = AllocPages(super_page + kSuperPageSize, PageAllocationGranularity(),
+                    PageAllocationGranularity(), PageReadWriteTagged,
+                    PageTag::kPartitionAlloc);
   EXPECT_TRUE(map2);
   EXPECT_TRUE(TrySetSystemPagesAccess(map1, PageAllocationGranularity(),
                                       PageInaccessible));
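Note the hinted overload MappingCollision exercises: the requested address moves from a void* first parameter to a uintptr_t first parameter, with 0 (rather than nullptr) meaning "no preference". A hedged sketch of how a caller passes a hint under the new API; the helper name is made up:

    #include <cstdint>

    #include "base/allocator/partition_allocator/page_allocator.h"

    // Best-effort: the OS may ignore the hint and place the mapping
    // elsewhere, so callers must check the result, not assume the address.
    uintptr_t ReserveNear(uintptr_t hint, size_t length, size_t align) {
      return base::AllocPages(hint, length, align, base::PageInaccessible,
                              base::PageTag::kChromium);
    }
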
@@ -2959,12 +2956,9 @@ TEST_F(PartitionAllocTest, FundamentalAlignment) {
     void* ptr2 = allocator.root()->Alloc(size, "");
     void* ptr3 = allocator.root()->Alloc(size, "");
 
-    EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % fundamental_alignment,
-              static_cast<uintptr_t>(0));
-    EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr2) % fundamental_alignment,
-              static_cast<uintptr_t>(0));
-    EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % fundamental_alignment,
-              static_cast<uintptr_t>(0));
+    EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % fundamental_alignment, 0u);
+    EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr2) % fundamental_alignment, 0u);
+    EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % fundamental_alignment, 0u);
 
 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
     // The capacity(C) is slot size - kExtraAllocSize.
@@ -2976,12 +2970,11 @@ TEST_F(PartitionAllocTest, FundamentalAlignment) {
     // == (kAlignment - kInSlotRefCountBufferSize) % kAlignment.
     EXPECT_EQ(allocator.root()->AllocationCapacityFromPtr(ptr) %
                   fundamental_alignment,
-              static_cast<size_t>(fundamental_alignment -
-                                  kInSlotRefCountBufferSize));
+              fundamental_alignment - kInSlotRefCountBufferSize);
 #else
     EXPECT_EQ(allocator.root()->AllocationCapacityFromPtr(ptr) %
                   fundamental_alignment,
-              static_cast<size_t>(0));
+              0u);
 #endif
 
     allocator.root()->Free(ptr);
@@ -3751,10 +3744,9 @@ TEST_F(PartitionAllocTest, ConfigurablePool) {
        pool_size /= 2) {
     DCHECK(bits::IsPowerOfTwo(pool_size));
     EXPECT_FALSE(IsConfigurablePoolAvailable());
-    void* pool_memory = AllocPages(nullptr, pool_size, pool_size,
-                                   PageInaccessible, PageTag::kPartitionAlloc);
-    EXPECT_NE(nullptr, pool_memory);
-    uintptr_t pool_base = reinterpret_cast<uintptr_t>(pool_memory);
+    uintptr_t pool_base = AllocPages(pool_size, pool_size, PageInaccessible,
+                                     PageTag::kPartitionAlloc);
+    EXPECT_NE(0u, pool_base);
     PartitionAddressSpace::InitConfigurablePool(pool_base, pool_size);
     EXPECT_TRUE(IsConfigurablePoolAvailable());
@@ -3782,7 +3774,7 @@ TEST_F(PartitionAllocTest, ConfigurablePool) {
     }
 
     PartitionAddressSpace::UninitConfigurablePoolForTesting();
-    FreePages(pool_memory, pool_size);
+    FreePages(pool_base, pool_size);
   }
 #endif  // defined(ARCH_CPU_64_BITS)
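The ConfigurablePool test doubles as a usage sketch for embedders that bring their own pool memory: reserve a power-of-two-sized, equally aligned region, hand its base address (now a plain uintptr_t) to the partition address space, and free it only after deinitializing. Roughly, under the post-change API and with error handling elided:

    #include <cstdint>

    #include "base/allocator/partition_allocator/page_allocator.h"
    #include "base/allocator/partition_allocator/partition_address_space.h"

    void UseConfigurablePool(size_t pool_size) {  // pool_size: power of two
      uintptr_t pool_base =
          base::AllocPages(pool_size, pool_size, base::PageInaccessible,
                           base::PageTag::kPartitionAlloc);
      base::internal::PartitionAddressSpace::InitConfigurablePool(pool_base,
                                                                  pool_size);
      // ... create and use a PartitionRoot backed by the configurable pool ...
      base::internal::PartitionAddressSpace::
          UninitConfigurablePoolForTesting();
      base::FreePages(pool_base, pool_size);
    }
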
diff --git a/base/allocator/partition_allocator/partition_bucket.cc b/base/allocator/partition_allocator/partition_bucket.cc
index 02a8c26433e8f9..944423debde608 100644
--- a/base/allocator/partition_allocator/partition_bucket.cc
+++ b/base/allocator/partition_allocator/partition_bucket.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "base/allocator/partition_allocator/partition_bucket.h"
+
 #include <cstdint>
 
 #include "base/allocator/buildflags.h"
@@ -272,7 +273,7 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
   {
     ScopedSyscallTimer timer{root};
     RecommitSystemPages(
-        reinterpret_cast<void*>(reservation_start + SystemPageSize()),
+        reservation_start + SystemPageSize(),
 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
         // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
         // used, allocate 2 SystemPages, one for SuperPage metadata and the
@@ -329,8 +330,9 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
   // first and the last bytes are on the same system page, i.e. within the
   // super page metadata region.
   PA_DCHECK(
-      bits::AlignDown(reinterpret_cast<char*>(metadata), SystemPageSize()) ==
-      bits::AlignDown(reinterpret_cast<char*>(metadata) +
+      bits::AlignDown(reinterpret_cast<uintptr_t>(metadata),
+                      SystemPageSize()) ==
+      bits::AlignDown(reinterpret_cast<uintptr_t>(metadata) +
                           sizeof(PartitionDirectMapMetadata) - 1,
                       SystemPageSize()));
   PA_DCHECK(page == &metadata->page);
@@ -367,9 +369,8 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
   // Note that we didn't check above, because if we cannot even commit a
   // single page, then this is likely hopeless anyway, and we will crash very
   // soon.
-  const bool ok = root->TryRecommitSystemPagesForData(
-      reinterpret_cast<uintptr_t>(slot_start), slot_size,
-      PageUpdatePermissions);
+  const bool ok = root->TryRecommitSystemPagesForData(slot_start, slot_size,
+                                                      PageUpdatePermissions);
   if (!ok) {
     if (!return_null) {
       PartitionOutOfMemoryCommitFailure(root, slot_size);
@@ -378,9 +379,8 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
     {
       ScopedSyscallTimer timer{root};
 #if !defined(PA_HAS_64_BITS_POINTERS)
-      AddressPoolManager::GetInstance()->MarkUnused(
-          pool, reinterpret_cast<const char*>(reservation_start),
-          reservation_size);
+      AddressPoolManager::GetInstance()->MarkUnused(pool, reservation_start,
+                                                    reservation_size);
 #endif
       AddressPoolManager::GetInstance()->UnreserveAndDecommit(
           pool, reservation_start, reservation_size);
@@ -562,9 +562,8 @@ PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
   PA_DEBUG_DATA_ON_STACK("spansize", slot_span_reservation_size);
   PA_DEBUG_DATA_ON_STACK("spancmt", slot_span_committed_size);
 
-  root->RecommitSystemPagesForData(
-      reinterpret_cast<uintptr_t>(slot_span_start), slot_span_committed_size,
-      PageUpdatePermissions);
+  root->RecommitSystemPagesForData(slot_span_start, slot_span_committed_size,
+                                   PageUpdatePermissions);
 }
 
 PA_CHECK(get_slots_per_span() <=
@@ -606,8 +605,7 @@ ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
   root->next_super_page = super_page + kSuperPageSize;
   uintptr_t state_bitmap = super_page + PartitionPageSize();
-  PA_DCHECK(reinterpret_cast<uintptr_t>(SuperPageStateBitmap(super_page)) ==
-            state_bitmap);
+  PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
   const size_t state_bitmap_reservation_size =
       root->IsQuarantineAllowed() ? ReservedStateBitmapSize() : 0;
   const size_t state_bitmap_size_to_commit =
@@ -628,7 +626,7 @@ ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
   {
     ScopedSyscallTimer timer{root};
     RecommitSystemPages(
-        reinterpret_cast<void*>(super_page + SystemPageSize()),
+        super_page + SystemPageSize(),
 #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
         // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is used,
         // allocate 2 SystemPages, one for SuperPage metadata and the other for
@@ -692,9 +690,8 @@ ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
   if (root->IsQuarantineEnabled()) {
     {
       ScopedSyscallTimer timer{root};
-      RecommitSystemPages(reinterpret_cast<void*>(state_bitmap),
-                          state_bitmap_size_to_commit, PageReadWrite,
-                          PageUpdatePermissions);
+      RecommitSystemPages(state_bitmap, state_bitmap_size_to_commit,
+                          PageReadWrite, PageUpdatePermissions);
     }
     PCScan::RegisterNewSuperPage(root, super_page);
   }
@@ -770,8 +767,7 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
   // Windows anyway).
   if (kUseLazyCommit) {
     // TODO(lizeb): Handle commit failure.
-    root->RecommitSystemPagesForData(reinterpret_cast<uintptr_t>(commit_start),
-                                     commit_end - commit_start,
+    root->RecommitSystemPagesForData(commit_start, commit_end - commit_start,
                                      PageUpdatePermissions);
   }
diff --git a/base/allocator/partition_allocator/partition_page.h b/base/allocator/partition_allocator/partition_page.h
index 2a46ca7008297e..1940f0624f8511 100644
--- a/base/allocator/partition_allocator/partition_page.h
+++ b/base/allocator/partition_allocator/partition_page.h
@@ -6,6 +6,7 @@
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
 
 #include <string.h>
+
 #include <cstdint>
 #include <limits>
 #include <utility>
@@ -403,12 +404,15 @@ CommittedStateBitmapSize() {
   return bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
 }
 
-// Returns the pointer to the state bitmap in the super page. It's the caller's
-// responsibility to ensure that the bitmaps even exist.
-ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(uintptr_t super_page) {
+// Returns the address/pointer to the state bitmap in the super page. It's the
+// caller's responsibility to ensure that the bitmaps even exist.
+ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
   PA_DCHECK(!(super_page % kSuperPageAlignment));
-  return reinterpret_cast<AllocationStateMap*>(super_page +
-                                               PartitionPageSize());
+  return super_page + PartitionPageSize();
+}
+
+ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(uintptr_t super_page) {
+  return reinterpret_cast<AllocationStateMap*>(
+      SuperPageStateBitmapAddr(super_page));
 }
 
 ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
diff --git a/base/allocator/partition_allocator/partition_root.cc b/base/allocator/partition_allocator/partition_root.cc
index e22a3949819a0c..a0ffcc47c037d5 100644
--- a/base/allocator/partition_allocator/partition_root.cc
+++ b/base/allocator/partition_allocator/partition_root.cc
@@ -203,8 +203,7 @@ static size_t PartitionPurgeSlotSpan(
       internal::SlotSpanMetadata<thread_safe>::ToSlotSpanStart(slot_span);
   uintptr_t committed_data_end = slot_span_start + utilized_slot_size;
   ScopedSyscallTimer timer{root};
-  DiscardSystemPages(reinterpret_cast<void*>(committed_data_end),
-                     discardable_bytes);
+  DiscardSystemPages(committed_data_end, discardable_bytes);
   }
   return discardable_bytes;
 }
@@ -316,8 +315,7 @@ static size_t PartitionPurgeSlotSpan(
     PA_DCHECK(num_new_entries == num_slots - slot_span->num_allocated_slots);
     // Discard the memory.
     ScopedSyscallTimer timer{root};
-    DiscardSystemPages(reinterpret_cast<void*>(begin_addr),
-                       unprovisioned_bytes);
+    DiscardSystemPages(begin_addr, unprovisioned_bytes);
   }
 }
 
@@ -345,8 +343,7 @@ static size_t PartitionPurgeSlotSpan(
       discardable_bytes += partial_slot_bytes;
       if (discard) {
         ScopedSyscallTimer timer{root};
-        DiscardSystemPages(reinterpret_cast<void*>(begin_addr),
-                           partial_slot_bytes);
+        DiscardSystemPages(begin_addr, partial_slot_bytes);
       }
     }
   }
diff --git a/base/allocator/partition_allocator/partition_root.h b/base/allocator/partition_allocator/partition_root.h
index 3f28f6f1755f18..1f098729280ec7 100644
--- a/base/allocator/partition_allocator/partition_root.h
+++ b/base/allocator/partition_allocator/partition_root.h
@@ -1350,8 +1350,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::DecommitSystemPagesForData(
     size_t length,
     PageAccessibilityDisposition accessibility_disposition) {
   internal::ScopedSyscallTimer timer{this};
-  DecommitSystemPages(reinterpret_cast<void*>(address), length,
-                      accessibility_disposition);
+  DecommitSystemPages(address, length, accessibility_disposition);
   DecreaseCommittedPages(length);
 }
 
@@ -1363,13 +1362,12 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::RecommitSystemPagesForData(
     PageAccessibilityDisposition accessibility_disposition) {
   internal::ScopedSyscallTimer timer{this};
 
-  void* ptr = reinterpret_cast<void*>(address);
-  bool ok = TryRecommitSystemPages(ptr, length, PageReadWriteTagged,
+  bool ok = TryRecommitSystemPages(address, length, PageReadWriteTagged,
                                    accessibility_disposition);
   if (UNLIKELY(!ok)) {
     // Decommit some memory and retry. The alternative is crashing.
     DecommitEmptySlotSpans();
-    RecommitSystemPages(ptr, length, PageReadWriteTagged,
+    RecommitSystemPages(address, length, PageReadWriteTagged,
                         accessibility_disposition);
   }
 
@@ -1382,8 +1380,7 @@ ALWAYS_INLINE bool PartitionRoot<thread_safe>::TryRecommitSystemPagesForData(
     size_t length,
     PageAccessibilityDisposition accessibility_disposition) {
   internal::ScopedSyscallTimer timer{this};
-  void* ptr = reinterpret_cast<void*>(address);
-  bool ok = TryRecommitSystemPages(ptr, length, PageReadWriteTagged,
+  bool ok = TryRecommitSystemPages(address, length, PageReadWriteTagged,
                                    accessibility_disposition);
 #if defined(PA_COMMIT_CHARGE_IS_LIMITED)
   if (UNLIKELY(!ok)) {
@@ -1391,7 +1388,7 @@ ALWAYS_INLINE bool PartitionRoot<thread_safe>::TryRecommitSystemPagesForData(
       ::partition_alloc::ScopedGuard guard(lock_);
       DecommitEmptySlotSpans();
     }
-    ok = TryRecommitSystemPages(ptr, length, PageReadWriteTagged,
+    ok = TryRecommitSystemPages(address, length, PageReadWriteTagged,
                                 accessibility_disposition);
   }
 #endif  // defined(PA_COMMIT_CHARGE_IS_LIMITED)
diff --git a/base/allocator/partition_allocator/starscan/pcscan_internal.cc b/base/allocator/partition_allocator/starscan/pcscan_internal.cc
index b89d1e6cc39e08..ea3bf97bdf0f1d 100644
--- a/base/allocator/partition_allocator/starscan/pcscan_internal.cc
+++ b/base/allocator/partition_allocator/starscan/pcscan_internal.cc
@@ -284,9 +284,9 @@ SimdSupport DetectSimdSupport() {
 
 void CommitCardTable() {
 #if PA_STARSCAN_USE_CARD_TABLE
-  RecommitSystemPages(
-      reinterpret_cast<void*>(PartitionAddressSpace::RegularPoolBase()),
-      sizeof(QuarantineCardTable), PageReadWrite, PageUpdatePermissions);
+  RecommitSystemPages(PartitionAddressSpace::RegularPoolBase(),
+                      sizeof(QuarantineCardTable), PageReadWrite,
+                      PageUpdatePermissions);
 #endif
 }
 
@@ -992,8 +992,7 @@ void UnmarkInCardTable(uintptr_t object,
   const uintptr_t discard_begin = bits::AlignUp(object, SystemPageSize());
   const intptr_t discard_size = discard_end - discard_begin;
   if (discard_size > 0) {
-    DiscardSystemPages(reinterpret_cast<void*>(discard_begin),
-                       discard_size);
+    DiscardSystemPages(discard_begin, discard_size);
     stat.discarded_bytes += discard_size;
   }
 }
@@ -1417,7 +1416,7 @@ PCScanInternal::SuperPages GetSuperPagesAndCommitStateBitmaps(
   const volatile char* metadata = reinterpret_cast<char*>(
       PartitionSuperPageToMetadataArea(super_page));
   *metadata;
-  RecommitSystemPages(SuperPageStateBitmap(super_page),
+  RecommitSystemPages(SuperPageStateBitmapAddr(super_page),
                       state_bitmap_size_to_commit, PageReadWrite,
                       PageUpdatePermissions);
   super_pages.push_back(super_page);
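The SuperPageStateBitmapAddr/SuperPageStateBitmap pair used above is the transition idiom this CL leans on: one accessor stays in address space (uintptr_t) for callers that only do arithmetic or page operations, and a thin typed wrapper serves the callers that dereference. The same shape, reduced to a generic sketch with a hypothetical Metadata type and a placeholder offset:

    #include <cstdint>

    struct Metadata { /* ... */ };

    constexpr uintptr_t kMetadataOffset = 4096;  // placeholder offset

    // Address-space accessor: no pointer is materialized, so call sites like
    // RecommitSystemPages(MetadataAddr(p), ...) need no casts.
    inline uintptr_t MetadataAddr(uintptr_t super_page) {
      return super_page + kMetadataOffset;
    }
    // Typed accessor: the single reinterpret_cast lives here.
    inline Metadata* MetadataPtr(uintptr_t super_page) {
      return reinterpret_cast<Metadata*>(MetadataAddr(super_page));
    }
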
diff --git a/base/allocator/partition_allocator/starscan/state_bitmap_unittest.cc b/base/allocator/partition_allocator/starscan/state_bitmap_unittest.cc
index e8e8c7675a663f..2f5380306e70c0 100644
--- a/base/allocator/partition_allocator/starscan/state_bitmap_unittest.cc
+++ b/base/allocator/partition_allocator/starscan/state_bitmap_unittest.cc
@@ -4,9 +4,10 @@
 
 #include "base/allocator/partition_allocator/starscan/state_bitmap.h"
 
+#include <cstdint>
+
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
-
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -19,12 +20,11 @@ using TestBitmap = StateBitmap;
 
 class PageWithBitmap final {
  public:
  PageWithBitmap()
-      : base_(base::AllocPages(nullptr,
-                               kSuperPageSize,
+      : base_(base::AllocPages(kSuperPageSize,
                                kSuperPageAlignment,
                                PageReadWrite,
                                PageTag::kPartitionAlloc)),
-        bitmap_(new (base_) TestBitmap) {}
+        bitmap_(new (reinterpret_cast<void*>(base_)) TestBitmap) {}
 
   PageWithBitmap(const PageWithBitmap&) = delete;
   PageWithBitmap& operator=(const PageWithBitmap&) = delete;
 
@@ -33,10 +33,10 @@ class PageWithBitmap final {
   TestBitmap& bitmap() const { return *bitmap_; }
 
-  void* base() const { return base_; }
+  void* base() const { return reinterpret_cast<void*>(base_); }
   size_t size() const { return kSuperPageSize; }
 
-  void* base_;
+  uintptr_t base_;
   TestBitmap* bitmap_;
 };
 
@@ -154,8 +154,7 @@ TEST_F(PartitionAllocStateBitmapTest, CountAllocated) {
 }
 
 TEST_F(PartitionAllocStateBitmapTest, StateTransititions) {
-  for (auto i : {static_cast<uintptr_t>(0), static_cast<uintptr_t>(1),
-                 LastIndex() - 1, LastIndex()}) {
+  for (auto i : {uintptr_t{0}, uintptr_t{1}, LastIndex() - 1, LastIndex()}) {
     AssertFreed(i);
 
     AllocateObject(i);
diff --git a/base/memory/tagging_unittest.cc b/base/memory/tagging_unittest.cc
index 6e915793673c93..47518a90228770 100644
--- a/base/memory/tagging_unittest.cc
+++ b/base/memory/tagging_unittest.cc
@@ -3,8 +3,12 @@
 // found in the LICENSE file.
 
 #include "base/memory/tagging.h"
+
+#include <cstdint>
+
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/cpu.h"
+#include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -12,11 +16,12 @@ namespace memory {
 
 // Check whether we can call the tagging intrinsics safely on all architectures.
 TEST(MemoryTagging, TagMemoryRangeRandomlySafe) {
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeRandomly(buffer, 4 * kMemTagGranuleSize, 0u);
+  uintptr_t bufferp =
+      TagMemoryRangeRandomly(buffer, 4 * kMemTagGranuleSize, 0u);
   EXPECT_TRUE(bufferp);
   int* buffer0 = reinterpret_cast<int*>(bufferp);
   *buffer0 = 42;
@@ -26,11 +31,11 @@ namespace memory {
 
 TEST(MemoryTagging, TagMemoryRangeIncrementSafe) {
   CPU cpu;
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeIncrement(buffer, 4 * kMemTagGranuleSize);
+  uintptr_t bufferp = TagMemoryRangeIncrement(buffer, 4 * kMemTagGranuleSize);
   EXPECT_TRUE(bufferp);
   int* buffer0 = reinterpret_cast<int*>(bufferp);
   *buffer0 = 42;
@@ -45,82 +50,82 @@ TEST(MemoryTagging, TagMemoryRangeIncrementSafe) {
 
 // Size / alignment constraints are only enforced on 64-bit architectures.
 TEST(MemoryTagging, TagMemoryRangeBadSz) {
   CPU cpu;
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
-  void* bufferp =
+  uintptr_t bufferp =
       TagMemoryRangeRandomly(buffer, 4 * kMemTagGranuleSize - 1, 0u);
   if (cpu.has_mte()) {
-    EXPECT_EQ(bufferp, nullptr);
+    EXPECT_EQ(bufferp, 0u);
   }
   FreePages(buffer, PageAllocationGranularity());
 }
 
 TEST(MemoryTagging, TagMemoryRangeRandomlyNoSz) {
   CPU cpu;
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeRandomly(buffer, 0, 0u);
+  uintptr_t bufferp = TagMemoryRangeRandomly(buffer, 0, 0u);
   if (cpu.has_mte()) {
-    EXPECT_EQ(bufferp, nullptr);
+    EXPECT_EQ(bufferp, 0u);
   }
   FreePages(buffer, PageAllocationGranularity());
 }
 
 TEST(MemoryTagging, TagMemoryRangeRandomlyBadAlign) {
   CPU cpu;
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
-  char* bufferc = reinterpret_cast<char*>(buffer);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
-  void* bufferp =
-      TagMemoryRangeRandomly(bufferc - 1, 4 * kMemTagGranuleSize, 0u);
+  uintptr_t bufferp =
+      TagMemoryRangeRandomly(buffer - 1, 4 * kMemTagGranuleSize, 0u);
   if (cpu.has_mte()) {
-    EXPECT_EQ(bufferp, nullptr);
+    EXPECT_EQ(bufferp, 0u);
   }
   FreePages(buffer, PageAllocationGranularity());
 }
 
 TEST(MemoryTagging, TagMemoryRangeIncrementBadSz) {
   CPU cpu;
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeIncrement(buffer, 4 * kMemTagGranuleSize - 1);
+  uintptr_t bufferp =
+      TagMemoryRangeIncrement(buffer, 4 * kMemTagGranuleSize - 1);
   if (cpu.has_mte()) {
-    EXPECT_EQ(bufferp, nullptr);
+    EXPECT_EQ(bufferp, 0u);
  }
   FreePages(buffer, PageAllocationGranularity());
 }
 
 TEST(MemoryTagging, TagMemoryRangeIncrementNoSz) {
   CPU cpu;
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeIncrement(buffer, 0);
+  uintptr_t bufferp = TagMemoryRangeIncrement(buffer, 0);
   if (cpu.has_mte()) {
-    EXPECT_EQ(bufferp, nullptr);
+    EXPECT_EQ(bufferp, 0u);
   }
   FreePages(buffer, PageAllocationGranularity());
 }
 
 TEST(MemoryTagging, TagMemoryRangeIncrementBadAlign) {
   CPU cpu;
-  void* buffer = AllocPages(nullptr, PageAllocationGranularity(),
-                            PageAllocationGranularity(), PageReadWriteTagged,
-                            PageTag::kChromium);
-  char* bufferc = reinterpret_cast<char*>(buffer);
+  uintptr_t buffer =
+      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
+                 PageReadWriteTagged, PageTag::kChromium);
   EXPECT_TRUE(buffer);
-  void* bufferp = TagMemoryRangeIncrement(bufferc - 1, 4 * kMemTagGranuleSize);
+  uintptr_t bufferp =
+      TagMemoryRangeIncrement(buffer - 1, 4 * kMemTagGranuleSize);
   if (cpu.has_mte()) {
-    EXPECT_EQ(bufferp, nullptr);
+    EXPECT_EQ(bufferp, 0u);
   }
   FreePages(buffer, PageAllocationGranularity());
 }
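One readability gain in the BadAlign tests above: building a deliberately misaligned address is now plain integer arithmetic (buffer - 1) instead of a detour through a char* alias. The alignment rule the tagging helpers enforce can be stated in a few self-contained lines; kMemTagGranuleSize is 16 on Armv8.5-A MTE, hard-coded here only for illustration:

    #include <cstdint>

    constexpr uintptr_t kMemTagGranuleSize = 16;  // Arm MTE tag granule

    // The tagging helpers reject ranges whose base is not granule-aligned,
    // which is why buffer - 1 must yield 0 (the new nullptr) under MTE.
    inline bool IsGranuleAligned(uintptr_t address) {
      return (address & (kMemTagGranuleSize - 1)) == 0;
    }
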
diff --git a/gin/v8_platform_page_allocator.cc b/gin/v8_platform_page_allocator.cc
index d94b0770cec113..990cc6ee5dfbdc 100644
--- a/gin/v8_platform_page_allocator.cc
+++ b/gin/v8_platform_page_allocator.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "v8_platform_page_allocator.h"
+
 #include "base/allocator/partition_allocator/address_space_randomization.h"
 #include "base/allocator/partition_allocator/page_allocator_constants.h"
 #include "base/allocator/partition_allocator/random.h"
@@ -62,7 +63,7 @@ void PageAllocator::SetRandomMmapSeed(int64_t seed) {
 }
 
 void* PageAllocator::GetRandomMmapAddr() {
-  return base::GetRandomPageBase();
+  return reinterpret_cast<void*>(base::GetRandomPageBase());
 }
 
 void* PageAllocator::AllocatePages(void* address,