This repository was archived by the owner on Feb 25, 2025. It is now read-only.

[Impeller] Add RAII wrappers for VMA objects. #43626

Merged · 2 commits · Jul 13, 2023
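The wrapper types referenced throughout this diff (UniqueAllocatorVMA, UniquePoolVMA, UniqueBufferVMA, UniqueImageVMA) are defined in the newly added vma.h/vma.cc, which are not reproduced in this excerpt; judging by the get()/reset()/is_valid() calls below, they most likely sit on top of fml::UniqueObject-style traits. As a rough, self-contained sketch of the idea — with ScopedPoolVMA as a hypothetical stand-in, not the PR's actual class — a move-only RAII owner for a VmaPool could look like this:

```cpp
#include <utility>

#include <vk_mem_alloc.h>  // VulkanMemoryAllocator

// A VMA pool handle is only meaningful together with the allocator that
// created it, so the value type carries both.
struct PoolVMA {
  VmaAllocator allocator = {};
  VmaPool pool = {};
};

// Illustrative sketch only: move-only ownership of a PoolVMA, destroying the
// pool when the owner goes out of scope or is reset.
class ScopedPoolVMA {
 public:
  ScopedPoolVMA() = default;
  explicit ScopedPoolVMA(PoolVMA value) : value_(value) {}

  ScopedPoolVMA(ScopedPoolVMA&& other) : value_(std::exchange(other.value_, {})) {}
  ScopedPoolVMA& operator=(ScopedPoolVMA&& other) {
    if (this != &other) {
      reset(std::exchange(other.value_, {}));
    }
    return *this;
  }

  ScopedPoolVMA(const ScopedPoolVMA&) = delete;
  ScopedPoolVMA& operator=(const ScopedPoolVMA&) = delete;

  ~ScopedPoolVMA() { reset(); }

  bool is_valid() const { return value_.pool != nullptr; }
  const PoolVMA& get() const { return value_; }

  // Destroys the currently held pool (if any) and takes ownership of `value`.
  void reset(PoolVMA value = {}) {
    if (is_valid()) {
      ::vmaDestroyPool(value_.allocator, value_.pool);
    }
    value_ = value;
  }

 private:
  PoolVMA value_;
};
```

With a wrapper along these lines, AllocatorVK below can hold its pools by value and drop its hand-written destructor; the pools and the allocator are then released purely by member destruction order.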
4 changes: 4 additions & 0 deletions ci/licenses_golden/licenses_flutter
@@ -1562,6 +1562,8 @@ ORIGIN: ../../../flutter/impeller/renderer/backend/vulkan/texture_vk.h + ../../.
ORIGIN: ../../../flutter/impeller/renderer/backend/vulkan/vertex_descriptor_vk.cc + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/renderer/backend/vulkan/vertex_descriptor_vk.h + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/renderer/backend/vulkan/vk.h + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/renderer/backend/vulkan/vma.cc + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/renderer/backend/vulkan/vma.h + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/renderer/blit_command.cc + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/renderer/blit_command.h + ../../../flutter/LICENSE
ORIGIN: ../../../flutter/impeller/renderer/blit_pass.cc + ../../../flutter/LICENSE
@@ -4250,6 +4252,8 @@ FILE: ../../../flutter/impeller/renderer/backend/vulkan/texture_vk.h
FILE: ../../../flutter/impeller/renderer/backend/vulkan/vertex_descriptor_vk.cc
FILE: ../../../flutter/impeller/renderer/backend/vulkan/vertex_descriptor_vk.h
FILE: ../../../flutter/impeller/renderer/backend/vulkan/vk.h
FILE: ../../../flutter/impeller/renderer/backend/vulkan/vma.cc
FILE: ../../../flutter/impeller/renderer/backend/vulkan/vma.h
FILE: ../../../flutter/impeller/renderer/blit_command.cc
FILE: ../../../flutter/impeller/renderer/blit_command.h
FILE: ../../../flutter/impeller/renderer/blit_pass.cc
2 changes: 2 additions & 0 deletions impeller/renderer/backend/vulkan/BUILD.gn
@@ -94,6 +94,8 @@ impeller_component("vulkan") {
"vertex_descriptor_vk.cc",
"vertex_descriptor_vk.h",
"vk.h",
"vma.cc",
"vma.h",
]

public_deps = [
217 changes: 93 additions & 124 deletions impeller/renderer/backend/vulkan/allocator_vk.cc
@@ -16,6 +16,75 @@

namespace impeller {

static constexpr vk::Flags<vk::MemoryPropertyFlagBits>
ToVKBufferMemoryPropertyFlags(StorageMode mode) {
switch (mode) {
case StorageMode::kHostVisible:
return vk::MemoryPropertyFlagBits::eHostVisible;
case StorageMode::kDevicePrivate:
return vk::MemoryPropertyFlagBits::eDeviceLocal;
case StorageMode::kDeviceTransient:
return vk::MemoryPropertyFlagBits::eLazilyAllocated;
}
FML_UNREACHABLE();
}

static VmaAllocationCreateFlags ToVmaAllocationBufferCreateFlags(
StorageMode mode) {
VmaAllocationCreateFlags flags = 0;
switch (mode) {
case StorageMode::kHostVisible:
flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
return flags;
case StorageMode::kDevicePrivate:
return flags;
case StorageMode::kDeviceTransient:
return flags;
}
FML_UNREACHABLE();
}

static PoolVMA CreateBufferPool(VmaAllocator allocator) {
vk::BufferCreateInfo buffer_info;
buffer_info.usage = vk::BufferUsageFlagBits::eVertexBuffer |
vk::BufferUsageFlagBits::eIndexBuffer |
vk::BufferUsageFlagBits::eUniformBuffer |
vk::BufferUsageFlagBits::eStorageBuffer |
vk::BufferUsageFlagBits::eTransferSrc |
vk::BufferUsageFlagBits::eTransferDst;
buffer_info.size = 1u; // doesn't matter
buffer_info.sharingMode = vk::SharingMode::eExclusive;
auto buffer_info_native =
static_cast<vk::BufferCreateInfo::NativeType>(buffer_info);

VmaAllocationCreateInfo allocation_info = {};
allocation_info.usage = VMA_MEMORY_USAGE_AUTO;
allocation_info.preferredFlags = static_cast<VkMemoryPropertyFlags>(
ToVKBufferMemoryPropertyFlags(StorageMode::kHostVisible));
allocation_info.flags =
ToVmaAllocationBufferCreateFlags(StorageMode::kHostVisible);

uint32_t memTypeIndex;
auto result = vk::Result{vmaFindMemoryTypeIndexForBufferInfo(
allocator, &buffer_info_native, &allocation_info, &memTypeIndex)};
if (result != vk::Result::eSuccess) {
return {};
}

VmaPoolCreateInfo pool_create_info = {};
pool_create_info.memoryTypeIndex = memTypeIndex;
pool_create_info.flags = VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT |
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;

VmaPool pool = {};
result = vk::Result{::vmaCreatePool(allocator, &pool_create_info, &pool)};
if (result != vk::Result::eSuccess) {
return {};
}
return {allocator, pool};
}

AllocatorVK::AllocatorVK(std::weak_ptr<Context> context,
uint32_t vulkan_api_version,
const vk::PhysicalDevice& physical_device,
@@ -93,26 +162,16 @@ AllocatorVK::AllocatorVK(std::weak_ptr<Context> context,
VALIDATION_LOG << "Could not create memory allocator";
return;
}
for (auto i = 0u; i < kPoolCount; i++) {
created_buffer_pools_ &=
CreateBufferPool(allocator, &staging_buffer_pools_[i]);
for (size_t i = 0u; i < staging_buffer_pools_.size(); i++) {
staging_buffer_pools_[i].reset(CreateBufferPool(allocator));
created_buffer_pools_ &= staging_buffer_pools_[i].is_valid();
}
allocator_ = allocator;
allocator_.reset(allocator);
supports_memoryless_textures_ = capabilities.SupportsMemorylessTextures();
is_valid_ = true;
}

AllocatorVK::~AllocatorVK() {
TRACE_EVENT0("impeller", "DestroyAllocatorVK");
if (allocator_) {
for (auto i = 0u; i < kPoolCount; i++) {
if (staging_buffer_pools_[i]) {
::vmaDestroyPool(allocator_, staging_buffer_pools_[i]);
}
}
::vmaDestroyAllocator(allocator_);
}
}
AllocatorVK::~AllocatorVK() = default;

// |Allocator|
bool AllocatorVK::IsValid() const {
@@ -206,35 +265,6 @@ ToVKTextureMemoryPropertyFlags(StorageMode mode,
FML_UNREACHABLE();
}

static constexpr vk::Flags<vk::MemoryPropertyFlagBits>
ToVKBufferMemoryPropertyFlags(StorageMode mode) {
switch (mode) {
case StorageMode::kHostVisible:
return vk::MemoryPropertyFlagBits::eHostVisible;
case StorageMode::kDevicePrivate:
return vk::MemoryPropertyFlagBits::eDeviceLocal;
case StorageMode::kDeviceTransient:
return vk::MemoryPropertyFlagBits::eLazilyAllocated;
}
FML_UNREACHABLE();
}

static VmaAllocationCreateFlags ToVmaAllocationBufferCreateFlags(
StorageMode mode) {
VmaAllocationCreateFlags flags = 0;
switch (mode) {
case StorageMode::kHostVisible:
flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
return flags;
case StorageMode::kDevicePrivate:
return flags;
case StorageMode::kDeviceTransient:
return flags;
}
FML_UNREACHABLE();
}

static VmaAllocationCreateFlags ToVmaAllocationCreateFlags(StorageMode mode,
bool is_texture,
size_t size) {
@@ -357,61 +387,36 @@ class AllocatedTextureSourceVK final : public TextureSourceVK {
<< vk::to_string(result);
return;
}
resource_.Reset(
ImageResource(image, allocator, allocation, std::move(image_view)));
resource_.Reset(ImageResource(ImageVMA{allocator, allocation, image},
std::move(image_view)));
is_valid_ = true;
}

~AllocatedTextureSourceVK() = default;

bool IsValid() const { return is_valid_; }

vk::Image GetImage() const override { return resource_->image; }
vk::Image GetImage() const override { return resource_->image.get().image; }

vk::ImageView GetImageView() const override {
return resource_->image_view.get();
}

private:
struct ImageResource {
vk::Image image = {};
VmaAllocator allocator = {};
VmaAllocation allocation = {};
UniqueImageVMA image;
vk::UniqueImageView image_view;

ImageResource() = default;

ImageResource(vk::Image p_image,
VmaAllocator p_allocator,
VmaAllocation p_allocation,
vk::UniqueImageView p_image_view)
: image(p_image),
allocator(p_allocator),
allocation(p_allocation),
image_view(std::move(p_image_view)) {}
ImageResource(ImageVMA p_image, vk::UniqueImageView p_image_view)
: image(p_image), image_view(std::move(p_image_view)) {}

ImageResource(ImageResource&& o) {
std::swap(image, o.image);
std::swap(allocator, o.allocator);
std::swap(allocation, o.allocation);
std::swap(image_view, o.image_view);
}

~ImageResource() {
if (!image) {
return;
}
TRACE_EVENT0("impeller", "DestroyDeviceTexture");
image_view.reset();
[Review comment — Member (PR author)] The image view is already a unique object. It had to be reset manually earlier because the image was explicitly collected. With the RAII wrapper, there is no need, as the ivar destruction order holds.

if (image) {
::vmaDestroyImage(
allocator, //
static_cast<typename decltype(image)::NativeType>(image), //
allocation //
);
}
}

FML_DISALLOW_COPY_AND_ASSIGN(ImageResource);
};

@@ -439,7 +444,7 @@ std::shared_ptr<Texture> AllocatorVK::OnCreateTexture(
auto source = std::make_shared<AllocatedTextureSourceVK>(
ContextVK::Cast(*context).GetResourceManager(), //
desc, //
allocator_, //
allocator_.get(), //
device_holder->GetDevice(), //
supports_memoryless_textures_ //
);
@@ -477,13 +482,16 @@ std::shared_ptr<DeviceBuffer> AllocatorVK::OnCreateBuffer(
allocation_info.flags = ToVmaAllocationBufferCreateFlags(desc.storage_mode);
if (created_buffer_pools_ && desc.storage_mode == StorageMode::kHostVisible &&
raster_thread_id_ == std::this_thread::get_id()) {
allocation_info.pool = staging_buffer_pools_[frame_count_ % kPoolCount];
allocation_info.pool =
staging_buffer_pools_[frame_count_ % staging_buffer_pools_.size()]
.get()
.pool;
}

VkBuffer buffer = {};
VmaAllocation buffer_allocation = {};
VmaAllocationInfo buffer_allocation_info = {};
auto result = vk::Result{::vmaCreateBuffer(allocator_, //
auto result = vk::Result{::vmaCreateBuffer(allocator_.get(), //
&buffer_info_native, //
&allocation_info, //
&buffer, //
@@ -497,53 +505,14 @@ std::shared_ptr<DeviceBuffer> AllocatorVK::OnCreateBuffer(
return {};
}

return std::make_shared<DeviceBufferVK>(desc, //
context_, //
allocator_, //
buffer_allocation, //
buffer_allocation_info, //
vk::Buffer{buffer} //
return std::make_shared<DeviceBufferVK>(
desc, //
context_, //
UniqueBufferVMA{BufferVMA{allocator_.get(), //
buffer_allocation, //
vk::Buffer{buffer}}}, //
buffer_allocation_info //
);
}

// static
bool AllocatorVK::CreateBufferPool(VmaAllocator allocator, VmaPool* pool) {
vk::BufferCreateInfo buffer_info;
buffer_info.usage = vk::BufferUsageFlagBits::eVertexBuffer |
vk::BufferUsageFlagBits::eIndexBuffer |
vk::BufferUsageFlagBits::eUniformBuffer |
vk::BufferUsageFlagBits::eStorageBuffer |
vk::BufferUsageFlagBits::eTransferSrc |
vk::BufferUsageFlagBits::eTransferDst;
buffer_info.size = 1u; // doesn't matter
buffer_info.sharingMode = vk::SharingMode::eExclusive;
auto buffer_info_native =
static_cast<vk::BufferCreateInfo::NativeType>(buffer_info);

VmaAllocationCreateInfo allocation_info = {};
allocation_info.usage = VMA_MEMORY_USAGE_AUTO;
allocation_info.preferredFlags = static_cast<VkMemoryPropertyFlags>(
ToVKBufferMemoryPropertyFlags(StorageMode::kHostVisible));
allocation_info.flags =
ToVmaAllocationBufferCreateFlags(StorageMode::kHostVisible);

uint32_t memTypeIndex;
auto result = vk::Result{vmaFindMemoryTypeIndexForBufferInfo(
allocator, &buffer_info_native, &allocation_info, &memTypeIndex)};
if (result != vk::Result::eSuccess) {
return false;
}

VmaPoolCreateInfo pool_create_info = {};
pool_create_info.memoryTypeIndex = memTypeIndex;
pool_create_info.flags = VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT |
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;

result = vk::Result{vmaCreatePool(allocator, &pool_create_info, pool)};
if (result != vk::Result::eSuccess) {
return false;
}
return true;
}

} // namespace impeller
7 changes: 3 additions & 4 deletions impeller/renderer/backend/vulkan/allocator_vk.h
@@ -13,6 +13,7 @@
#include "impeller/renderer/backend/vulkan/device_holder.h"
#include "impeller/renderer/backend/vulkan/vk.h"

#include <array>
#include <memory>

namespace impeller {
@@ -28,8 +29,8 @@ class AllocatorVK final : public Allocator {
static constexpr size_t kPoolCount = 3;

fml::RefPtr<vulkan::VulkanProcTable> vk_;
VmaAllocator allocator_ = {};
VmaPool staging_buffer_pools_[kPoolCount] = {};
UniqueAllocatorVMA allocator_;
[Review comment — Member] So, to state the obvious as I'm still a C++ noob: the RAII wrappers handle destroying the native resources, and we always go through destructors in reverse order. So staging_buffer_pools_ being declared after allocator_ ensures that we destroy the pools (which were created with the allocator) before destroying the allocator itself?

[Review comment — Member (PR author)] Right.
std::array<UniquePoolVMA, kPoolCount> staging_buffer_pools_;
std::weak_ptr<Context> context_;
std::weak_ptr<DeviceHolder> device_holder_;
ISize max_texture_size_;
@@ -66,8 +67,6 @@ class AllocatorVK final : public Allocator {
// |Allocator|
ISize GetMaxTextureSizeSupported() const override;

static bool CreateBufferPool(VmaAllocator allocator, VmaPool* pool);

FML_DISALLOW_COPY_AND_ASSIGN(AllocatorVK);
};
