diff --git a/README.md b/README.md index 40696f65573..cbfa132634b 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ repositories { } dependencies { - implementation 'com.google.android.filament:filament-android:1.56.0' + implementation 'com.google.android.filament:filament-android:1.56.1' } ``` @@ -51,7 +51,7 @@ Here are all the libraries available in the group `com.google.android.filament`: iOS projects can use CocoaPods to install the latest release: ```shell -pod 'Filament', '~> 1.56.0' +pod 'Filament', '~> 1.56.1' ``` ## Documentation diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 190c274b3fb..42e68e62acc 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -7,6 +7,8 @@ A new header is inserted each time a *tag* is created. Instead, if you are authoring a PR for the main branch, add your release note to [NEW_RELEASE_NOTES.md](./NEW_RELEASE_NOTES.md). +## v1.56.1 + ## v1.56.0 - backend: descriptor layouts distinguish samplers and external samplers (b/376089915) [⚠️ **New Material Version**] diff --git a/android/filament-android/src/main/cpp/Engine.cpp b/android/filament-android/src/main/cpp/Engine.cpp index 7de25a58ba2..56aecc66dd9 100644 --- a/android/filament-android/src/main/cpp/Engine.cpp +++ b/android/filament-android/src/main/cpp/Engine.cpp @@ -499,6 +499,37 @@ Java_com_google_android_filament_Engine_nGetActiveFeatureLevel(JNIEnv *, jclass, return (jint)engine->getActiveFeatureLevel(); } +extern "C" +JNIEXPORT jboolean JNICALL +Java_com_google_android_filament_Engine_nHasFeatureFlag(JNIEnv *env, jclass clazz, + jlong nativeEngine, jstring name_) { + Engine* engine = (Engine*) nativeEngine; + const char *name = env->GetStringUTFChars(name_, 0); + std::optional result = engine->getFeatureFlag(name); + env->ReleaseStringUTFChars(name_, name); + return result.has_value(); +} +extern "C" +JNIEXPORT jboolean JNICALL +Java_com_google_android_filament_Engine_nSetFeatureFlag(JNIEnv *env, jclass clazz, + jlong nativeEngine, jstring name_, jboolean value) { + Engine* engine = (Engine*) nativeEngine; + const char *name = env->GetStringUTFChars(name_, 0); + jboolean result = engine->setFeatureFlag(name, (bool)value); + env->ReleaseStringUTFChars(name_, name); + return result; +} +extern "C" +JNIEXPORT jboolean JNICALL +Java_com_google_android_filament_Engine_nGetFeatureFlag(JNIEnv *env, jclass clazz, + jlong nativeEngine, jstring name_) { + Engine* engine = (Engine*) nativeEngine; + const char *name = env->GetStringUTFChars(name_, 0); + std::optional result = engine->getFeatureFlag(name); + env->ReleaseStringUTFChars(name_, name); + return result.value_or(false); // we should never fail here +} + extern "C" JNIEXPORT jlong JNICALL Java_com_google_android_filament_Engine_nCreateBuilder(JNIEnv*, jclass) { Engine::Builder* builder = new Engine::Builder{}; @@ -565,6 +596,16 @@ extern "C" JNIEXPORT void JNICALL Java_com_google_android_filament_Engine_nSetBu builder->paused((bool) paused); } +extern "C" +JNIEXPORT void JNICALL +Java_com_google_android_filament_Engine_nSetBuilderFeature(JNIEnv *env, jclass clazz, + jlong nativeBuilder, jstring name_, jboolean value) { + Engine::Builder* builder = (Engine::Builder*) nativeBuilder; + const char *name = env->GetStringUTFChars(name_, 0); + builder->feature(name, (bool)value); + env->ReleaseStringUTFChars(name_, name); +} + extern "C" JNIEXPORT jlong JNICALL Java_com_google_android_filament_Engine_nBuilderBuild(JNIEnv*, jclass, jlong nativeBuilder) { Engine::Builder* builder = (Engine::Builder*) nativeBuilder; diff --git 
a/android/filament-android/src/main/java/com/google/android/filament/Engine.java b/android/filament-android/src/main/java/com/google/android/filament/Engine.java index a433a55d8c0..1e49bf54a33 100644 --- a/android/filament-android/src/main/java/com/google/android/filament/Engine.java +++ b/android/filament-android/src/main/java/com/google/android/filament/Engine.java @@ -258,6 +258,17 @@ public Builder paused(boolean paused) { return this; } + /** + * Set a feature flag value. This is the only way to set constant feature flags. + * @param name feature name + * @param value true to enable, false to disable + * @return A reference to this Builder for chaining calls. + */ + public Builder feature(@NonNull String name, boolean value) { + nSetBuilderFeature(mNativeBuilder, name, value); + return this; + } + /** * Creates an instance of Engine * @@ -393,6 +404,7 @@ public static class Config { /** * Set to `true` to forcibly disable parallel shader compilation in the backend. * Currently only honored by the GL backend. + * @Deprecated use "backend.disable_parallel_shader_compile" feature flag instead */ public boolean disableParallelShaderCompile = false; @@ -433,6 +445,7 @@ public static class Config { /** * Disable backend handles use-after-free checks. + * @Deprecated use "backend.disable_handle_use_after_free_check" feature flag instead */ public boolean disableHandleUseAfterFreeCheck = false; @@ -469,6 +482,7 @@ public enum ShaderLanguage { * Assert the native window associated to a SwapChain is valid when calling makeCurrent(). * This is only supported for: * - PlatformEGLAndroid + * @Deprecated use "backend.opengl.assert_native_window_is_valid" feature flag instead */ public boolean assertNativeWindowIsValid = false; } @@ -693,11 +707,11 @@ public Config getConfig() { /** * Returns the maximum number of stereoscopic eyes supported by Filament. The actual number of - * eyes rendered is set at Engine creation time with the {@link - * Engine#Config#stereoscopicEyeCount} setting. + * eyes rendered is set at Engine creation time with the {@link Config#stereoscopicEyeCount} + * setting. * * @return the max number of stereoscopic eyes supported - * @see Engine#Config#stereoscopicEyeCount + * @see Config#stereoscopicEyeCount */ public long getMaxStereoscopicEyes() { return nGetMaxStereoscopicEyes(getNativeObject()); @@ -894,7 +908,8 @@ public boolean isValidMaterial(@NonNull Material object) { /** * Returns whether the object is valid. - * @param object Object to check for validity + * @param ma Material + * @param mi MaterialInstance to check for validity * @return returns true if the specified object is valid. */ public boolean isValidMaterialInstance(@NonNull Material ma, MaterialInstance mi) { @@ -1316,6 +1331,39 @@ public void unprotected() { */ public static native long getSteadyClockTimeNano(); + + /** + * Checks if a feature flag exists + * @param name name of the feature flag to check + * @return true if it exists false otherwise + */ + public boolean hasFeatureFlag(@NonNull String name) { + return nHasFeatureFlag(mNativeObject, name); + } + + /** + * Set the value of a non-constant feature flag. + * @param name name of the feature flag to set + * @param value value to set + * @return true if the value was set, false if the feature flag is constant or doesn't exist. + */ + public boolean setFeatureFlag(@NonNull String name, boolean value) { + return nSetFeatureFlag(mNativeObject, name, value); + } + + /** + * Retrieves the value of any feature flag. 
+ * @param name name of the feature flag + * @return the value of the flag if it exists + * @exception IllegalArgumentException is thrown if the feature flag doesn't exist + */ + public boolean getFeatureFlag(@NonNull String name) { + if (!hasFeatureFlag(name)) { + throw new IllegalArgumentException("The feature flag \"" + name + "\" doesn't exist"); + } + return nGetFeatureFlag(mNativeObject, name); + } + @UsedByReflection("TextureHelper.java") public long getNativeObject() { if (mNativeObject == 0) { @@ -1405,6 +1453,9 @@ private static void assertDestroy(boolean success) { private static native int nGetSupportedFeatureLevel(long nativeEngine); private static native int nSetActiveFeatureLevel(long nativeEngine, int ordinal); private static native int nGetActiveFeatureLevel(long nativeEngine); + private static native boolean nHasFeatureFlag(long nativeEngine, String name); + private static native boolean nSetFeatureFlag(long nativeEngine, String name, boolean value); + private static native boolean nGetFeatureFlag(long nativeEngine, String name); private static native long nCreateBuilder(); private static native void nDestroyBuilder(long nativeBuilder); @@ -1420,5 +1471,6 @@ private static native void nSetBuilderConfig(long nativeBuilder, long commandBuf private static native void nSetBuilderFeatureLevel(long nativeBuilder, int ordinal); private static native void nSetBuilderSharedContext(long nativeBuilder, long sharedContext); private static native void nSetBuilderPaused(long nativeBuilder, boolean paused); + private static native void nSetBuilderFeature(long nativeBuilder, String name, boolean value); private static native long nBuilderBuild(long nativeBuilder); } diff --git a/android/gradle.properties b/android/gradle.properties index 319fd5a0738..c657979b6c3 100644 --- a/android/gradle.properties +++ b/android/gradle.properties @@ -1,5 +1,5 @@ GROUP=com.google.android.filament -VERSION_NAME=1.56.0 +VERSION_NAME=1.56.1 POM_DESCRIPTION=Real-time physically based rendering engine for Android. 
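The Engine.java additions above expose the new feature-flag API to Android apps, backed by the JNI bindings in Engine.cpp. A minimal usage sketch (the flag name comes from the deprecation notes above; the wrapper class and method names are only illustrative, and error handling is omitted) might look like this:

```java
import com.google.android.filament.Engine;

class FeatureFlagExample {
    static Engine createEngine() {
        // Constant flags can only be set at build time, via Engine.Builder.feature().
        Engine engine = new Engine.Builder()
                .feature("backend.disable_parallel_shader_compile", true)
                .build();

        // Runtime access: hasFeatureFlag() guards getFeatureFlag(), which throws for unknown
        // names; setFeatureFlag() returns false for constant or unknown flags.
        if (engine.hasFeatureFlag("backend.disable_parallel_shader_compile")) {
            boolean enabled = engine.getFeatureFlag("backend.disable_parallel_shader_compile");
            boolean changed = engine.setFeatureFlag("backend.disable_parallel_shader_compile", !enabled);
        }
        return engine;
    }
}
```

Note that the flags introduced in this change are declared constant, so the runtime setFeatureFlag() call above is expected to return false for them; it is shown only to illustrate the API.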
diff --git a/android/samples/sample-hello-triangle/src/main/java/com/google/android/filament/hellotriangle/MainActivity.kt b/android/samples/sample-hello-triangle/src/main/java/com/google/android/filament/hellotriangle/MainActivity.kt index ed6a507a766..a799acac52c 100644 --- a/android/samples/sample-hello-triangle/src/main/java/com/google/android/filament/hellotriangle/MainActivity.kt +++ b/android/samples/sample-hello-triangle/src/main/java/com/google/android/filament/hellotriangle/MainActivity.kt @@ -112,7 +112,13 @@ class MainActivity : Activity() { } private fun setupFilament() { - engine = Engine.Builder().featureLevel(Engine.FeatureLevel.FEATURE_LEVEL_0).build() + val config = Engine.Config() + //config.forceGLES2Context = true + + engine = Engine.Builder() + .config(config) + .featureLevel(Engine.FeatureLevel.FEATURE_LEVEL_0) + .build() renderer = engine.createRenderer() scene = engine.createScene() view = engine.createView() @@ -123,7 +129,9 @@ class MainActivity : Activity() { scene.skybox = Skybox.Builder().color(0.035f, 0.035f, 0.035f, 1.0f).build(engine) // post-processing is not supported at feature level 0 - view.isPostProcessingEnabled = false + if (engine.activeFeatureLevel == Engine.FeatureLevel.FEATURE_LEVEL_0) { + view.isPostProcessingEnabled = false + } // Tell the view which camera we want to use view.camera = camera diff --git a/filament/backend/src/metal/MetalDriver.mm b/filament/backend/src/metal/MetalDriver.mm index 29a0f6fed90..26045518199 100644 --- a/filament/backend/src/metal/MetalDriver.mm +++ b/filament/backend/src/metal/MetalDriver.mm @@ -861,10 +861,20 @@ void MetalDriver::destroyDescriptorSet(Handle dsh) { DEBUG_LOG("destroyDescriptorSet(dsh = %d)\n", dsh.getId()); - if (dsh) { - executeAfterCurrentCommandBufferCompletes( - [this, dsh]() mutable { destruct_handle(dsh); }); + if (!dsh) { + return; + } + + // Unbind this descriptor set. 
+ auto* descriptorSet = handle_cast(dsh); + for (size_t i = 0; i < MAX_DESCRIPTOR_SET_COUNT; i++) { + if (UTILS_UNLIKELY(mContext->currentDescriptorSets[i] == descriptorSet)) { + mContext->currentDescriptorSets[i] = nullptr; + } } + + executeAfterCurrentCommandBufferCompletes( + [this, dsh]() mutable { destruct_handle(dsh); }); } void MetalDriver::terminate() { diff --git a/filament/backend/src/opengl/OpenGLContext.cpp b/filament/backend/src/opengl/OpenGLContext.cpp index 826994c3656..fc76f5e38d8 100644 --- a/filament/backend/src/opengl/OpenGLContext.cpp +++ b/filament/backend/src/opengl/OpenGLContext.cpp @@ -614,7 +614,7 @@ FeatureLevel OpenGLContext::resolveFeatureLevel(GLint major, GLint minor, featureLevel = FeatureLevel::FEATURE_LEVEL_2; if (gets.max_texture_image_units >= MAX_FRAGMENT_SAMPLER_COUNT && gets.max_combined_texture_image_units >= - (MAX_FRAGMENT_SAMPLER_COUNT + MAX_VERTEX_SAMPLER_COUNT)) { + (MAX_FRAGMENT_SAMPLER_COUNT + MAX_VERTEX_SAMPLER_COUNT)) { featureLevel = FeatureLevel::FEATURE_LEVEL_3; } } @@ -623,15 +623,13 @@ FeatureLevel OpenGLContext::resolveFeatureLevel(GLint major, GLint minor, # ifndef IOS // IOS is guaranteed to have ES3.x else if (UTILS_UNLIKELY(major == 2)) { // Runtime OpenGL version is ES 2.x -# if defined(BACKEND_OPENGL_LEVEL_GLES30) - // mandatory extensions (all supported by Mali-400 and Adreno 304) - assert_invariant(exts.OES_depth_texture); - assert_invariant(exts.OES_depth24); - assert_invariant(exts.OES_packed_depth_stencil); - assert_invariant(exts.OES_rgb8_rgba8); - assert_invariant(exts.OES_standard_derivatives); - assert_invariant(exts.OES_texture_npot); -# endif + // note: mandatory extensions (all supported by Mali-400 and Adreno 304) + // OES_depth_texture + // OES_depth24 + // OES_packed_depth_stencil + // OES_rgb8_rgba8 + // OES_standard_derivatives + // OES_texture_npot featureLevel = FeatureLevel::FEATURE_LEVEL_0; } # endif // IOS diff --git a/filament/backend/src/opengl/OpenGLDriver.cpp b/filament/backend/src/opengl/OpenGLDriver.cpp index 521b23ac634..e5d535f036f 100644 --- a/filament/backend/src/opengl/OpenGLDriver.cpp +++ b/filament/backend/src/opengl/OpenGLDriver.cpp @@ -394,24 +394,21 @@ void OpenGLDriver::bindTexture(GLuint unit, GLTexture const* t) noexcept { } bool OpenGLDriver::useProgram(OpenGLProgram* p) noexcept { - if (UTILS_UNLIKELY(mBoundProgram == p)) { - // program didn't change, don't do anything. - return true; - } - - // compile/link the program if needed and call glUseProgram - bool const success = p->use(this, mContext); - assert_invariant(success == p->isValid()); - - if (success) { - // TODO: we could even improve this if the program could tell us which of the descriptors - // bindings actually changed. In practice, it is likely that set 0 or 1 might not - // change often. - decltype(mInvalidDescriptorSetBindings) changed; - changed.setValue((1 << MAX_DESCRIPTOR_SET_COUNT) - 1); - mInvalidDescriptorSetBindings |= changed; - - mBoundProgram = p; + bool success = true; + if (mBoundProgram != p) { + // compile/link the program if needed and call glUseProgram + success = p->use(this, mContext); + assert_invariant(success == p->isValid()); + if (success) { + // TODO: we could even improve this if the program could tell us which of the descriptors + // bindings actually changed. In practice, it is likely that set 0 or 1 might not + // change often. 
+ decltype(mInvalidDescriptorSetBindings) changed; + changed.setValue((1 << MAX_DESCRIPTOR_SET_COUNT) - 1); + mInvalidDescriptorSetBindings |= changed; + + mBoundProgram = p; + } } if (UTILS_UNLIKELY(mContext.isES2() && success)) { diff --git a/filament/backend/src/opengl/OpenGLProgram.cpp b/filament/backend/src/opengl/OpenGLProgram.cpp index 5f3c8d50802..b4552d5c21c 100644 --- a/filament/backend/src/opengl/OpenGLProgram.cpp +++ b/filament/backend/src/opengl/OpenGLProgram.cpp @@ -255,7 +255,9 @@ void OpenGLProgram::updateUniforms( for (size_t i = 0, c = records.uniforms.size(); i < c; i++) { Program::Uniform const& u = records.uniforms[i]; GLint const loc = records.locations[i]; - if (loc < 0) { + // mRec709Location is special, it is handled by setRec709ColorSpace() and the corresponding + // entry in `buffer` is typically not initialized, so we skip it. + if (loc < 0 || loc == mRec709Location) { continue; } // u.offset is in 'uint32_t' units diff --git a/filament/backend/src/vulkan/VulkanAsyncHandles.h b/filament/backend/src/vulkan/VulkanAsyncHandles.h index d5d9eacc082..4e7920fd4da 100644 --- a/filament/backend/src/vulkan/VulkanAsyncHandles.h +++ b/filament/backend/src/vulkan/VulkanAsyncHandles.h @@ -30,43 +30,20 @@ namespace filament::backend { // Wrapper to enable use of shared_ptr for implementing shared ownership of low-level Vulkan fences. struct VulkanCmdFence { - struct SetValueScope { - public: - ~SetValueScope() { - mHolder->mutex.unlock(); - mHolder->condition.notify_all(); - } - - private: - SetValueScope(VulkanCmdFence* fenceHolder, VkResult result) : - mHolder(fenceHolder) { - mHolder->mutex.lock(); - mHolder->status.store(result); - } - VulkanCmdFence* mHolder; - friend struct VulkanCmdFence; - }; - - VulkanCmdFence(VkFence ifence); - ~VulkanCmdFence() = default; - - SetValueScope setValue(VkResult value) { - return {this, value}; + VulkanCmdFence(VkResult initialStatus) { + // Internally we use the VK_INCOMPLETE status to mean "not yet submitted". When this fence + // gets submitted, its status changes to VK_NOT_READY. Finally, when the GPU actually + // finishes executing the command buffer, the status changes to VK_SUCCESS. + status.store(initialStatus); } - VkFence& getFence() { - return fence; - } + ~VulkanCmdFence() = default; - VkResult getStatus() { - std::unique_lock lock(mutex); - return status.load(std::memory_order_acquire); - } + void setStatus(VkResult value) { status.store(value); } + + VkResult getStatus() { return status.load(std::memory_order_acquire); } private: - VkFence fence; - utils::Condition condition; - utils::Mutex mutex; std::atomic status; }; diff --git a/filament/backend/src/vulkan/VulkanCommands.cpp b/filament/backend/src/vulkan/VulkanCommands.cpp index ee8ffe24971..c3548d5cbbc 100644 --- a/filament/backend/src/vulkan/VulkanCommands.cpp +++ b/filament/backend/src/vulkan/VulkanCommands.cpp @@ -33,48 +33,28 @@ using namespace utils; namespace filament::backend { +namespace { + #if FVK_ENABLED(FVK_DEBUG_GROUP_MARKERS) using Timestamp = VulkanGroupMarkers::Timestamp; #endif -VulkanCmdFence::VulkanCmdFence(VkFence ifence) - : fence(ifence) { - // Internally we use the VK_INCOMPLETE status to mean "not yet submitted". When this fence gets - // submitted, its status changes to VK_NOT_READY. Finally, when the GPU actually finishes - // executing the command buffer, the status changes to VK_SUCCESS. 
- status.store(VK_INCOMPLETE); -} - -VulkanCommandBuffer::VulkanCommandBuffer(VulkanResourceAllocator* allocator, VkDevice device, - VkCommandPool pool) - : mResourceManager(allocator), - mPipeline(VK_NULL_HANDLE) { +VkCommandBuffer createCommandBuffer(VkDevice device, VkCommandPool pool) { + VkCommandBuffer cmdbuffer; // Create the low-level command buffer. - const VkCommandBufferAllocateInfo allocateInfo{ - .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, - .commandPool = pool, - .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, - .commandBufferCount = 1, + VkCommandBufferAllocateInfo const allocateInfo{ + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .commandPool = pool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandBufferCount = 1, }; - // The buffer allocated here will be implicitly reset when vkBeginCommandBuffer is called. // We don't need to deallocate since destroying the pool will free all of the buffers. - vkAllocateCommandBuffers(device, &allocateInfo, &mBuffer); + vkAllocateCommandBuffers(device, &allocateInfo, &cmdbuffer); + return cmdbuffer; } -CommandBufferObserver::~CommandBufferObserver() {} - -static VkCommandPool createPool(VkDevice device, uint32_t queueFamilyIndex) { - VkCommandPoolCreateInfo createInfo = { - .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, - .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT - | VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, - .queueFamilyIndex = queueFamilyIndex, - }; - VkCommandPool pool; - vkCreateCommandPool(device, &createInfo, VKALLOC, &pool); - return pool; -} +} // anonymous namespace #if FVK_ENABLED(FVK_DEBUG_GROUP_MARKERS) void VulkanGroupMarkers::push(std::string const& marker, Timestamp start) noexcept { @@ -130,178 +110,142 @@ bool VulkanGroupMarkers::empty() const noexcept { #endif // FVK_DEBUG_GROUP_MARKERS -VulkanCommands::VulkanCommands(VkDevice device, VkQueue queue, uint32_t queueFamilyIndex, - VulkanContext* context, VulkanResourceAllocator* allocator) - : mDevice(device), +VulkanCommandBuffer::VulkanCommandBuffer(VulkanContext* context, VulkanResourceAllocator* allocator, + VkDevice device, VkQueue queue, VkCommandPool pool, bool isProtected) + : mContext(context), + mMarkerCount(0), + isProtected(isProtected), + mDevice(device), mQueue(queue), - mPool(createPool(mDevice, queueFamilyIndex)), - mContext(context), - mStorage(CAPACITY) { + mResourceManager(allocator), + mBuffer(createCommandBuffer(device, pool)), + mFenceStatus(std::make_shared(VK_INCOMPLETE)) { VkSemaphoreCreateInfo sci{.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO}; - for (auto& semaphore: mSubmissionSignals) { - vkCreateSemaphore(mDevice, &sci, nullptr, &semaphore); - } + vkCreateSemaphore(mDevice, &sci, VKALLOC, &mSubmission); VkFenceCreateInfo fenceCreateInfo{.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO}; - for (auto& fence: mFences) { - vkCreateFence(device, &fenceCreateInfo, VKALLOC, &fence); - } - - for (size_t i = 0; i < CAPACITY; ++i) { - mStorage[i] = std::make_unique(allocator, mDevice, mPool); - } + vkCreateFence(device, &fenceCreateInfo, VKALLOC, &mFence); +} -#if !FVK_ENABLED(FVK_DEBUG_GROUP_MARKERS) - (void) mContext; -#endif +VulkanCommandBuffer::~VulkanCommandBuffer() { + vkDestroySemaphore(mDevice, mSubmission, VKALLOC); + vkDestroyFence(mDevice, mFence, VKALLOC); } -void VulkanCommands::terminate() { - wait(); - gc(); - vkDestroyCommandPool(mDevice, mPool, VKALLOC); - for (VkSemaphore sema: mSubmissionSignals) { - vkDestroySemaphore(mDevice, sema, VKALLOC); - } - for (VkFence fence: mFences) { - 
vkDestroyFence(mDevice, fence, VKALLOC); - } +void VulkanCommandBuffer::reset() noexcept { + mMarkerCount = 0; + mResourceManager.clear(); + mWaitSemaphores.clear(); + + // Internally we use the VK_INCOMPLETE status to mean "not yet submitted". When this fence + // gets submitted, its status changes to VK_NOT_READY. Finally, when the GPU actually + // finishes executing the command buffer, the status changes to VK_SUCCESS. + mFenceStatus = std::make_shared(VK_INCOMPLETE); + vkResetFences(mDevice, 1, &mFence); } -VulkanCommandBuffer& VulkanCommands::get() { - if (mCurrentCommandBufferIndex >= 0) { - return *mStorage[mCurrentCommandBufferIndex].get(); +void VulkanCommandBuffer::pushMarker(char const* marker) noexcept { + if (mContext->isDebugUtilsSupported()) { + VkDebugUtilsLabelEXT labelInfo = { + .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, + .pLabelName = marker, + .color = {0, 1, 0, 1}, + }; + vkCmdBeginDebugUtilsLabelEXT(mBuffer, &labelInfo); + } else if (mContext->isDebugMarkersSupported()) { + VkDebugMarkerMarkerInfoEXT markerInfo = { + .sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT, + .pMarkerName = marker, + .color = {0.0f, 1.0f, 0.0f, 1.0f}, + }; + vkCmdDebugMarkerBeginEXT(mBuffer, &markerInfo); } + mMarkerCount++; +} - // If we ran out of available command buffers, stall until one finishes. This is very rare. - // It occurs only when Filament invokes commit() or endFrame() a large number of times without - // presenting the swap chain or waiting on a fence. - while (mAvailableBufferCount == 0) { -#if FVK_ENABLED(FVK_DEBUG_COMMAND_BUFFER) - FVK_LOGI << "VulkanCommands has stalled. " - << "If this occurs frequently, consider increasing VK_MAX_COMMAND_BUFFERS." - << io::endl; -#endif - wait(); - gc(); +void VulkanCommandBuffer::popMarker() noexcept { + assert_invariant(mMarkerCount > 0); + if (mContext->isDebugUtilsSupported()) { + vkCmdEndDebugUtilsLabelEXT(mBuffer); + } else if (mContext->isDebugMarkersSupported()) { + vkCmdDebugMarkerEndEXT(mBuffer); } + mMarkerCount--; +} - VulkanCommandBuffer* currentbuf = nullptr; - // Find an available slot. - for (size_t i = 0; i < CAPACITY; ++i) { - auto wrapper = mStorage[i].get(); - if (wrapper->buffer() == VK_NULL_HANDLE) { - mCurrentCommandBufferIndex = static_cast(i); - currentbuf = wrapper; - break; - } +void VulkanCommandBuffer::insertEvent(char const* marker) noexcept { + if (mContext->isDebugUtilsSupported()) { + VkDebugUtilsLabelEXT labelInfo = { + .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, + .pLabelName = marker, + .color = {1, 1, 0, 1}, + }; + vkCmdInsertDebugUtilsLabelEXT(mBuffer, &labelInfo); + } else if (mContext->isDebugMarkersSupported()) { + VkDebugMarkerMarkerInfoEXT markerInfo = { + .sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT, + .pMarkerName = marker, + .color = {0.0f, 1.0f, 0.0f, 1.0f}, + }; + vkCmdDebugMarkerInsertEXT(mBuffer, &markerInfo); } +} - assert_invariant(currentbuf); - mAvailableBufferCount--; - - // Note that the fence wrapper uses shared_ptr because a DriverAPI fence can also have ownership - // over it. The destruction of the low-level fence occurs either in VulkanCommands::gc(), or in - // VulkanDriver::destroyFence(), both of which are safe spots. - currentbuf->fence = std::make_shared(mFences[mCurrentCommandBufferIndex]); - +void VulkanCommandBuffer::begin() noexcept { // Begin writing into the command buffer. 
- const VkCommandBufferBeginInfo binfo{ - .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, - .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + VkCommandBufferBeginInfo const binfo{ + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, }; - vkBeginCommandBuffer(currentbuf->buffer(), &binfo); - - // Notify the observer that a new command buffer has been activated. - if (mObserver) { - mObserver->onCommandBuffer(*currentbuf); - } - -#if FVK_ENABLED(FVK_DEBUG_GROUP_MARKERS) - // We push the current markers onto a temporary stack. This must be placed after currentbuf is - // set to the new command buffer since pushGroupMarker also calls get(). - while (mCarriedOverMarkers && !mCarriedOverMarkers->empty()) { - auto [marker, time] = mCarriedOverMarkers->pop(); - pushGroupMarker(marker.c_str(), time); - } -#endif - return *currentbuf; + vkBeginCommandBuffer(mBuffer, &binfo); } -bool VulkanCommands::flush() { - // It's perfectly fine to call flush when no commands have been written. - if (mCurrentCommandBufferIndex < 0) { - return false; +VkSemaphore VulkanCommandBuffer::submit() { + while (mMarkerCount > 0) { + popMarker(); } - // Before actually submitting, we need to pop any leftover group markers. - // Note that this needs to occur before vkEndCommandBuffer. -#if FVK_ENABLED(FVK_DEBUG_GROUP_MARKERS) - while (mGroupMarkers && !mGroupMarkers->empty()) { - if (!mCarriedOverMarkers) { - mCarriedOverMarkers = std::make_unique(); - } - auto const [marker, time] = mGroupMarkers->top(); - mCarriedOverMarkers->push(marker, time); - // We still need to call through to vkCmdEndDebugUtilsLabelEXT. - popGroupMarker(); - } -#endif + vkEndCommandBuffer(mBuffer); - int8_t const index = mCurrentCommandBufferIndex; - VulkanCommandBuffer const* currentbuf = mStorage[index].get(); - VkSemaphore const renderingFinished = mSubmissionSignals[index]; - - vkEndCommandBuffer(currentbuf->buffer()); - - // If the injected semaphore is an "image available" semaphore that has not yet been signaled, - // it is sometimes fine to start executing commands anyway, as along as we stall the GPU at the - // VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT stage. However we need to assume the worst - // here and use VK_PIPELINE_STAGE_ALL_COMMANDS_BIT. This is a more aggressive stall, but it is - // the only safe option because the previously submitted command buffer might have set up some - // state that the new command buffer depends on. - VkPipelineStageFlags waitDestStageMasks[2] = { - VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, - VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + VkPipelineStageFlags const waitDestStageMasks[2] = { + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, }; - VkSemaphore signals[2] = { - VK_NULL_HANDLE, - VK_NULL_HANDLE, - }; - uint32_t waitSemaphoreCount = 0; - if (mSubmissionSignal) { - signals[waitSemaphoreCount++] = mSubmissionSignal; - } - if (mInjectedSignal) { - signals[waitSemaphoreCount++] = mInjectedSignal; - } - VkCommandBuffer const cmdbuffer = currentbuf->buffer(); VkSubmitInfo submitInfo{ - .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, - .waitSemaphoreCount = waitSemaphoreCount, - .pWaitSemaphores = waitSemaphoreCount > 0 ? 
signals : nullptr, - .pWaitDstStageMask = waitDestStageMasks, - .commandBufferCount = 1, - .pCommandBuffers = &cmdbuffer, - .signalSemaphoreCount = 1u, - .pSignalSemaphores = &renderingFinished, + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .waitSemaphoreCount = mWaitSemaphores.size(), + .pWaitSemaphores = mWaitSemaphores.data(), + .pWaitDstStageMask = waitDestStageMasks, + .commandBufferCount = 1u, + .pCommandBuffers = &mBuffer, + .signalSemaphoreCount = 1u, + .pSignalSemaphores = &mSubmission, + }; + // add submit protection if needed + VkProtectedSubmitInfo protectedSubmitInfo{ + .sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO, + .protectedSubmit = VK_TRUE, }; + if (isProtected) { + submitInfo.pNext = &protectedSubmitInfo; + } + #if FVK_ENABLED(FVK_DEBUG_COMMAND_BUFFER) - FVK_LOGI << "Submitting cmdbuffer=" << cmdbuffer - << " wait=(" << signals[0] << ", " << signals[1] << ") " - << " signal=" << renderingFinished - << " fence=" << currentbuf->fence->fence - << utils::io::endl; + FVK_LOGI << "Submitting cmdbuffer=" << mBuffer + << " wait=("; + for (size_t s = 0, count = mWaitSemaphores.size(); s < count; ++s) { + FVK_LOGI << mWaitSemaphores[s] << " "; + } + FVK_LOGI << ") " + << " signal=" << mSubmission + << " fence=" << mFence << utils::io::endl; #endif - auto& cmdfence = currentbuf->fence; - UTILS_UNUSED_IN_RELEASE VkResult result = VK_SUCCESS; - { - auto scope = cmdfence->setValue(VK_NOT_READY); - result = vkQueueSubmit(mQueue, 1, &submitInfo, cmdfence->getFence()); - } + mFenceStatus->setStatus(VK_NOT_READY); + UTILS_UNUSED_IN_RELEASE VkResult result = + vkQueueSubmit(mQueue, 1, &submitInfo, mFence); #if FVK_ENABLED(FVK_DEBUG_COMMAND_BUFFER) if (result != VK_SUCCESS) { @@ -309,86 +253,212 @@ bool VulkanCommands::flush() { } #endif assert_invariant(result == VK_SUCCESS); + mWaitSemaphores.clear(); + return mSubmission; +} - mSubmissionSignal = renderingFinished; - mInjectedSignal = VK_NULL_HANDLE; - mCurrentCommandBufferIndex = -1; - return true; +CommandBufferPool::CommandBufferPool(VulkanContext* context, VulkanResourceAllocator* allocator, + VkDevice device, VkQueue queue, uint8_t queueFamilyIndex, bool isProtected) + : mDevice(device), + mRecording(INVALID) { + VkCommandPoolCreateInfo createInfo = { + .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT | + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | + (isProtected ? VK_COMMAND_POOL_CREATE_PROTECTED_BIT : 0u), + .queueFamilyIndex = queueFamilyIndex, + }; + vkCreateCommandPool(device, &createInfo, VKALLOC, &mPool); + + for (size_t i = 0; i < CAPACITY; ++i) { + mBuffers.emplace_back(std::make_unique(context, allocator, device, + queue, mPool, isProtected)); + } } -VkSemaphore VulkanCommands::acquireFinishedSignal() { - VkSemaphore semaphore = mSubmissionSignal; - mSubmissionSignal = VK_NULL_HANDLE; -#if FVK_ENABLED(FVK_DEBUG_COMMAND_BUFFER) - FVK_LOGI << "Acquiring " << semaphore << " (e.g. for vkQueuePresentKHR)" << io::endl; -#endif - return semaphore; +CommandBufferPool::~CommandBufferPool() { + wait(); + gc(); + vkDestroyCommandPool(mDevice, mPool, VKALLOC); } -void VulkanCommands::injectDependency(VkSemaphore next) { - assert_invariant(mInjectedSignal == VK_NULL_HANDLE); - mInjectedSignal = next; -#if FVK_ENABLED(FVK_DEBUG_COMMAND_BUFFER) - FVK_LOGI << "Injecting " << next << " (e.g. 
due to vkAcquireNextImageKHR)" << io::endl; -#endif +VulkanCommandBuffer& CommandBufferPool::getRecording() { + if (isRecording()) { + return *mBuffers[mRecording]; + } + + auto const findNext = [this]() { + for (int8_t i = 0; i < CAPACITY; ++i) { + if (!mSubmitted[i]) { + return i; + } + } + return INVALID; + }; + + while ((mRecording = findNext()) == INVALID) { + wait(); + gc(); + } + + auto& recording = *mBuffers[mRecording]; + recording.begin(); + return recording; } -void VulkanCommands::wait() { - VkFence fences[CAPACITY]; - size_t count = 0; - for (size_t i = 0; i < CAPACITY; i++) { - auto wrapper = mStorage[i].get(); - if (wrapper->buffer() != VK_NULL_HANDLE - && mCurrentCommandBufferIndex != static_cast(i)) { - fences[count++] = wrapper->fence->getFence(); +void CommandBufferPool::gc() { + ActiveBuffers reclaimed; + mSubmitted.forEachSetBit([this,&reclaimed] (size_t index) { + auto& buffer = mBuffers[index]; + if (buffer->getStatus() == VK_SUCCESS) { + reclaimed.set(index, true); + buffer->reset(); + } + }); + mSubmitted &= ~reclaimed; +} + +void CommandBufferPool::update() { + mSubmitted.forEachSetBit([this] (size_t index) { + auto& buffer = mBuffers[index]; + VkResult status = vkGetFenceStatus(mDevice, buffer->getVkFence()); + if (status == VK_SUCCESS) { + buffer->setComplete(); } + }); +} + +VkSemaphore CommandBufferPool::flush() { + // We're not recording right now. + if (!isRecording()) { + return VK_NULL_HANDLE; } - if (count > 0) { - vkWaitForFences(mDevice, count, fences, VK_TRUE, UINT64_MAX); - updateFences(); + auto submitSemaphore = mBuffers[mRecording]->submit(); + mSubmitted.set(mRecording, true); + mRecording = INVALID; + return submitSemaphore; +} + +void CommandBufferPool::wait() { + uint8_t count = 0; + VkFence fences[CAPACITY]; + mSubmitted.forEachSetBit([this, &count, &fences] (size_t index) { + fences[count++] = mBuffers[index]->getVkFence(); + }); + vkWaitForFences(mDevice, count, fences, VK_TRUE, UINT64_MAX); + update(); +} + +void CommandBufferPool::waitFor(VkSemaphore previousAction) { + if (!isRecording()) { + return; } + auto& recording = mBuffers[mRecording]; + recording->insertWait(previousAction); } -void VulkanCommands::gc() { - FVK_SYSTRACE_CONTEXT(); - FVK_SYSTRACE_START("commands::gc"); +void CommandBufferPool::pushMarker(char const* marker) { + getRecording().pushMarker(marker); +} - VkFence fences[CAPACITY]; - size_t count = 0; +void CommandBufferPool::popMarker() { + getRecording().popMarker(); +} + +void CommandBufferPool::insertEvent(char const* marker) { + getRecording().insertEvent(marker); +} + +VulkanCommands::VulkanCommands(VkDevice device, VkQueue queue, uint32_t queueFamilyIndex, + VkQueue protectedQueue, uint32_t protectedQueueFamilyIndex, VulkanContext* context, + VulkanResourceAllocator* allocator) + : mDevice(device), + mProtectedQueue(protectedQueue), + mProtectedQueueFamilyIndex(protectedQueueFamilyIndex), + mAllocator(allocator), + mContext(context), + mPool(std::make_unique(context, allocator, device, queue, queueFamilyIndex, + false)) { +} + +void VulkanCommands::terminate() { + mPool.reset(); + mProtectedPool.reset(); +} + +VulkanCommandBuffer& VulkanCommands::get() { + auto& ret = mPool->getRecording(); + return ret; +} + +VulkanCommandBuffer& VulkanCommands::getProtected() { + assert_invariant(mProtectedQueue != VK_NULL_HANDLE); + + if (!mProtectedPool) { + mProtectedPool = std::make_unique(mContext, mAllocator, mDevice, + mProtectedQueue, mProtectedQueueFamilyIndex, true); + } + auto& ret = mProtectedPool->getRecording(); + 
return ret; +} - for (size_t i = 0; i < CAPACITY; i++) { - auto wrapper = mStorage[i].get(); - if (wrapper->buffer() == VK_NULL_HANDLE) { +bool VulkanCommands::flush() { + VkSemaphore dependency = mInjectedDependency; + VkSemaphore lastSubmit = mLastSubmit; + bool hasFlushed = false; + + // Note that we've ordered it so that the non-protected commands are followed by the protected + // commands. This assumes that the protected commands will be that one doing the rendering into + // the protected memory (i.e. protected render target). + for (auto pool: {mPool.get(), mProtectedPool.get()}) { + if (!pool || !pool->isRecording()) { continue; } - auto const vkfence = wrapper->fence->getFence(); - VkResult const result = vkGetFenceStatus(mDevice, vkfence); - if (result != VK_SUCCESS) { - continue; + if (dependency != VK_NULL_HANDLE) { + pool->waitFor(dependency); + } + if (lastSubmit != VK_NULL_HANDLE) { + pool->waitFor(lastSubmit); + lastSubmit = VK_NULL_HANDLE; } - fences[count++] = vkfence; - wrapper->fence->setValue(VK_SUCCESS); - wrapper->reset(); - mAvailableBufferCount++; + dependency = pool->flush(); + hasFlushed = true; } - if (count > 0) { - vkResetFences(mDevice, count, fences); + if (hasFlushed) { + mInjectedDependency = VK_NULL_HANDLE; + mLastSubmit = dependency; + } + + return true; +} + +void VulkanCommands::wait() { + FVK_SYSTRACE_CONTEXT(); + FVK_SYSTRACE_START("commands::wait"); + + mPool->wait(); + if (mProtectedPool) { + mProtectedPool->wait(); + } + FVK_SYSTRACE_END(); +} + +void VulkanCommands::gc() { + FVK_SYSTRACE_CONTEXT(); + FVK_SYSTRACE_START("commands::gc"); + + mPool->gc(); + if (mProtectedPool) { + mProtectedPool->gc(); } FVK_SYSTRACE_END(); } void VulkanCommands::updateFences() { - for (size_t i = 0; i < CAPACITY; i++) { - auto wrapper = mStorage[i].get(); - if (wrapper->buffer() != VK_NULL_HANDLE) { - VulkanCmdFence* fence = wrapper->fence.get(); - if (fence) { - VkResult status = vkGetFenceStatus(mDevice, fence->getFence()); - // This is either VK_SUCCESS, VK_NOT_READY, or VK_ERROR_DEVICE_LOST. 
- fence->setValue(status); - } - } + mPool->update(); + if (mProtectedPool) { + mProtectedPool->update(); } } @@ -402,29 +472,14 @@ void VulkanCommands::pushGroupMarker(char const* str, VulkanGroupMarkers::Timest FVK_LOGD << "----> " << str << utils::io::endl; } #endif - - // TODO: Add group marker color to the Driver API - VkCommandBuffer const cmdbuffer = get().buffer(); - if (!mGroupMarkers) { mGroupMarkers = std::make_unique(); } mGroupMarkers->push(str, timestamp); - if (mContext->isDebugUtilsSupported()) { - VkDebugUtilsLabelEXT labelInfo = { - .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, - .pLabelName = str, - .color = {0, 1, 0, 1}, - }; - vkCmdBeginDebugUtilsLabelEXT(cmdbuffer, &labelInfo); - } else if (mContext->isDebugMarkersSupported()) { - VkDebugMarkerMarkerInfoEXT markerInfo = { - .sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT, - .pMarkerName = str, - .color = {0.0f, 1.0f, 0.0f, 1.0f}, - }; - vkCmdDebugMarkerBeginEXT(cmdbuffer, &markerInfo); + mPool->pushMarker(str); + if (mProtectedPool) { + mProtectedPool->pushMarker(str); } } @@ -442,10 +497,9 @@ void VulkanCommands::popGroupMarker() { #else mGroupMarkers->pop(); #endif - if (mContext->isDebugUtilsSupported()) { - vkCmdEndDebugUtilsLabelEXT(cmdbuffer); - } else if (mContext->isDebugMarkersSupported()) { - vkCmdDebugMarkerEndEXT(cmdbuffer); + mPool->popMarker(); + if (mProtectedPool) { + mProtectedPool->popMarker(); } } else if (mCarriedOverMarkers && !mCarriedOverMarkers->empty()) { // It could be that pop is called between flush() and get() (new command buffer), in which @@ -455,22 +509,10 @@ void VulkanCommands::popGroupMarker() { } } -void VulkanCommands::insertEventMarker(char const* string, uint32_t len) { - VkCommandBuffer const cmdbuffer = get().buffer(); - if (mContext->isDebugUtilsSupported()) { - VkDebugUtilsLabelEXT labelInfo = { - .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, - .pLabelName = string, - .color = {1, 1, 0, 1}, - }; - vkCmdInsertDebugUtilsLabelEXT(cmdbuffer, &labelInfo); - } else if (mContext->isDebugMarkersSupported()) { - VkDebugMarkerMarkerInfoEXT markerInfo = { - .sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT, - .pMarkerName = string, - .color = {0.0f, 1.0f, 0.0f, 1.0f}, - }; - vkCmdDebugMarkerInsertEXT(cmdbuffer, &markerInfo); +void VulkanCommands::insertEventMarker(char const* str, uint32_t len) { + mPool->insertEvent(str); + if (mProtectedPool) { + mProtectedPool->insertEvent(str); } } diff --git a/filament/backend/src/vulkan/VulkanCommands.h b/filament/backend/src/vulkan/VulkanCommands.h index f037528c2ef..25b839ec49e 100644 --- a/filament/backend/src/vulkan/VulkanCommands.h +++ b/filament/backend/src/vulkan/VulkanCommands.h @@ -24,6 +24,7 @@ #include "VulkanAsyncHandles.h" #include "VulkanConstants.h" #include "VulkanResources.h" +#include "VulkanUtility.h" #include #include @@ -64,11 +65,14 @@ class VulkanGroupMarkers { // DriverApi fence object and should not be destroyed until both the DriverApi object is freed and // we're done waiting on the most recent submission of the given command buffer. 
struct VulkanCommandBuffer { - VulkanCommandBuffer(VulkanResourceAllocator* allocator, VkDevice device, VkCommandPool pool); + VulkanCommandBuffer(VulkanContext* mContext, VulkanResourceAllocator* allocator, + VkDevice device, VkQueue queue, VkCommandPool pool, bool isProtected); VulkanCommandBuffer(VulkanCommandBuffer const&) = delete; VulkanCommandBuffer& operator=(VulkanCommandBuffer const&) = delete; + ~VulkanCommandBuffer(); + inline void acquire(VulkanResource* resource) { mResourceManager.acquire(resource); } @@ -76,41 +80,87 @@ struct VulkanCommandBuffer { inline void acquire(VulkanAcquireOnlyResourceManager* srcResources) { mResourceManager.acquireAll(srcResources); } + void reset() noexcept; - inline void reset() { - fence.reset(); - mResourceManager.clear(); - mPipeline = VK_NULL_HANDLE; + inline void insertWait(VkSemaphore sem) { + mWaitSemaphores.insert(sem); } - inline void setPipeline(VkPipeline pipeline) { - mPipeline = pipeline; + void pushMarker(char const* marker) noexcept; + void popMarker() noexcept; + void insertEvent(char const* marker) noexcept; + + void begin() noexcept; + VkSemaphore submit(); + + inline void setComplete() { + mFenceStatus->setStatus(VK_SUCCESS); } - inline VkPipeline pipeline() const { - return mPipeline; + VkResult getStatus() { + return mFenceStatus->getStatus(); } - inline VkCommandBuffer buffer() const { - if (fence) { - return mBuffer; - } - return VK_NULL_HANDLE; + std::shared_ptr getFenceStatus() const { + return mFenceStatus; } - std::shared_ptr fence; + VkFence getVkFence() const { + return mFence; + } + + VkCommandBuffer buffer() const { + return mBuffer; + } private: + VulkanContext* mContext; + uint8_t mMarkerCount; + bool const isProtected; + VkDevice mDevice; + VkQueue mQueue; VulkanAcquireOnlyResourceManager mResourceManager; + CappedArray mWaitSemaphores; VkCommandBuffer mBuffer; - VkPipeline mPipeline; + VkSemaphore mSubmission; + VkFence mFence; + std::shared_ptr mFenceStatus; + }; -// Allows classes to be notified after a new command buffer has been activated. -class CommandBufferObserver { -public: - virtual void onCommandBuffer(const VulkanCommandBuffer& cmdbuffer) = 0; - virtual ~CommandBufferObserver(); +struct CommandBufferPool { + using ActiveBuffers = utils::bitset32; + static constexpr int8_t INVALID = -1; + + CommandBufferPool(VulkanContext* context, VulkanResourceAllocator* allocator, VkDevice device, + VkQueue queue, uint8_t queueFamilyIndex, bool isProtected); + ~CommandBufferPool(); + + VulkanCommandBuffer& getRecording(); + + void gc(); + void update(); + VkSemaphore flush(); + void wait(); + + void waitFor(VkSemaphore previousAction); + + void pushMarker(char const* marker); + void popMarker(); + void insertEvent(char const* marker); + + inline bool isRecording() const { return mRecording != INVALID; } + +private: + static constexpr int CAPACITY = FVK_MAX_COMMAND_BUFFERS; + // int8 only goes up to 127, therefore capacity must be less than that. + static_assert(CAPACITY < 128); + using BufferList = utils::FixedCapacityVector>; + VkDevice mDevice; + VkCommandPool mPool; + ActiveBuffers mSubmitted; + std::vector> mBuffers; + int8_t mRecording; }; // Manages a set of command buffers and semaphores, exposing an API that is significantly simpler @@ -124,9 +174,6 @@ class CommandBufferObserver { // - This creates a guarantee of in-order execution. // - Semaphores are recycled to prevent create / destroy churn. // -// - Notifies listeners when recording begins in a new VkCommandBuffer. 
-// - Used by PipelineCache so that it knows when to clear out its shadow state. -// // - Allows 1 user to inject a "dependency" semaphore that stalls the next flush. // - This is used for asynchronous acquisition of a swap chain image, since the GPU // might require a valid swap chain image when it starts executing the command buffer. @@ -142,13 +189,18 @@ class CommandBufferObserver { class VulkanCommands { public: VulkanCommands(VkDevice device, VkQueue queue, uint32_t queueFamilyIndex, - VulkanContext* context, VulkanResourceAllocator* allocator); + VkQueue protectedQueue, uint32_t protectedQueueFamilyIndex, VulkanContext* context, + VulkanResourceAllocator* allocator); void terminate(); // Creates a "current" command buffer if none exists, otherwise returns the current one. VulkanCommandBuffer& get(); + // Creates a "current" protected capable command buffer if none exists, otherwise + // returns the current one. + VulkanCommandBuffer& getProtected(); + // Submits the current command buffer if it exists, then sets "current" to null. // If there are no outstanding commands then nothing happens and this returns false. bool flush(); @@ -156,11 +208,17 @@ class VulkanCommands { // Returns the "rendering finished" semaphore for the most recent flush and removes // it from the existing dependency chain. This is especially useful for setting up // vkQueuePresentKHR. - VkSemaphore acquireFinishedSignal(); + VkSemaphore acquireFinishedSignal() { + VkSemaphore ret = mLastSubmit; + mLastSubmit = VK_NULL_HANDLE; + return ret; + } // Takes a semaphore that signals when the next flush can occur. Only one injected // semaphore is allowed per flush. Useful after calling vkAcquireNextImageKHR. - void injectDependency(VkSemaphore next); + void injectDependency(VkSemaphore next) { + mInjectedDependency = next; + } // Destroys all command buffers that are no longer in use. void gc(); @@ -171,37 +229,26 @@ class VulkanCommands { // Updates the atomic "status" variable in every extant fence. void updateFences(); - // Sets an observer who is notified every time a new command buffer has been made "current". - // The observer's event handler can only be called during get(). - void setObserver(CommandBufferObserver* observer) { mObserver = observer; } - #if FVK_ENABLED(FVK_DEBUG_GROUP_MARKERS) void pushGroupMarker(char const* str, VulkanGroupMarkers::Timestamp timestamp = {}); - void popGroupMarker(); - void insertEventMarker(char const* string, uint32_t len); - std::string getTopGroupMarker() const; #endif private: - static constexpr int CAPACITY = FVK_MAX_COMMAND_BUFFERS; VkDevice const mDevice; - VkQueue const mQueue; - VkCommandPool const mPool; - VulkanContext const* mContext; + VkQueue const mProtectedQueue; + // For deferred initialization if/when we need protected content + uint32_t const mProtectedQueueFamilyIndex; + VulkanResourceAllocator* mAllocator; + VulkanContext* mContext; - // int8 only goes up to 127, therefore capacity must be less than that. 
- static_assert(CAPACITY < 128); - int8_t mCurrentCommandBufferIndex = -1; - VkSemaphore mSubmissionSignal = {}; - VkSemaphore mInjectedSignal = {}; - utils::FixedCapacityVector> mStorage; - VkFence mFences[CAPACITY] = {}; - VkSemaphore mSubmissionSignals[CAPACITY] = {}; - uint8_t mAvailableBufferCount = CAPACITY; - CommandBufferObserver* mObserver = nullptr; + std::unique_ptr mPool; + std::unique_ptr mProtectedPool; + + VkSemaphore mInjectedDependency = VK_NULL_HANDLE; + VkSemaphore mLastSubmit = VK_NULL_HANDLE; #if FVK_ENABLED(FVK_DEBUG_GROUP_MARKERS) std::unique_ptr mGroupMarkers; diff --git a/filament/backend/src/vulkan/VulkanContext.cpp b/filament/backend/src/vulkan/VulkanContext.cpp index 9fea8e584a8..576fff38e17 100644 --- a/filament/backend/src/vulkan/VulkanContext.cpp +++ b/filament/backend/src/vulkan/VulkanContext.cpp @@ -121,7 +121,7 @@ void VulkanTimestamps::beginQuery(VulkanCommandBuffer const* commands, vkCmdWriteTimestamp(cmdbuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, mPool, index); // We stash this because getResult might come before the query is actually processed. - query->setFence(commands->fence); + query->setFence(commands->getFenceStatus()); } void VulkanTimestamps::endQuery(VulkanCommandBuffer const* commands, diff --git a/filament/backend/src/vulkan/VulkanDriver.cpp b/filament/backend/src/vulkan/VulkanDriver.cpp index 59cecc8a0e7..bb72633af65 100644 --- a/filament/backend/src/vulkan/VulkanDriver.cpp +++ b/filament/backend/src/vulkan/VulkanDriver.cpp @@ -201,7 +201,8 @@ VulkanDriver::VulkanDriver(VulkanPlatform* platform, VulkanContext const& contex mResourceManager(&mResourceAllocator), mThreadSafeResourceManager(&mResourceAllocator), mCommands(mPlatform->getDevice(), mPlatform->getGraphicsQueue(), - mPlatform->getGraphicsQueueFamilyIndex(), &mContext, &mResourceAllocator), + mPlatform->getGraphicsQueueFamilyIndex(), + nullptr, 0, &mContext, &mResourceAllocator), mPipelineLayoutCache(mPlatform->getDevice(), &mResourceAllocator), mPipelineCache(mPlatform->getDevice(), mAllocator), mStagePool(mAllocator, &mCommands), @@ -681,8 +682,8 @@ void VulkanDriver::destroyRenderTarget(Handle rth) { } void VulkanDriver::createFenceR(Handle fh, int) { - VulkanCommandBuffer const& commandBuffer = mCommands.get(); - mResourceAllocator.construct(fh, commandBuffer.fence); + VulkanCommandBuffer* cmdbuf = &mCommands.get(); + mResourceAllocator.construct(fh, cmdbuf->getFenceStatus()); } void VulkanDriver::createSwapChainR(Handle sch, void* nativeWindow, uint64_t flags) { diff --git a/filament/backend/src/vulkan/VulkanHandles.cpp b/filament/backend/src/vulkan/VulkanHandles.cpp index dfb4d51d17f..9fda018943b 100644 --- a/filament/backend/src/vulkan/VulkanHandles.cpp +++ b/filament/backend/src/vulkan/VulkanHandles.cpp @@ -87,14 +87,12 @@ BitmaskGroup fromBackendLayout(DescriptorSetLayout const& layout) { } break; } + // TODO: properly handle external sampler + case DescriptorType::SAMPLER_EXTERNAL: case DescriptorType::SAMPLER: { fromStageFlags(binding.stageFlags, binding.binding, mask.sampler); break; } - case DescriptorType::SAMPLER_EXTERNAL: { - PANIC_POSTCONDITION("DescriptorType::SAMPLER_EXTERNAL is not supported yet"); - break; - } case DescriptorType::INPUT_ATTACHMENT: { fromStageFlags(binding.stageFlags, binding.binding, mask.inputAttachment); break; diff --git a/filament/backend/src/vulkan/VulkanPipelineCache.cpp b/filament/backend/src/vulkan/VulkanPipelineCache.cpp index 4c440a00857..dd730fdab73 100644 --- a/filament/backend/src/vulkan/VulkanPipelineCache.cpp +++ 
b/filament/backend/src/vulkan/VulkanPipelineCache.cpp @@ -66,17 +66,12 @@ void VulkanPipelineCache::bindPipeline(VulkanCommandBuffer* commands) { VkCommandBuffer const cmdbuffer = commands->buffer(); PipelineCacheEntry* cacheEntry = getOrCreatePipeline(); - // Check if the required pipeline is already bound. - if (cacheEntry->handle == commands->pipeline()) { - return; - } // If an error occurred, allow higher levels to handle it gracefully. assert_invariant(cacheEntry != nullptr && "Failed to create/find pipeline"); mBoundPipeline = mPipelineRequirements; vkCmdBindPipeline(cmdbuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, cacheEntry->handle); - commands->setPipeline(cacheEntry->handle); } VulkanPipelineCache::PipelineCacheEntry* VulkanPipelineCache::createPipeline() noexcept { diff --git a/filament/backend/src/vulkan/caching/VulkanDescriptorSetManager.h b/filament/backend/src/vulkan/caching/VulkanDescriptorSetManager.h index 2735e5062b1..1bbf6e29b04 100644 --- a/filament/backend/src/vulkan/caching/VulkanDescriptorSetManager.h +++ b/filament/backend/src/vulkan/caching/VulkanDescriptorSetManager.h @@ -94,7 +94,7 @@ class VulkanDescriptorSetManager { struct { VkPipelineLayout pipelineLayout = VK_NULL_HANDLE; DescriptorSetMask setMask; - DescriptorSetArray boundSets; + DescriptorSetArray boundSets = {}; } mLastBoundInfo; }; diff --git a/filament/include/filament/Engine.h b/filament/include/filament/Engine.h index 311cf2e5a0c..17aaea975f5 100644 --- a/filament/include/filament/Engine.h +++ b/filament/include/filament/Engine.h @@ -24,6 +24,10 @@ #include #include +#include + +#include +#include #include #include @@ -309,6 +313,7 @@ class UTILS_PUBLIC Engine { /** * Set to `true` to forcibly disable parallel shader compilation in the backend. * Currently only honored by the GL and Metal backends. + * @deprecated use "backend.disable_parallel_shader_compile" feature flag instead */ bool disableParallelShaderCompile = false; @@ -349,6 +354,7 @@ class UTILS_PUBLIC Engine { /* * Disable backend handles use-after-free checks. + * @deprecated use "backend.disable_handle_use_after_free_check" feature flag instead */ bool disableHandleUseAfterFreeCheck = false; @@ -385,10 +391,32 @@ class UTILS_PUBLIC Engine { * Assert the native window associated to a SwapChain is valid when calling makeCurrent(). * This is only supported for: * - PlatformEGLAndroid + * @deprecated use "backend.opengl.assert_native_window_is_valid" feature flag instead */ bool assertNativeWindowIsValid = false; }; + + /** + * Feature flags can be enabled or disabled when the Engine is built. Some feature flags can + * also be toggled at any time. Feature flags should always use their default value unless + * the feature enabled by the flag is faulty. Feature flags provide a last-resort way to + * disable problematic features. + * Feature flags are intended to have a short lifetime and are regularly removed as features + * mature. 
+ */ + struct FeatureFlag { + char const* UTILS_NONNULL name; //!< name of the feature flag + char const* UTILS_NONNULL description; //!< short description + bool const* UTILS_NONNULL value; //!< pointer to the value of the flag + bool constant; //!< whether the flag is constant after construction + }; + + /** + * Returns the list of available feature flags + */ + utils::Slice getFeatureFlags() const noexcept; + #if UTILS_HAS_THREADING using CreateCallback = void(void* UTILS_NULLABLE user, void* UTILS_NONNULL token); #endif @@ -461,6 +489,21 @@ class UTILS_PUBLIC Engine { */ Builder& paused(bool paused) noexcept; + /** + * Set a feature flag value. This is the only way to set constant feature flags. + * @param name feature name + * @param value true to enable, false to disable + * @return A reference to this Builder for chaining calls. + */ + Builder& feature(char const* UTILS_NONNULL name, bool value) noexcept; + + /** + * Enables a list of features. + * @param list list of feature names to enable. + * @return A reference to this Builder for chaining calls. + */ + Builder& features(std::initializer_list list) noexcept; + #if UTILS_HAS_THREADING /** * Creates the filament Engine asynchronously. @@ -1078,6 +1121,37 @@ class UTILS_PUBLIC Engine { DebugRegistry& getDebugRegistry() noexcept; + /** + * Check if a feature flag exists + * @param name name of the feature flag to check + * @return true if the feature flag exists, false otherwise + */ + inline bool hasFeatureFlag(char const* UTILS_NONNULL name) noexcept { + return getFeatureFlag(name).has_value(); + } + + /** + * Set the value of a non-constant feature flag. + * @param name name of the feature flag to set + * @param value value to set + * @return true if the value was set, false if the feature flag is constant or doesn't exist. + */ + bool setFeatureFlag(char const* UTILS_NONNULL name, bool value) noexcept; + + /** + * Retrieves the value of any feature flag. + * @param name name of the feature flag + * @return the value of the flag if it exists + */ + std::optional getFeatureFlag(char const* UTILS_NONNULL name) const noexcept; + + /** + * Returns a pointer to a non-constant feature flag value. + * @param name name of the feature flag + * @return a pointer to the feature flag value, or nullptr if the feature flag is constant or doesn't exist + */ + bool* UTILS_NULLABLE getFeatureFlagPtr(char const* UTILS_NONNULL name) const noexcept; + protected: //! 
\privatesection Engine() noexcept = default; diff --git a/filament/src/Engine.cpp b/filament/src/Engine.cpp index 771d02c22a3..c95b944ccff 100644 --- a/filament/src/Engine.cpp +++ b/filament/src/Engine.cpp @@ -40,6 +40,7 @@ #include #include +#include #include @@ -442,6 +443,22 @@ uint64_t Engine::getSteadyClockTimeNano() noexcept { return std::chrono::steady_clock::now().time_since_epoch().count(); } +utils::Slice Engine::getFeatureFlags() const noexcept { + return downcast(this)->getFeatureFlags(); +} + +bool Engine::setFeatureFlag(char const* name, bool value) noexcept { + return downcast(this)->setFeatureFlag(name, value); +} + +std::optional Engine::getFeatureFlag(char const* name) const noexcept { + return downcast(this)->getFeatureFlag(name); +} + +bool* Engine::getFeatureFlagPtr(char const* UTILS_NONNULL name) const noexcept { + return downcast(this)->getFeatureFlagPtr(name); +} + #if defined(__EMSCRIPTEN__) void Engine::resetBackendState() noexcept { downcast(this)->resetBackendState(); diff --git a/filament/src/details/Engine.cpp b/filament/src/details/Engine.cpp index a6c5f2eead8..6b4e56a1e42 100644 --- a/filament/src/details/Engine.cpp +++ b/filament/src/details/Engine.cpp @@ -46,6 +46,7 @@ #include #include +#include #include #include #include @@ -53,8 +54,17 @@ #include #include +#include #include +#include #include +#include +#include +#include + +#include +#include +#include #include "generated/resources/materials.h" @@ -73,6 +83,8 @@ struct Engine::BuilderDetails { FeatureLevel mFeatureLevel = FeatureLevel::FEATURE_LEVEL_1; void* mSharedContext = nullptr; bool mPaused = false; + std::unordered_map mFeatureFlags; + static Config validateConfig(Config config) noexcept; }; @@ -104,11 +116,11 @@ Engine* FEngine::create(Engine::Builder const& builder) { DriverConfig const driverConfig{ .handleArenaSize = instance->getRequestedDriverHandleArenaSize(), .metalUploadBufferSizeBytes = instance->getConfig().metalUploadBufferSizeBytes, - .disableParallelShaderCompile = instance->getConfig().disableParallelShaderCompile, - .disableHandleUseAfterFreeCheck = instance->getConfig().disableHandleUseAfterFreeCheck, + .disableParallelShaderCompile = instance->features.backend.disable_parallel_shader_compile, + .disableHandleUseAfterFreeCheck = instance->features.backend.disable_handle_use_after_free_check, .forceGLES2Context = instance->getConfig().forceGLES2Context, .stereoscopicType = instance->getConfig().stereoscopicType, - .assertNativeWindowIsValid = instance->getConfig().assertNativeWindowIsValid, + .assertNativeWindowIsValid = instance->features.backend.opengl.assert_native_window_is_valid, }; instance->mDriver = platform->createDriver(sharedContext, driverConfig); @@ -219,7 +231,34 @@ FEngine::FEngine(Engine::Builder const& builder) : mMainThreadId(ThreadUtils::getThreadId()), mConfig(builder->mConfig) { - // we're assuming we're on the main thread here. 
diff --git a/filament/src/details/Engine.cpp b/filament/src/details/Engine.cpp
index a6c5f2eead8..6b4e56a1e42 100644
--- a/filament/src/details/Engine.cpp
+++ b/filament/src/details/Engine.cpp
@@ -46,6 +46,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -53,8 +54,17 @@
 #include
 #include
+#include
 #include
+#include
 #include
+#include
+#include
+#include
+
+#include
+#include
+#include
 #include "generated/resources/materials.h"
@@ -73,6 +83,8 @@ struct Engine::BuilderDetails {
     FeatureLevel mFeatureLevel = FeatureLevel::FEATURE_LEVEL_1;
     void* mSharedContext = nullptr;
     bool mPaused = false;
+    std::unordered_map<std::string_view, bool> mFeatureFlags;
+
     static Config validateConfig(Config config) noexcept;
 };
 
@@ -104,11 +116,11 @@ Engine* FEngine::create(Engine::Builder const& builder) {
     DriverConfig const driverConfig{
             .handleArenaSize = instance->getRequestedDriverHandleArenaSize(),
             .metalUploadBufferSizeBytes = instance->getConfig().metalUploadBufferSizeBytes,
-            .disableParallelShaderCompile = instance->getConfig().disableParallelShaderCompile,
-            .disableHandleUseAfterFreeCheck = instance->getConfig().disableHandleUseAfterFreeCheck,
+            .disableParallelShaderCompile = instance->features.backend.disable_parallel_shader_compile,
+            .disableHandleUseAfterFreeCheck = instance->features.backend.disable_handle_use_after_free_check,
             .forceGLES2Context = instance->getConfig().forceGLES2Context,
             .stereoscopicType = instance->getConfig().stereoscopicType,
-            .assertNativeWindowIsValid = instance->getConfig().assertNativeWindowIsValid,
+            .assertNativeWindowIsValid = instance->features.backend.opengl.assert_native_window_is_valid,
     };
 
     instance->mDriver = platform->createDriver(sharedContext, driverConfig);
@@ -219,7 +231,34 @@ FEngine::FEngine(Engine::Builder const& builder) :
         mMainThreadId(ThreadUtils::getThreadId()),
         mConfig(builder->mConfig)
 {
-    // we're assuming we're on the main thread here.
+    // update a feature flag from Engine::Config if the flag is not specified in the Builder
+    auto const featureFlagsBackwardCompatibility =
+            [this, &builder](std::string_view name, bool value) {
+        if (builder->mFeatureFlags.find(name) == builder->mFeatureFlags.end()) {
+            auto* const p = getFeatureFlagPtr(name, true);
+            if (p) {
+                *p = value;
+            }
+        }
+    };
+
+    // update all the features flags specified in the builder
+    for (auto const& feature : builder->mFeatureFlags) {
+        auto* const p = getFeatureFlagPtr(feature.first, true);
+        if (p) {
+            *p = feature.second;
+        }
+    }
+
+    // update "old" feature flags that were specified in Engine::Config
+    featureFlagsBackwardCompatibility("backend.disable_parallel_shader_compile",
+            mConfig.disableParallelShaderCompile);
+    featureFlagsBackwardCompatibility("backend.disable_handle_use_after_free_check",
+            mConfig.disableHandleUseAfterFreeCheck);
+    featureFlagsBackwardCompatibility("backend.opengl.assert_native_window_is_valid",
+            mConfig.assertNativeWindowIsValid);
+
+    // We're assuming we're on the main thread here.
     // (it may not be the case)
     mJobSystem.adopt();
 
@@ -702,11 +741,11 @@ int FEngine::loop() {
     DriverConfig const driverConfig {
             .handleArenaSize = getRequestedDriverHandleArenaSize(),
             .metalUploadBufferSizeBytes = mConfig.metalUploadBufferSizeBytes,
-            .disableParallelShaderCompile = mConfig.disableParallelShaderCompile,
-            .disableHandleUseAfterFreeCheck = mConfig.disableHandleUseAfterFreeCheck,
+            .disableParallelShaderCompile = features.backend.disable_parallel_shader_compile,
+            .disableHandleUseAfterFreeCheck = features.backend.disable_handle_use_after_free_check,
             .forceGLES2Context = mConfig.forceGLES2Context,
             .stereoscopicType = mConfig.stereoscopicType,
-            .assertNativeWindowIsValid = mConfig.assertNativeWindowIsValid,
+            .assertNativeWindowIsValid = features.backend.opengl.assert_native_window_is_valid,
     };
 
     mDriver = mPlatform->createDriver(mSharedGLContext, driverConfig);
@@ -1316,6 +1355,32 @@ void FEngine::unprotected() noexcept {
     mUnprotectedDummySwapchain->makeCurrent(getDriverApi());
 }
 
+bool FEngine::setFeatureFlag(char const* name, bool value) noexcept {
+    auto* const p = getFeatureFlagPtr(name);
+    if (p) {
+        *p = value;
+    }
+    return p != nullptr;
+}
+
+std::optional<bool> FEngine::getFeatureFlag(char const* name) const noexcept {
+    auto* const p = getFeatureFlagPtr(name, true);
+    if (p) {
+        return *p;
+    }
+    return std::nullopt;
+}
+
+bool* FEngine::getFeatureFlagPtr(std::string_view name, bool allowConstant) const noexcept {
+    auto pos = std::find_if(mFeatures.begin(), mFeatures.end(),
+            [name](FeatureFlag const& entry) {
+                return name == entry.name;
+            });
+
+    return (pos != mFeatures.end() && (!pos->constant || allowConstant)) ?
+            const_cast<bool*>(pos->value) : nullptr;
+}
+
 // ------------------------------------------------------------------------------------------------
 
 Engine::Builder::Builder() noexcept = default;
@@ -1355,6 +1420,20 @@ Engine::Builder& Engine::Builder::paused(bool paused) noexcept {
     return *this;
 }
 
+Engine::Builder& Engine::Builder::feature(char const* name, bool value) noexcept {
+    mImpl->mFeatureFlags[name] = value;
+    return *this;
+}
+
+Engine::Builder& Engine::Builder::features(std::initializer_list<char const*> list) noexcept {
+    for (auto name : list) {
+        if (name) {
+            feature(name, true);
+        }
+    }
+    return *this;
+}
+
 #if UTILS_HAS_THREADING
 
 void Engine::Builder::build(Invocable&& callback) const {
diff --git a/filament/src/details/Engine.h b/filament/src/details/Engine.h
index 712b1030382..1f21849051c 100644
--- a/filament/src/details/Engine.h
+++ b/filament/src/details/Engine.h
@@ -56,21 +56,24 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
-#include
+#include
+#include
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -672,6 +675,36 @@ class FEngine : public Engine {
         } stereo;
         matdbg::DebugServer* server = nullptr;
     } debug;
+
+    struct {
+        struct {
+            struct {
+                bool assert_native_window_is_valid = false;
+            } opengl;
+            bool disable_parallel_shader_compile = false;
+            bool disable_handle_use_after_free_check = false;
+        } backend;
+    } features;
+
+    std::array<FeatureFlag, 3> const mFeatures{{
+            { "backend.disable_parallel_shader_compile",
+              "Disable parallel shader compilation in GL and Metal backends.",
+              &features.backend.disable_parallel_shader_compile, true },
+            { "backend.disable_handle_use_after_free_check",
+              "Disable Handle<> use-after-free checks.",
+              &features.backend.disable_handle_use_after_free_check, true },
+            { "backend.opengl.assert_native_window_is_valid",
+              "Asserts that the ANativeWindow is valid when rendering starts.",
+              &features.backend.opengl.assert_native_window_is_valid, true }
+    }};
+
+    utils::Slice<const FeatureFlag> getFeatureFlags() const noexcept {
+        return { mFeatures.data(), mFeatures.size() };
+    }
+
+    bool setFeatureFlag(char const* name, bool value) noexcept;
+    std::optional<bool> getFeatureFlag(char const* name) const noexcept;
+    bool* getFeatureFlagPtr(std::string_view name, bool allowConstant = false) const noexcept;
 };
 
 FILAMENT_DOWNCAST(Engine)
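The backward-compatibility shim in the `FEngine` constructor means the deprecated `Engine::Config` fields and the new flag names stay equivalent for now. A minimal migration sketch, not part of the patch and assuming the existing `Engine::Builder::config()` entry point:

```cpp
#include <filament/Engine.h>

using namespace filament;

// Sketch only: both engines end up with parallel shader compilation disabled.
// The Config field survives through the backward-compatibility path above;
// the feature flag is the forward-looking spelling.
Engine::Config config{};
config.disableParallelShaderCompile = true;           // deprecated spelling
Engine* legacy = Engine::Builder()
        .config(&config)                              // existing Builder API, not added here
        .build();

Engine* modern = Engine::Builder()
        .features({ "backend.disable_parallel_shader_compile" })  // new spelling
        .build();
```
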
diff --git a/ios/CocoaPods/Filament.podspec b/ios/CocoaPods/Filament.podspec
index af98212f0e8..44fe5926955 100644
--- a/ios/CocoaPods/Filament.podspec
+++ b/ios/CocoaPods/Filament.podspec
@@ -1,12 +1,12 @@
 Pod::Spec.new do |spec|
   spec.name = "Filament"
-  spec.version = "1.56.0"
+  spec.version = "1.56.1"
   spec.license = { :type => "Apache 2.0", :file => "LICENSE" }
   spec.homepage = "https://google.github.io/filament"
   spec.authors = "Google LLC."
   spec.summary = "Filament is a real-time physically based rendering engine for Android, iOS, Windows, Linux, macOS, and WASM/WebGL."
   spec.platform = :ios, "11.0"
-  spec.source = { :http => "https://github.com/google/filament/releases/download/v1.56.0/filament-v1.56.0-ios.tgz" }
+  spec.source = { :http => "https://github.com/google/filament/releases/download/v1.56.1/filament-v1.56.1-ios.tgz" }
 
   # Fix linking error with Xcode 12; we do not yet support the simulator on Apple silicon.
   spec.pod_target_xcconfig = {
diff --git a/web/filament-js/package.json b/web/filament-js/package.json
index 3fde34d6082..bbca1748ed7 100644
--- a/web/filament-js/package.json
+++ b/web/filament-js/package.json
@@ -1,6 +1,6 @@
 {
   "name": "filament",
-  "version": "1.56.0",
+  "version": "1.56.1",
   "description": "Real-time physically based rendering engine",
   "main": "filament.js",
   "module": "filament.js",