Skip to content
This repository was archived by the owner on Feb 25, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 19 additions & 1 deletion fml/synchronization/sync_switch.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,26 @@ void SyncSwitch::Execute(const SyncSwitch::Handlers& handlers) const {
}

void SyncSwitch::SetSwitch(bool value) {
  {
    // Scope the lock to the write only, so the observer callbacks below run
    // without holding the mutex (avoids deadlock if an observer calls back
    // into this SyncSwitch, e.g. via Execute).
    fml::UniqueLock lock(*mutex_);
    value_ = value;
  }
  // NOTE(review): observers_ is read here without holding mutex_, while
  // AddObserver/RemoveObserver mutate it under the lock — presumably
  // SetSwitch and observer (un)registration happen on the same thread;
  // TODO confirm.
  for (Observer* observer : observers_) {
    observer->OnSyncSwitchUpdate(value);
  }
}

/// Registers |observer| for switch-change notifications.
///
/// Threadsafe. Adding the same observer twice is a no-op, so each observer is
/// notified at most once per SetSwitch call.
void SyncSwitch::AddObserver(Observer* observer) const {
  fml::UniqueLock lock(*mutex_);
  // Removed stray `value_ = value;` — `value` is not a parameter of this
  // function, so the line did not compile and was unrelated to observer
  // registration.
  if (std::find(observers_.begin(), observers_.end(), observer) ==
      observers_.end()) {
    observers_.push_back(observer);
  }
}

/// Unregisters |observer|; a no-op when the observer was never added.
///
/// Threadsafe.
void SyncSwitch::RemoveObserver(Observer* observer) const {
  fml::UniqueLock lock(*mutex_);
  // Erase-remove idiom: compact the remaining observers to the front of the
  // vector, then chop off the tail.
  auto trailing =
      std::remove(observers_.begin(), observers_.end(), observer);
  observers_.erase(trailing, observers_.end());
}
} // namespace fml
17 changes: 17 additions & 0 deletions fml/synchronization/sync_switch.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,16 @@ namespace fml {
/// at a time.
class SyncSwitch {
public:
/// Observes changes to the SyncSwitch.
class Observer {
public:
virtual ~Observer() = default;
/// `new_is_disabled` is not guaranteed to still be the value of the
/// SyncSwitch at the time of execution; the current value should be checked
/// with calls to SyncSwitch::Execute.
virtual void OnSyncSwitchUpdate(bool new_is_disabled) = 0;
};

/// Represents the 2 code paths available when calling |SyncSwitch::Execute|.
struct Handlers {
/// Sets the handler that will be executed if the |SyncSwitch| is true.
Expand Down Expand Up @@ -53,8 +63,15 @@ class SyncSwitch {
/// @param[in] value New value for the |SyncSwitch|.
void SetSwitch(bool value);

/// Threadsafe.
void AddObserver(Observer* observer) const;

/// Threadsafe.
void RemoveObserver(Observer* observer) const;

private:
mutable std::unique_ptr<fml::SharedMutex> mutex_;
mutable std::vector<Observer*> observers_;
bool value_;

FML_DISALLOW_COPY_AND_ASSIGN(SyncSwitch);
Expand Down
19 changes: 18 additions & 1 deletion impeller/renderer/backend/metal/context_mtl.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@

#include <Metal/Metal.h>

#include <deque>
#include <string>
#include <vector>

#include "flutter/fml/concurrent_message_loop.h"
#include "flutter/fml/macros.h"
Expand Down Expand Up @@ -93,7 +93,20 @@ class ContextMTL final : public Context,

std::shared_ptr<const fml::SyncSwitch> GetIsGpuDisabledSyncSwitch() const;

// |Context|
void StoreTaskForGPU(std::function<void()> task) override;

private:
// Watches the "is GPU disabled" SyncSwitch so the owning ContextMTL can
// flush tasks that were deferred while the GPU was unavailable.
class SyncSwitchObserver : public fml::SyncSwitch::Observer {
 public:
  // |parent| must outlive this observer. ContextMTL owns the observer and
  // unregisters it in its destructor, which preserves that invariant.
  SyncSwitchObserver(ContextMTL& parent);
  virtual ~SyncSwitchObserver() = default;
  void OnSyncSwitchUpdate(bool new_value) override;

 private:
  ContextMTL& parent_;  // Not owned.
};

id<MTLDevice> device_ = nullptr;
id<MTLCommandQueue> command_queue_ = nullptr;
std::shared_ptr<ShaderLibraryMTL> shader_library_;
Expand All @@ -103,6 +116,8 @@ class ContextMTL final : public Context,
std::shared_ptr<const Capabilities> device_capabilities_;
std::shared_ptr<fml::ConcurrentMessageLoop> raster_message_loop_;
std::shared_ptr<const fml::SyncSwitch> is_gpu_disabled_sync_switch_;
std::deque<std::function<void()>> tasks_awaiting_gpu_;
std::unique_ptr<SyncSwitchObserver> sync_switch_observer_;
bool is_valid_ = false;

ContextMTL(
Expand All @@ -114,6 +129,8 @@ class ContextMTL final : public Context,
std::shared_ptr<CommandBuffer> CreateCommandBufferInQueue(
id<MTLCommandQueue> queue) const;

void FlushTasksAwaitingGPU();

FML_DISALLOW_COPY_AND_ASSIGN(ContextMTL);
};

Expand Down
31 changes: 30 additions & 1 deletion impeller/renderer/backend/metal/context_mtl.mm
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,9 @@ static bool DeviceSupportsComputeSubgroups(id<MTLDevice> device) {
return;
}

sync_switch_observer_.reset(new SyncSwitchObserver(*this));
is_gpu_disabled_sync_switch_->AddObserver(sync_switch_observer_.get());

// Worker task runner.
{
raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
Expand Down Expand Up @@ -284,7 +287,9 @@ new ContextMTL(device, command_queue,
return context;
}

ContextMTL::~ContextMTL() = default;
ContextMTL::~ContextMTL() {
  // Unregister before sync_switch_observer_ is destroyed so the sync switch
  // never invokes a dangling observer pointer.
  is_gpu_disabled_sync_switch_->RemoveObserver(sync_switch_observer_.get());
}

Context::BackendType ContextMTL::GetBackendType() const {
return Context::BackendType::kMetal;
Expand Down Expand Up @@ -376,4 +381,28 @@ new ContextMTL(device, command_queue,
return buffer;
}

// |Context|
//
// Queues |task| until the GPU becomes available again (see
// SyncSwitchObserver::OnSyncSwitchUpdate, which flushes the queue).
void ContextMTL::StoreTaskForGPU(std::function<void()> task) {
  tasks_awaiting_gpu_.emplace_back(std::move(task));
  // Bound the queue at kMaxTasksAwaitingGPU. On overflow the oldest tasks
  // are executed immediately; tasks are expected to re-check GPU
  // availability themselves (via the sync switch) and fail gracefully if the
  // GPU is still unavailable.
  // NOTE(review): tasks_awaiting_gpu_ is not guarded by a lock here even
  // though Context::StoreTaskForGPU is documented as threadsafe — presumably
  // all store/flush calls happen on the platform thread; TODO confirm.
  while (tasks_awaiting_gpu_.size() > kMaxTasksAwaitingGPU) {
    tasks_awaiting_gpu_.front()();
    tasks_awaiting_gpu_.pop_front();
  }
}

// Runs every task stored while the GPU was disabled, then clears the queue.
void ContextMTL::FlushTasksAwaitingGPU() {
  // NOTE(review): if a task re-enters StoreTaskForGPU synchronously, the
  // push_back would invalidate this range-for's iterators — assumes tasks do
  // not store new GPU tasks during the flush; TODO confirm.
  for (const auto& task : tasks_awaiting_gpu_) {
    task();
  }
  tasks_awaiting_gpu_.clear();
}

// Keeps a reference to the owning context; |parent| must outlive this
// observer.
ContextMTL::SyncSwitchObserver::SyncSwitchObserver(ContextMTL& parent)
    : parent_(parent) {}

// Flushes any deferred GPU tasks as soon as the GPU becomes available again.
void ContextMTL::SyncSwitchObserver::OnSyncSwitchUpdate(bool new_is_disabled) {
  if (new_is_disabled) {
    // GPU is (still) disabled; nothing to flush.
    return;
  }
  parent_.FlushTasksAwaitingGPU();
}

} // namespace impeller
21 changes: 21 additions & 0 deletions impeller/renderer/context.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,14 @@ class Context {
kVulkan,
};

/// The maximum number of tasks that should ever be stored for
/// `StoreTaskForGPU`.
///
/// This number was chosen arbitrarily, so don't hesitate to increase it.
/// It covers the somewhat rare situation where tasks are stored during the
/// tiny window in which an app is being backgrounded but is still executing.
static constexpr int32_t kMaxTasksAwaitingGPU = 10;
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh by the way. I'm open to increasing this. I just pulled this number out of nowhere but I knew I didn't want it to be completely unbounded.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would leave a note that this was arbitrarily chosen so we're not afraid to increase it in the future.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done!


//----------------------------------------------------------------------------
/// @brief Destroys an Impeller context.
///
Expand Down Expand Up @@ -176,6 +184,19 @@ class Context {

CaptureContext capture;

/// Stores a task on the `Context` that is awaiting access to the GPU.
///
/// The task will be executed in the event that the GPU access has changed to
/// being available or that the task has been canceled. The task should
/// operate with the `SyncSwitch` to make sure the GPU is accessible.
///
/// If more than `kMaxTasksAwaitingGPU` tasks are stored, the oldest tasks
/// are executed immediately; they must check GPU availability themselves and
/// fail gracefully if it is still unavailable.
///
/// Threadsafe.
///
/// `task` will be executed on the platform thread.
///
/// The base implementation aborts: storing GPU tasks is only supported by
/// backends that override this (e.g. `ContextMTL`).
virtual void StoreTaskForGPU(std::function<void()> task) {
  FML_CHECK(false && "not supported in this context");
}

protected:
Context();

Expand Down
40 changes: 36 additions & 4 deletions lib/ui/fixtures/ui_test.dart
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,11 @@ external void _validateExternal(Uint8List result);
@pragma('vm:external-name', 'ValidateError')
external void _validateError(String? error);
@pragma('vm:external-name', 'TurnOffGPU')
external void _turnOffGPU();
external void _turnOffGPU(bool value);
@pragma('vm:external-name', 'FlushGpuAwaitingTasks')
external void _flushGpuAwaitingTasks();
@pragma('vm:external-name', 'ValidateNotNull')
external void _validateNotNull(Object? object);

@pragma('vm:entry-point')
Future<void> toByteDataWithoutGPU() async {
Expand All @@ -338,12 +342,40 @@ Future<void> toByteDataWithoutGPU() async {
canvas.drawCircle(c, 25.0, paint);
final Picture picture = pictureRecorder.endRecording();
final Image image = await picture.toImage(100, 100);
_turnOffGPU();
_turnOffGPU(true);
Timer flusher = Timer.periodic(Duration(milliseconds: 1), (timer) {
_flushGpuAwaitingTasks();
});
try {
ByteData? byteData = await image.toByteData();
_validateError(null);
} catch (ex) {
_validateError(ex.toString());
} catch (error) {
_validateError(error.toString());
} finally {
flusher.cancel();
}
}

// Test fixture: verifies that `Image.toByteData` succeeds when the GPU is
// disabled at call time but becomes available again shortly afterwards
// (exercising the stored-task retry path in the engine).
@pragma('vm:entry-point')
Future<void> toByteDataRetries() async {
  // Draw a filled circle into a 100x100 picture-backed image.
  final PictureRecorder pictureRecorder = PictureRecorder();
  final Canvas canvas = Canvas(pictureRecorder);
  final Paint paint = Paint()
    ..color = Color.fromRGBO(255, 255, 255, 1.0)
    ..style = PaintingStyle.fill;
  final Offset c = Offset(50.0, 50.0);
  canvas.drawCircle(c, 25.0, paint);
  final Picture picture = pictureRecorder.endRecording();
  final Image image = await picture.toImage(100, 100);
  // Disable the GPU now, then re-enable it shortly after so the stored
  // toByteData task is retried and can succeed.
  _turnOffGPU(true);
  Future<void>.delayed(Duration(milliseconds: 10), () {
    _turnOffGPU(false);
  });
  try {
    ByteData? byteData = await image.toByteData();
    // Success path: the retry produced non-null data.
    _validateNotNull(byteData);
  } catch (error) {
    // Failure path: report null so the native harness fails the test.
    _validateNotNull(null);
  }
}

Expand Down
75 changes: 62 additions & 13 deletions lib/ui/painting/image_encoding_impeller.cc
Original file line number Diff line number Diff line change
Expand Up @@ -56,21 +56,68 @@ sk_sp<SkImage> ConvertBufferToSkImage(
return raster_image;
}

void DoConvertImageToRasterImpeller(
[[nodiscard]] fml::Status DoConvertImageToRasterImpeller(
const sk_sp<DlImage>& dl_image,
std::function<void(fml::StatusOr<sk_sp<SkImage>>)> encode_task,
const std::function<void(fml::StatusOr<sk_sp<SkImage>>)>& encode_task,
const std::shared_ptr<const fml::SyncSwitch>& is_gpu_disabled_sync_switch,
const std::shared_ptr<impeller::Context>& impeller_context) {
fml::Status result;
is_gpu_disabled_sync_switch->Execute(
fml::SyncSwitch::Handlers()
.SetIfTrue([&encode_task] {
encode_task(
fml::Status(fml::StatusCode::kUnavailable, "GPU unavailable."));
.SetIfTrue([&result] {
result =
fml::Status(fml::StatusCode::kUnavailable, "GPU unavailable.");
})
.SetIfFalse([&dl_image, &encode_task, &impeller_context] {
ImageEncodingImpeller::ConvertDlImageToSkImage(
dl_image, std::move(encode_task), impeller_context);
dl_image, encode_task, impeller_context);
}));
return result;
}

/// Same as `DoConvertImageToRasterImpeller` but it will attempt to retry the
/// operation if `DoConvertImageToRasterImpeller` returns kUnavailable when
/// the GPU becomes available again.
///
/// |retry_runner| is the task runner the retry is posted to; when null the
/// retry executes directly on whatever thread flushes the stored GPU tasks.
void DoConvertImageToRasterImpellerWithRetry(
    const sk_sp<DlImage>& dl_image,
    std::function<void(fml::StatusOr<sk_sp<SkImage>>)>&& encode_task,
    const std::shared_ptr<const fml::SyncSwitch>& is_gpu_disabled_sync_switch,
    const std::shared_ptr<impeller::Context>& impeller_context,
    const fml::RefPtr<fml::TaskRunner>& retry_runner) {
  fml::Status status = DoConvertImageToRasterImpeller(
      dl_image, encode_task, is_gpu_disabled_sync_switch, impeller_context);
  if (!status.ok()) {
    // If the conversion failed because of the GPU is unavailable, store the
    // task on the Context so it can be executed when the GPU becomes
    // available.
    if (status.code() == fml::StatusCode::kUnavailable) {
      // encode_task is moved into the stored closure, then moved again into
      // retry_task — safe because each closure is invoked at most once.
      impeller_context->StoreTaskForGPU(
          [dl_image, encode_task = std::move(encode_task),
           is_gpu_disabled_sync_switch, impeller_context,
           retry_runner]() mutable {
            auto retry_task = [dl_image, encode_task = std::move(encode_task),
                               is_gpu_disabled_sync_switch, impeller_context] {
              fml::Status retry_status = DoConvertImageToRasterImpeller(
                  dl_image, encode_task, is_gpu_disabled_sync_switch,
                  impeller_context);
              if (!retry_status.ok()) {
                // The retry failed for some reason, maybe the GPU became
                // unavailable again. Don't retry again, just fail in this
                // case.
                encode_task(retry_status);
              }
            };
            // If a `retry_runner` is specified, post the retry to it,
            // otherwise execute it directly.
            if (retry_runner) {
              retry_runner->PostTask(retry_task);
            } else {
              retry_task();
            }
          });
    } else {
      // Pass on errors that are not `kUnavailable`.
      encode_task(status);
    }
  }
}

} // namespace
Expand Down Expand Up @@ -153,18 +200,20 @@ void ImageEncodingImpeller::ConvertImageToRaster(
};

if (dl_image->owning_context() != DlImage::OwningContext::kRaster) {
DoConvertImageToRasterImpeller(dl_image, std::move(encode_task),
is_gpu_disabled_sync_switch,
impeller_context);
DoConvertImageToRasterImpellerWithRetry(dl_image, std::move(encode_task),
is_gpu_disabled_sync_switch,
impeller_context,
/*retry_runner=*/nullptr);
return;
}

raster_task_runner->PostTask([dl_image, encode_task = std::move(encode_task),
io_task_runner, is_gpu_disabled_sync_switch,
impeller_context]() mutable {
DoConvertImageToRasterImpeller(dl_image, std::move(encode_task),
is_gpu_disabled_sync_switch,
impeller_context);
impeller_context,
raster_task_runner]() mutable {
DoConvertImageToRasterImpellerWithRetry(
dl_image, std::move(encode_task), is_gpu_disabled_sync_switch,
impeller_context, raster_task_runner);
});
}

Expand Down
Loading