Skip to content

Commit

Permalink
GPU: Introduce CommandBuffer Mojo interface
Browse files Browse the repository at this point in the history
This adds CommandBuffer as a new Mojo interface associated with its
owning GpuChannel. There's also a corresponding CommandBufferClient
associated and running in the opposite direction. Since there's
a good bit of new infrastructure involved to ensure behavioral
parity with the current message dispatch, this CL only migrates
one message to each new interface for now.

To replicate the existing scheduling behavior of messages moved to
the CommandBuffer interface, a new gpu::SchedulerTaskRunner is
introduced to expose a base::SequencedTaskRunner interface for a
specific sequence on the gpu::Scheduler. The CommandBuffer's
Mojo receiver binds using this task runner.

This also requires loosening some sequence safety checks inside Mojo
bindings. To wit, we want all received messages to be dispatched by
a SchedulerTaskRunner, but we want to set up and tear down the
receiver from tasks that are NOT run by that SchedulerTaskRunner.
Since the base sequencing APIs (and in turn Mojo bindings' internal
safety checks) aren't very well suited to this kind of arrangement,
we have to pull a few strings: endpoints may now be bound to a
sequence other than the one doing the binding (this turns out to be
trivially safe to allow), and they can also be torn down from a
sequence other than the one to which they're bound, provided the
caller knows what they're doing and uses the dubious but aptly
foreboding ResetFromAnotherSequenceUnsafe().

Bug: 1196476
Change-Id: Ie2b2cd775c271f0e9a105949e9df65b88f21ffa2
Binary-Size: New usage of associated endpoints introduces a lot of new generated code. https://crbug.com/1208544
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2863930
Commit-Queue: Ken Rockot <rockot@google.com>
Reviewed-by: Sunny Sachanandani <sunnyps@chromium.org>
Reviewed-by: Tom Sepez <tsepez@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Cr-Commit-Position: refs/heads/master@{#885034}
  • Loading branch information
krockot authored and Chromium LUCI CQ committed May 20, 2021
1 parent 702f4d4 commit 1b1ee65
Show file tree
Hide file tree
Showing 26 changed files with 542 additions and 93 deletions.
2 changes: 2 additions & 0 deletions gpu/command_buffer/service/BUILD.gn
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,8 @@ target(link_target_type, "service_sources") {
"memory_tracking.h",
"scheduler.cc",
"scheduler.h",
"scheduler_task_runner.cc",
"scheduler_task_runner.h",
"sequence_id.h",
"sync_point_manager.cc",
"sync_point_manager.h",
Expand Down
82 changes: 82 additions & 0 deletions gpu/command_buffer/service/scheduler_task_runner.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/scheduler_task_runner.h"

#include <utility>
#include <vector>

#include "base/bind.h"
#include "base/check.h"
#include "base/no_destructor.h"
#include "base/threading/thread_local.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/scheduler.h"

namespace gpu {

namespace {

// Thread-local slot recording which SchedulerTaskRunner, if any, is currently
// running a task on the calling thread. Lazily constructed and intentionally
// leaked via base::NoDestructor.
base::ThreadLocalPointer<const SchedulerTaskRunner>&
GetCurrentTaskRunnerStorage() {
  static base::NoDestructor<base::ThreadLocalPointer<const SchedulerTaskRunner>>
      storage;
  return *storage;
}

// Marks `runner` as the task runner active on the calling thread. Pass null
// to clear the marker once the task has finished.
void SetCurrentTaskRunner(const SchedulerTaskRunner* runner) {
  GetCurrentTaskRunnerStorage().Set(runner);
}

// Returns the task runner currently running a task on the calling thread, or
// null if no SchedulerTaskRunner task is running here.
const SchedulerTaskRunner* GetCurrentTaskRunner() {
  return GetCurrentTaskRunnerStorage().Get();
}

}  // namespace

// Binds this runner to `scheduler` and a specific scheduler sequence. The
// Scheduler reference is stored, so `scheduler` must outlive this object.
SchedulerTaskRunner::SchedulerTaskRunner(Scheduler& scheduler,
                                         SequenceId sequence_id)
    : scheduler_(scheduler), sequence_id_(sequence_id) {}

SchedulerTaskRunner::~SchedulerTaskRunner() = default;

void SchedulerTaskRunner::ShutDown() {
  // Causes all subsequent Post*Task() calls to reject their tasks.
  // NOTE(review): `is_running_` is a plain bool also read by Post*Task; this
  // assumes ShutDown() is properly sequenced against posters — TODO confirm.
  is_running_ = false;
}

bool SchedulerTaskRunner::PostDelayedTask(const base::Location& from_here,
                                          base::OnceClosure task,
                                          base::TimeDelta delay) {
  // The Scheduler never nests tasks, so nestable and non-nestable posting are
  // equivalent here; forward to the single implementation below.
  return PostNonNestableDelayedTask(from_here, std::move(task), delay);
}

// Posts `task` to this runner's Scheduler sequence. Returns false once
// ShutDown() has been called. Only zero delays are supported because the
// underlying Scheduler cannot schedule delayed tasks.
bool SchedulerTaskRunner::PostNonNestableDelayedTask(
    const base::Location& from_here,
    base::OnceClosure task,
    base::TimeDelta delay) {
  // Reject everything after ShutDown().
  if (!is_running_)
    return false;

  // The Scheduler has no delayed-task support, so a non-zero delay is a
  // programming error.
  CHECK(delay.is_zero());

  // Wrap the closure in RunTask() so the thread-local "current runner" marker
  // is maintained while it runs. No sync-token dependencies are needed.
  Scheduler::Task scheduler_task(
      sequence_id_,
      base::BindOnce(&SchedulerTaskRunner::RunTask, this, std::move(task)),
      std::vector<SyncToken>());
  scheduler_.ScheduleTask(std::move(scheduler_task));
  return true;
}

bool SchedulerTaskRunner::RunsTasksInCurrentSequence() const {
const SchedulerTaskRunner* current = GetCurrentTaskRunner();
return current != nullptr && current->sequence_id_ == sequence_id_;
}

// Runs a posted task with this runner published in thread-local storage, so
// RunsTasksInCurrentSequence() answers correctly from inside the task.
void SchedulerTaskRunner::RunTask(base::OnceClosure task) {
  // Scheduler doesn't nest tasks, so we don't support nesting.
  DCHECK(!GetCurrentTaskRunner());
  SetCurrentTaskRunner(this);
  std::move(task).Run();
  // Clear the marker before returning control to the Scheduler.
  SetCurrentTaskRunner(nullptr);
}

} // namespace gpu
52 changes: 52 additions & 0 deletions gpu/command_buffer/service/scheduler_task_runner.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_SERVICE_SCHEDULER_TASK_RUNNER_H_
#define GPU_COMMAND_BUFFER_SERVICE_SCHEDULER_TASK_RUNNER_H_

#include "base/sequenced_task_runner.h"
#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/gpu_export.h"

namespace gpu {

class Scheduler;

// A SequencedTaskRunner implementation to abstract task execution for a
// specific SequenceId on the GPU Scheduler. This object does not support
// delayed tasks, because the underlying Scheduler implementation does not
// support scheduling delayed tasks. Also note that tasks run by this object do
// not support running nested RunLoops.
class GPU_EXPORT SchedulerTaskRunner : public base::SequencedTaskRunner {
 public:
  // Constructs a SchedulerTaskRunner that runs tasks on `scheduler`, on the
  // sequence identified by `sequence_id`. This instance must not outlive
  // `scheduler`.
  SchedulerTaskRunner(Scheduler& scheduler, SequenceId sequence_id);

  // Once this is called, all subsequent tasks will be rejected.
  void ShutDown();

  // base::SequencedTaskRunner:
  // Only zero `delay` values are supported; the implementation CHECKs that
  // the delay is zero because the Scheduler cannot run delayed tasks.
  bool PostDelayedTask(const base::Location& from_here,
                       base::OnceClosure task,
                       base::TimeDelta delay) override;
  bool PostNonNestableDelayedTask(const base::Location& from_here,
                                  base::OnceClosure task,
                                  base::TimeDelta delay) override;
  bool RunsTasksInCurrentSequence() const override;

 private:
  // Non-public: lifetime is managed through the base::SequencedTaskRunner
  // base class, so callers never destroy instances directly.
  ~SchedulerTaskRunner() override;

  // Runs `task` with this runner recorded in thread-local storage so that
  // RunsTasksInCurrentSequence() holds while the task executes.
  void RunTask(base::OnceClosure task);

  // The Scheduler tasks are posted to. Must outlive this object.
  Scheduler& scheduler_;
  // Identifies which Scheduler sequence this runner posts to.
  const SequenceId sequence_id_;
  // Cleared by ShutDown(); checked before accepting each posted task.
  bool is_running_ = true;
};

} // namespace gpu

#endif // GPU_COMMAND_BUFFER_SERVICE_SCHEDULER_TASK_RUNNER_H_
20 changes: 13 additions & 7 deletions gpu/ipc/client/command_buffer_proxy_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
#include "gpu/ipc/common/gpu_channel.mojom.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/common/gpu_param_traits.h"
#include "ipc/ipc_mojo_bootstrap.h"
#include "mojo/public/cpp/bindings/sync_call_restrictions.h"
#include "mojo/public/cpp/system/buffer.h"
#include "mojo/public/cpp/system/platform_handle.h"
Expand Down Expand Up @@ -119,15 +120,23 @@ ContextResult CommandBufferProxyImpl::Initialize(
// TODO(piman): Make this asynchronous (http://crbug.com/125248).
ContextResult result = ContextResult::kSuccess;
mojo::SyncCallRestrictions::ScopedAllowSyncCall allow_sync;
IPC::ScopedAllowOffSequenceChannelAssociatedBindings allow_binding;
bool sent = channel->GetGpuChannel().CreateCommandBuffer(
std::move(params), route_id_, std::move(region), &result, &capabilities_);
std::move(params), route_id_, std::move(region),
command_buffer_.BindNewEndpointAndPassReceiver(channel->io_task_runner()),
client_receiver_.BindNewEndpointAndPassRemote(callback_thread_), &result,
&capabilities_);
if (!sent) {
command_buffer_.reset();
client_receiver_.reset();
channel->RemoveRoute(route_id_);
LOG(ERROR) << "ContextResult::kTransientFailure: "
"Failed to send GpuControl.CreateCommandBuffer.";
return ContextResult::kTransientFailure;
}
if (result != ContextResult::kSuccess) {
command_buffer_.reset();
client_receiver_.reset();
DLOG(ERROR) << "Failure processing GpuControl.CreateCommandBuffer.";
channel->RemoveRoute(route_id_);
return result;
Expand All @@ -142,7 +151,6 @@ bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_GpuSwitched, OnGpuSwitched);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalAck, OnSignalAck);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted,
Expand Down Expand Up @@ -185,11 +193,9 @@ void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
OnGpuAsyncMessageError(reason, error);
}

void CommandBufferProxyImpl::OnConsoleMessage(
const GPUCommandBufferConsoleMessage& message) {
void CommandBufferProxyImpl::OnConsoleMessage(const std::string& message) {
if (gpu_control_client_)
gpu_control_client_->OnGpuControlErrorMessage(message.message.c_str(),
message.id);
gpu_control_client_->OnGpuControlErrorMessage(message.c_str(), /*id=*/0);
}

void CommandBufferProxyImpl::OnGpuSwitched(
Expand Down Expand Up @@ -359,7 +365,7 @@ void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
if (last_state_.error != gpu::error::kNoError)
return;

Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
command_buffer_->SetGetBuffer(shm_id);
last_put_offset_ = -1;
has_buffer_ = (shm_id > 0);
}
Expand Down
14 changes: 11 additions & 3 deletions gpu/ipc/client/command_buffer_proxy_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,12 +35,14 @@
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/common/scheduling_priority.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/common/gpu_channel.mojom.h"
#include "gpu/ipc/common/surface_handle.h"
#include "ipc/ipc_listener.h"
#include "mojo/public/cpp/bindings/associated_receiver.h"
#include "mojo/public/cpp/bindings/shared_associated_remote.h"
#include "ui/gfx/swap_result.h"
#include "ui/gl/gpu_preference.h"

struct GPUCommandBufferConsoleMessage;
class GURL;

namespace base {
Expand All @@ -67,7 +69,8 @@ class GpuMemoryBufferManager;
// CommandBufferStub.
class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
public gpu::GpuControl,
public IPC::Listener {
public IPC::Listener,
public mojom::CommandBufferClient {
public:
class DeletionObserver {
public:
Expand Down Expand Up @@ -183,10 +186,12 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
std::pair<base::UnsafeSharedMemoryRegion, base::WritableSharedMemoryMapping>
AllocateAndMapSharedMemory(size_t size);

// mojom::CommandBufferClient:
void OnConsoleMessage(const std::string& message) override;

// Message handlers:
void OnDestroyed(gpu::error::ContextLostReason reason,
gpu::error::Error error);
void OnConsoleMessage(const GPUCommandBufferConsoleMessage& message);
void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic);
void OnSignalAck(uint32_t id, const CommandBuffer::State& state);
void OnSwapBuffersCompleted(const SwapBuffersCompleteParams& params);
Expand Down Expand Up @@ -271,6 +276,9 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
int32_t last_put_offset_ = -1;
bool has_buffer_ = false;

mojo::SharedAssociatedRemote<mojom::CommandBuffer> command_buffer_;
mojo::AssociatedReceiver<mojom::CommandBufferClient> client_receiver_{this};

// Next generated fence sync.
uint64_t next_fence_sync_release_ = 1;

Expand Down
17 changes: 15 additions & 2 deletions gpu/ipc/client/command_buffer_proxy_impl_unittest.cc
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include "gpu/command_buffer/common/context_creation_attribs.h"
#include "gpu/ipc/client/gpu_channel_host.h"
#include "gpu/ipc/common/gpu_channel.mojom.h"
#include "gpu/ipc/common/mock_command_buffer.h"
#include "gpu/ipc/common/mock_gpu_channel.h"
#include "gpu/ipc/common/surface_handle.h"
#include "ipc/ipc_test_sink.h"
Expand Down Expand Up @@ -83,20 +84,32 @@ class CommandBufferProxyImplTest : public testing::Test {
base::RunLoop().RunUntilIdle();
}

std::unique_ptr<CommandBufferProxyImpl> CreateAndInitializeProxy() {
std::unique_ptr<CommandBufferProxyImpl> CreateAndInitializeProxy(
MockCommandBuffer* mock_command_buffer = nullptr) {
auto proxy = std::make_unique<CommandBufferProxyImpl>(
channel_, nullptr /* gpu_memory_buffer_manager */, 0 /* stream_id */,
base::ThreadTaskRunnerHandle::Get());

// The Initialize() call below synchronously requests a new CommandBuffer
// using the channel's GpuControl interface. Simulate success, since we're
// not actually talking to the service in these tests.
EXPECT_CALL(mock_gpu_channel_, CreateCommandBuffer(_, _, _, _, _))
EXPECT_CALL(mock_gpu_channel_, CreateCommandBuffer(_, _, _, _, _, _, _))
.Times(1)
.WillOnce(Invoke(
[&](mojom::CreateCommandBufferParamsPtr params, int32_t routing_id,
base::UnsafeSharedMemoryRegion shared_state,
mojo::PendingAssociatedReceiver<mojom::CommandBuffer> receiver,
mojo::PendingAssociatedRemote<mojom::CommandBufferClient>
client,
ContextResult* result, Capabilities* capabilities) -> bool {
// There's no real GpuChannel pipe for this endpoint to use, so
// give it its own dedicated pipe for these tests. This allows the
// CommandBufferProxyImpl to make calls on its CommandBuffer
// endpoint, which will send them to `mock_command_buffer` if
// provided by the test.
receiver.EnableUnassociatedUsage();
if (mock_command_buffer)
mock_command_buffer->Bind(std::move(receiver));
*result = ContextResult::kSuccess;
return true;
}));
Expand Down
4 changes: 4 additions & 0 deletions gpu/ipc/client/gpu_channel_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,10 @@ class GPU_EXPORT GpuChannelHost

int channel_id() const { return channel_id_; }

const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner() {
return io_thread_;
}

// Virtual for testing.
virtual mojom::GpuChannel& GetGpuChannel();

Expand Down
2 changes: 2 additions & 0 deletions gpu/ipc/common/BUILD.gn
Original file line number Diff line number Diff line change
Expand Up @@ -740,6 +740,8 @@ source_set("mojom_traits") {
source_set("test_support") {
testonly = true
sources = [
"mock_command_buffer.cc",
"mock_command_buffer.h",
"mock_gpu_channel.cc",
"mock_gpu_channel.h",
]
Expand Down
22 changes: 21 additions & 1 deletion gpu/ipc/common/gpu_channel.mojom
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,9 @@ interface GpuChannel {
// will create an offscreen backbuffer of dimensions `size`.
[Sync, NoInterrupt] CreateCommandBuffer(
CreateCommandBufferParams params, int32 routing_id,
mojo_base.mojom.UnsafeSharedMemoryRegion shared_state)
mojo_base.mojom.UnsafeSharedMemoryRegion shared_state,
pending_associated_receiver<CommandBuffer> receiver,
pending_associated_remote<CommandBufferClient> client)
=> (ContextResult result, Capabilities capabilties);

// The CommandBufferProxy sends this to the CommandBufferStub in its
Expand Down Expand Up @@ -198,6 +200,24 @@ interface GpuChannel {
=> (CommandBufferState state);
};

// Interface used to issue commands to a specific CommandBuffer instance in the
// GPU process.
interface CommandBuffer {
// Sets the shared memory buffer to use for commands. The ID given here must
// correspond to one registered by a prior RegisterTransferBuffer IPC to the
// same CommandBuffer.
SetGetBuffer(int32 shm_id);
};

// Interface used by the GPU process to send the client messages from a specific
// CommandBuffer instance.
interface CommandBufferClient {
// Notifies the client about a console message emitted on behalf of the
// command buffer. These messages are intended to be exposed by
// developer-facing UI such as the DevTools console.
OnConsoleMessage(string message);
};

// DeferredRequests are batched locally by clients and sent to the service only
// when flushing the channel via GpuChannelHost's EnsureFlush or VerifyFlush.
struct DeferredRequest {
Expand Down
12 changes: 0 additions & 12 deletions gpu/ipc/common/gpu_messages.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,6 @@

#define IPC_MESSAGE_START GpuChannelMsgStart

IPC_STRUCT_BEGIN(GPUCommandBufferConsoleMessage)
IPC_STRUCT_MEMBER(int32_t, id)
IPC_STRUCT_MEMBER(std::string, message)
IPC_STRUCT_END()

IPC_STRUCT_BEGIN(GpuCommandBufferMsg_CreateImage_Params)
IPC_STRUCT_MEMBER(int32_t, id)
IPC_STRUCT_MEMBER(gfx::GpuMemoryBufferHandle, gpu_memory_buffer)
Expand Down Expand Up @@ -139,13 +134,6 @@ IPC_MESSAGE_ROUTED1(GpuStreamTextureMsg_UpdateRotatedVisibleSize,
// These are messages between a renderer process to the GPU process relating to
// a single OpenGL context.

// Sets the shared memory buffer used for commands.
IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_SetGetBuffer, int32_t /* shm_id */)

// Sent by the GPU process to display messages in the console.
IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ConsoleMsg,
GPUCommandBufferConsoleMessage /* msg */)

// Sent by the GPU process to notify the renderer process of a GPU switch.
IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_GpuSwitched,
gl::GpuPreference /* active_gpu_heuristic */)
Expand Down
Loading

0 comments on commit 1b1ee65

Please sign in to comment.