Remove ReadWriteTokens from GpuVideoDecodeAccelerator + fallout
With apatrick's changes in r93066, we no longer need to manually sync up with the
command buffer before handling IPC messages in GpuVideoDecodeAccelerator.

BUG=none
TEST=gles2 example runs without crashing

Review URL: http://codereview.chromium.org/7521015

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@94561 0039d316-1c4b-4281-b951-d872f2087c98
vrk@google.com committed Jul 28, 2011
1 parent 6930c52 commit a81c24c
Showing 25 changed files with 75 additions and 317 deletions.
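Editor's note: for readers unfamiliar with the mechanism being deleted, below is a minimal standalone sketch of the token-deferral pattern this commit removes. The types and names (Message, required_token, TokenDeferringDecoder) are hypothetical stand-ins, not the actual Chromium classes; the real code used gpu::ReadWriteTokens carried on each IPC message plus a SetTokenCallback on the scheduler, as visible in the diffs that follow. After r93066 the command buffer handles this ordering itself, so the decoder can dispatch messages directly and all of this bookkeeping disappears.

// Hypothetical sketch (not Chromium code) of the removed deferral pattern.
#include <cstdint>
#include <iostream>
#include <vector>

struct Message {
  int32_t required_token;  // stand-in for gpu::ReadWriteTokens::last_token_written
  const char* name;
};

class TokenDeferringDecoder {
 public:
  // Old behavior: defer handling until the command buffer's token has
  // advanced past the value the renderer wrote after submitting its GL work.
  void OnMessageReceived(const Message& msg) {
    if (msg.required_token > current_token_) {
      deferred_.push_back(msg);
      return;
    }
    Handle(msg);
  }

  // Analogue of the SetTokenCallback / OnSetToken plumbing being deleted:
  // every new token retries all deferred messages.
  void OnSetToken(int32_t token) {
    current_token_ = token;
    std::vector<Message> retry;
    retry.swap(deferred_);
    for (const Message& m : retry)
      OnMessageReceived(m);
  }

 private:
  void Handle(const Message& msg) { std::cout << "handling " << msg.name << "\n"; }

  int32_t current_token_ = 0;
  std::vector<Message> deferred_;
};

int main() {
  TokenDeferringDecoder decoder;
  decoder.OnMessageReceived({5, "Decode"});  // deferred: token 5 not reached yet
  decoder.OnSetToken(5);                     // token arrives, message is handled
}

After this change, GpuVideoDecodeAccelerator::OnMessageReceived simply dispatches every message through its IPC_BEGIN_MESSAGE_MAP, as the remaining code in gpu_video_decode_accelerator.cc shows.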
13 changes: 0 additions & 13 deletions content/common/gpu/gpu_command_buffer_stub.cc
@@ -5,7 +5,6 @@
#if defined(ENABLE_GPU)

#include "base/bind.h"
#include "base/callback.h"
#include "base/debug/trace_event.h"
#include "base/process_util.h"
#include "base/shared_memory.h"
@@ -202,8 +201,6 @@ void GpuCommandBufferStub::OnInitialize(
NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers));
scheduler_->SetScheduledCallback(
NewCallback(channel_, &GpuChannel::OnScheduled));
scheduler_->SetTokenCallback(base::Bind(
&GpuCommandBufferStub::OnSetToken, base::Unretained(this)));
if (watchdog_)
scheduler_->SetCommandProcessedCallback(
NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed));
@@ -502,16 +499,6 @@ void GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped(
}
#endif // defined(OS_MACOSX)

void GpuCommandBufferStub::AddSetTokenCallback(
const base::Callback<void(int32)>& callback) {
set_token_callbacks_.push_back(callback);
}

void GpuCommandBufferStub::OnSetToken(int32 token) {
for (size_t i = 0; i < set_token_callbacks_.size(); ++i)
set_token_callbacks_[i].Run(token);
}

void GpuCommandBufferStub::ResizeCallback(gfx::Size size) {
if (handle_ == gfx::kNullPluginWindow) {
scheduler_->decoder()->ResizeOffscreenFrameBuffer(size);
15 changes: 0 additions & 15 deletions content/common/gpu/gpu_command_buffer_stub.h
@@ -71,12 +71,6 @@ class GpuCommandBufferStub
// to the same renderer process.
int32 route_id() const { return route_id_; }

// Return the current token in the underlying command buffer, or 0 if not yet
// initialized.
int32 token() const {
return command_buffer_.get() ? command_buffer_->GetState().token : 0;
}

#if defined(OS_WIN)
// Called only by the compositor window's window proc
void OnCompositorWindowPainted();
@@ -89,11 +83,6 @@
void AcceleratedSurfaceBuffersSwapped(uint64 swap_buffers_count);
#endif // defined(OS_MACOSX)

// Register a callback to be Run() whenever the underlying scheduler receives
// a set_token() call. The callback will be Run() with the just-set token as
// its only parameter. Multiple callbacks may be registered.
void AddSetTokenCallback(const base::Callback<void(int32)>& callback);

private:
// Cleans up and sends reply if OnInitialize failed.
void OnInitializeFailed(IPC::Message* reply_message);
@@ -138,9 +127,6 @@ class GpuCommandBufferStub
void ResizeCallback(gfx::Size size);
void ReportState();

// Callback registered with GpuScheduler to receive set_token() notifications.
void OnSetToken(int32 token);

// The lifetime of objects of this class is managed by a GpuChannel. The
// GpuChannels destroy all the GpuCommandBufferStubs that they own when they
// are destroyed. So a raw pointer is safe.
@@ -162,7 +148,6 @@ class GpuCommandBufferStub

scoped_ptr<gpu::CommandBufferService> command_buffer_;
scoped_ptr<gpu::GpuScheduler> scheduler_;
std::vector<base::Callback<void(int32)> > set_token_callbacks_;

// SetParent may be called before Initialize, in which case we need to keep
// around the parent stub, so that Initialize can set the parent correctly.
31 changes: 7 additions & 24 deletions content/common/gpu/gpu_messages.h
@@ -100,11 +100,6 @@ IPC_STRUCT_TRAITS_BEGIN(GPUInfo)
#endif
IPC_STRUCT_TRAITS_END()

IPC_STRUCT_TRAITS_BEGIN(gpu::ReadWriteTokens)
IPC_STRUCT_TRAITS_MEMBER(last_token_read)
IPC_STRUCT_TRAITS_MEMBER(last_token_written)
IPC_STRUCT_TRAITS_END()

IPC_ENUM_TRAITS(content::CauseForGpuLaunch)
IPC_ENUM_TRAITS(gpu::error::ContextLostReason)

@@ -458,45 +453,33 @@ IPC_MESSAGE_ROUTED1(GpuTransportTextureHostMsg_TextureUpdated,
//------------------------------------------------------------------------------
// Accelerated Video Decoder Messages
// These messages are sent from Renderer process to GPU process.
//
// These messages defer execution until |tokens.last_token_written| is
// seen (using |tokens.last_token_read| as a wrap-around indicator). The
// implementation REQUIRES that |tokens| be the first parameter of these
// messages.

// Send input buffer for decoding.
IPC_MESSAGE_ROUTED4(AcceleratedVideoDecoderMsg_Decode,
gpu::ReadWriteTokens, /* tokens */
IPC_MESSAGE_ROUTED3(AcceleratedVideoDecoderMsg_Decode,
base::SharedMemoryHandle, /* input_buffer_handle */
int32, /* bitstream_buffer_id */
int32) /* size */

// Sent from Renderer process to the GPU process to give the texture IDs for
// the textures the decoder will use for output. Delays evaluation until
// |token.second| is seen.
IPC_MESSAGE_ROUTED4(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
gpu::ReadWriteTokens, /* tokens */
// the textures the decoder will use for output.
IPC_MESSAGE_ROUTED3(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
std::vector<int32>, /* Picture buffer ID */
std::vector<uint32>, /* Texture ID */
std::vector<gfx::Size>) /* Size */

// Send from Renderer process to the GPU process to recycle the given picture
// buffer for further decoding.
IPC_MESSAGE_ROUTED2(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
gpu::ReadWriteTokens, /* tokens */
IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
int32) /* Picture buffer ID */

// Send flush request to the decoder.
IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderMsg_Flush,
gpu::ReadWriteTokens) /* tokens */
IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderMsg_Flush)

// Send reset request to the decoder.
IPC_MESSAGE_ROUTED1(AcceleratedVideoDecoderMsg_Reset,
gpu::ReadWriteTokens) /* tokens */
IPC_MESSAGE_ROUTED0(AcceleratedVideoDecoderMsg_Reset)

// Send destroy request to the decoder.
IPC_SYNC_MESSAGE_ROUTED1_0(AcceleratedVideoDecoderMsg_Destroy,
gpu::ReadWriteTokens) /* tokens */
IPC_SYNC_MESSAGE_ROUTED0_0(AcceleratedVideoDecoderMsg_Destroy)

//------------------------------------------------------------------------------
// Accelerated Video Decoder Host Messages
71 changes: 3 additions & 68 deletions content/common/gpu/media/gpu_video_decode_accelerator.cc
@@ -29,63 +29,12 @@ GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
host_route_id_(host_route_id),
stub_(stub),
video_decode_accelerator_(NULL) {
// stub_ owns and will always outlive this object.
stub_->AddSetTokenCallback(base::Bind(
&GpuVideoDecodeAccelerator::OnSetToken, base::Unretained(this)));
}

GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
STLDeleteElements(&deferred_messages_);
// TODO(fischman/vrk): We need to synchronously wait for the OMX decoder
// to finish shutting down.
}

void GpuVideoDecodeAccelerator::OnSetToken(int32 token) {
// Note: this always retries all deferred messages on every token arrival.
// There's an optimization to be done here by only trying messages which are
// waiting for tokens which are earlier than |token|.
std::vector<IPC::Message*> deferred_messages_copy;
std::swap(deferred_messages_copy, deferred_messages_);
for (size_t i = 0; i < deferred_messages_copy.size(); ++i)
OnMessageReceived(*deferred_messages_copy[i]);
STLDeleteElements(&deferred_messages_copy);
}

bool GpuVideoDecodeAccelerator::DeferMessageIfNeeded(
const IPC::Message& msg, bool* deferred) {
// Only consider deferring for message types that need it.
switch (msg.type()) {
case AcceleratedVideoDecoderMsg_Decode::ID:
case AcceleratedVideoDecoderMsg_AssignPictureBuffers::ID:
case AcceleratedVideoDecoderMsg_ReusePictureBuffer::ID:
case AcceleratedVideoDecoderMsg_Flush::ID:
case AcceleratedVideoDecoderMsg_Reset::ID:
case AcceleratedVideoDecoderMsg_Destroy::ID:
break;
default:
return false;
}

gpu::ReadWriteTokens tokens;
void* iter = NULL;
if (!IPC::ParamTraits<gpu::ReadWriteTokens>::Read(&msg, &iter, &tokens))
return false;
if (tokens.InRange(stub_->token())) {
deferred_messages_.push_back(new IPC::Message(msg));
*deferred = true;
} else {
*deferred = false;
}
return true;
}

bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
bool deferred = false;
if (!DeferMessageIfNeeded(msg, &deferred))
return false;
if (deferred)
return true;

bool handled = true;
IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
@@ -101,14 +50,6 @@ bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
return handled;
}

void GpuVideoDecodeAccelerator::OnChannelConnected(int32 peer_pid) {
// TODO(vmr): Do we have to react on channel connections?
}

void GpuVideoDecodeAccelerator::OnChannelError() {
// TODO(vmr): Do we have to react on channel errors?
}

void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
uint32 requested_num_of_buffers, const gfx::Size& dimensions) {
if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
@@ -167,14 +108,12 @@ void GpuVideoDecodeAccelerator::Initialize(const std::vector<uint32>& configs) {
}

void GpuVideoDecodeAccelerator::OnDecode(
const gpu::ReadWriteTokens&, /* tokens */
base::SharedMemoryHandle handle, int32 id, int32 size) {
DCHECK(video_decode_accelerator_.get());
video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}

void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
const gpu::ReadWriteTokens& /* tokens */,
const std::vector<int32>& buffer_ids,
const std::vector<uint32>& texture_ids,
const std::vector<gfx::Size>& sizes) {
@@ -197,26 +136,22 @@ void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
}

void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
const gpu::ReadWriteTokens& /* tokens */,
int32 picture_buffer_id) {
DCHECK(video_decode_accelerator_.get());
video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::OnFlush(
const gpu::ReadWriteTokens& /* tokens */) {
void GpuVideoDecodeAccelerator::OnFlush() {
DCHECK(video_decode_accelerator_.get());
video_decode_accelerator_->Flush();
}

void GpuVideoDecodeAccelerator::OnReset(
const gpu::ReadWriteTokens& /* tokens */) {
void GpuVideoDecodeAccelerator::OnReset() {
DCHECK(video_decode_accelerator_.get());
video_decode_accelerator_->Reset();
}

void GpuVideoDecodeAccelerator::OnDestroy(
const gpu::ReadWriteTokens& /* tokens */) {
void GpuVideoDecodeAccelerator::OnDestroy() {
DCHECK(video_decode_accelerator_.get());
video_decode_accelerator_->Destroy();
}
28 changes: 4 additions & 24 deletions content/common/gpu/media/gpu_video_decode_accelerator.h
@@ -13,10 +13,6 @@
#include "ipc/ipc_message.h"
#include "media/video/video_decode_accelerator.h"

namespace gpu {
class ReadWriteTokens;
}

class GpuCommandBufferStub;

class GpuVideoDecodeAccelerator
@@ -31,8 +27,6 @@ class GpuVideoDecodeAccelerator

// IPC::Channel::Listener implementation.
virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
virtual void OnChannelConnected(int32 peer_pid) OVERRIDE;
virtual void OnChannelError() OVERRIDE;

// media::VideoDecodeAccelerator::Client implementation.
virtual void ProvidePictureBuffers(
@@ -49,43 +43,29 @@ class GpuVideoDecodeAccelerator
// Function to delegate sending to actual sender.
virtual bool Send(IPC::Message* message);

// Callback to be fired when the underlying stub receives a new token.
void OnSetToken(int32 token);

// Initialize the accelerator with the given configuration.
void Initialize(const std::vector<uint32>& configs);

private:
// Defers |msg| for later processing if it specifies a write token that hasn't
// come to pass yet, and set |*deferred| to true. Return false if the message
// failed to parse.
bool DeferMessageIfNeeded(const IPC::Message& msg, bool* deferred);

// Handlers for IPC messages.
void OnDecode(
const gpu::ReadWriteTokens& /* tokens */,
base::SharedMemoryHandle handle, int32 id, int32 size);
void OnDecode(base::SharedMemoryHandle handle, int32 id, int32 size);
void OnAssignPictureBuffers(
const gpu::ReadWriteTokens& /* tokens */,
const std::vector<int32>& buffer_ids,
const std::vector<uint32>& texture_ids,
const std::vector<gfx::Size>& sizes);
void OnReusePictureBuffer(
const gpu::ReadWriteTokens& /* tokens */,
int32 picture_buffer_id);
void OnFlush(const gpu::ReadWriteTokens& /* tokens */);
void OnReset(const gpu::ReadWriteTokens& /* tokens */);
void OnDestroy(const gpu::ReadWriteTokens& /* tokens */);
void OnFlush();
void OnReset();
void OnDestroy();

// Pointer to the IPC message sender.
IPC::Message::Sender* sender_;

// Route ID to communicate with the host.
int32 host_route_id_;

// Messages deferred for later processing when their tokens have come to pass.
std::vector<IPC::Message*> deferred_messages_;

// Unowned pointer to the underlying GpuCommandBufferStub.
GpuCommandBufferStub* stub_;

3 changes: 1 addition & 2 deletions content/renderer/gpu/command_buffer_proxy.cc
@@ -394,10 +394,9 @@ void CommandBufferProxy::SetNotifyRepaintTask(Task* task) {
scoped_refptr<GpuVideoDecodeAcceleratorHost>
CommandBufferProxy::CreateVideoDecoder(
const std::vector<uint32>& configs,
gpu::CommandBufferHelper* cmd_buffer_helper,
media::VideoDecodeAccelerator::Client* client) {
video_decoder_host_ = new GpuVideoDecodeAcceleratorHost(
channel_, route_id_, cmd_buffer_helper, client);
channel_, route_id_, client);

if (!Send(new GpuCommandBufferMsg_CreateVideoDecoder(route_id_, configs))) {
LOG(ERROR) << "Send(GpuChannelMsg_CreateVideoDecoder) failed";
1 change: 0 additions & 1 deletion content/renderer/gpu/command_buffer_proxy.h
@@ -91,7 +91,6 @@ class CommandBufferProxy : public gpu::CommandBuffer,
// notified of an error later.
scoped_refptr<GpuVideoDecodeAcceleratorHost> CreateVideoDecoder(
const std::vector<uint32>& configs,
gpu::CommandBufferHelper* cmd_buffer_helper,
media::VideoDecodeAccelerator::Client* client);

#if defined(OS_MACOSX)
3 changes: 1 addition & 2 deletions content/renderer/gpu/gpu_channel_host.cc
@@ -131,12 +131,11 @@ CommandBufferProxy* GpuChannelHost::CreateViewCommandBuffer(
GpuVideoDecodeAcceleratorHost* GpuChannelHost::CreateVideoDecoder(
int command_buffer_route_id,
const std::vector<uint32>& configs,
gpu::CommandBufferHelper* cmd_buffer_helper,
media::VideoDecodeAccelerator::Client* client) {
ProxyMap::iterator it = proxies_.find(command_buffer_route_id);
DCHECK(it != proxies_.end());
CommandBufferProxy* proxy = it->second;
return proxy->CreateVideoDecoder(configs, cmd_buffer_helper, client);
return proxy->CreateVideoDecoder(configs, client);
}

CommandBufferProxy* GpuChannelHost::CreateOffscreenCommandBuffer(
1 change: 0 additions & 1 deletion content/renderer/gpu/gpu_channel_host.h
@@ -85,7 +85,6 @@ class GpuChannelHost : public IPC::Channel::Listener,
GpuVideoDecodeAcceleratorHost* CreateVideoDecoder(
int command_buffer_route_id,
const std::vector<uint32>& configs,
gpu::CommandBufferHelper* cmd_buffer_helper,
media::VideoDecodeAccelerator::Client* client);

// Destroy a command buffer created by this channel.
(Diffs for the remaining changed files are not shown here.)
