
Revert of Added an extra sync token field for extra command buffer identification. (patchset id:140001 of https://codereview.chromium.org/1489573003/ )

Reason for revert:
This is causing a large number of layout tests in virtual/threaded/animations to crash.

They're hitting the following assertion:
FATAL:scheduler.cc(203)] Check failed: state_machine_.pending_swaps() > 0 (0 vs. 0){"compositor_timing_history":

e.g. see https://build.chromium.org/p/chromium.webkit/builders/WebKit%20Mac10.7%20%28dbg%29/builds/25835

Confirmed with a local bisect.

Original issue's description:
> Added an extra sync token field for extra command buffer identification.
>
> This CL adds an extra field within sync tokens without changing the size
> of the sync token. Currently this extra field is only used for the
> GPU channel implementation, storing the stream of the identified command
> buffer.
>
> BUG=514815
>
> Committed: https://crrev.com/f21e7e6e2b260d56fa54373dd662c81c9d955331
> Cr-Commit-Position: refs/heads/master@{#363231}
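As a rough, size-only illustration of what the description above relies on (this is not the actual gpu::SyncToken code, and the type names here are invented): narrowing the namespace enum to a single byte frees enough padding to hold an extra 32-bit field without growing the struct.

  // Invented stand-ins, for layout reasoning only.
  #include <cstdint>

  enum WideNamespace { WIDE_INVALID = -1, WIDE_GPU_IO };                 // usually int-sized
  enum NarrowNamespace : int8_t { NARROW_INVALID = -1, NARROW_GPU_IO };  // exactly one byte

  struct TokenWithoutExtraField {  // roughly the shape this revert restores
    bool verified_flush;
    WideNamespace namespace_id;
    uint64_t command_buffer_id;
    uint64_t release_count;
  };

  struct TokenWithExtraField {     // roughly the shape the reverted CL introduced
    bool verified_flush;
    NarrowNamespace namespace_id;
    int32_t extra_data_field;      // e.g. the GPU channel stream id
    uint64_t command_buffer_id;
    uint64_t release_count;
  };

  // On common 64-bit ABIs both layouts pack to 24 bytes.
  static_assert(sizeof(TokenWithExtraField) == sizeof(TokenWithoutExtraField),
                "the extra field should not change the token size");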

TBR=dcheng@chromium.org,dalecurtis@chromium.org,piman@chromium.org,sky@chromium.org,dyen@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=514815

Review URL: https://codereview.chromium.org/1495893005

Cr-Commit-Position: refs/heads/master@{#363290}
Author: ajuma
Date: 2015-12-04 12:56:41 -08:00
Committed by: Commit bot
Commit: 5097a2103f (parent 1a1d2f05b1)
27 changed files with 21 additions and 114 deletions

@@ -228,10 +228,6 @@ uint64_t CommandBufferLocal::GetCommandBufferID() const {
   return command_buffer_id_;
 }
 
-int32_t CommandBufferLocal::GetExtraCommandBufferData() const {
-  return 0;
-}
-
 uint64_t CommandBufferLocal::GenerateFenceSyncRelease() {
   return next_fence_sync_release_++;
 }

@@ -73,7 +73,6 @@ class CommandBufferLocal : public gpu::GpuControl {
   bool IsGpuChannelLost() override;
   gpu::CommandBufferNamespace GetNamespaceID() const override;
   uint64_t GetCommandBufferID() const override;
-  int32_t GetExtraCommandBufferData() const override;
   uint64_t GenerateFenceSyncRelease() override;
   bool IsFenceSyncRelease(uint64_t release) override;
   bool IsFenceSyncFlushed(uint64_t release) override;

@@ -449,7 +449,7 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
 
   if (image_fence_sync) {
     gpu::SyncToken sync_token(GetNamespaceID(), GetCommandBufferID(),
-                              GetExtraCommandBufferData(), image_fence_sync);
+                              image_fence_sync);
 
     // Force a synchronous IPC to validate sync token.
     channel_->ValidateFlushIDReachedServer(stream_id_, true);
@@ -519,10 +519,6 @@ uint64_t CommandBufferProxyImpl::GetCommandBufferID() const {
   return command_buffer_id_;
 }
 
-int32_t CommandBufferProxyImpl::GetExtraCommandBufferData() const {
-  return stream_id_;
-}
-
 uint64_t CommandBufferProxyImpl::GenerateFenceSyncRelease() {
   return next_fence_sync_release_++;
 }
@@ -581,20 +577,8 @@ bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken(
   // Can only wait on an unverified sync token if it is from the same channel.
   const uint64_t token_channel = sync_token->command_buffer_id() >> 32;
   const uint64_t channel = command_buffer_id_ >> 32;
-  if (sync_token->namespace_id() != gpu::CommandBufferNamespace::GPU_IO ||
-      token_channel != channel) {
-    return false;
-  }
-
-  // If waiting on a different stream, flush pending commands on that stream.
-  const int32_t release_stream_id = sync_token->extra_data_field();
-  if (release_stream_id == 0)
-    return false;
-
-  if (release_stream_id != stream_id_)
-    channel_->FlushPendingStream(release_stream_id);
-
-  return true;
+  return (sync_token->namespace_id() == gpu::CommandBufferNamespace::GPU_IO &&
+          token_channel == channel);
 }
 
 uint32 CommandBufferProxyImpl::InsertSyncPoint() {
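The piece of CanWaitUnverifiedSyncToken() that survives the revert is the channel comparison: both sides derive the channel from the top 32 bits of the 64-bit command buffer id. A standalone sketch of just that check (the helper name is ours, not Chromium's):

  #include <cstdint>

  // An unverified sync token may only be waited on from the channel that
  // produced it; the channel id occupies the upper 32 bits of the id.
  bool SameGpuChannel(uint64_t waiting_command_buffer_id,
                      uint64_t token_command_buffer_id) {
    const uint64_t waiting_channel = waiting_command_buffer_id >> 32;
    const uint64_t token_channel = token_command_buffer_id >> 32;
    return waiting_channel == token_channel;
  }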

@@ -122,7 +122,6 @@ class CommandBufferProxyImpl
   bool IsGpuChannelLost() override;
   gpu::CommandBufferNamespace GetNamespaceID() const override;
   uint64_t GetCommandBufferID() const override;
-  int32_t GetExtraCommandBufferData() const override;
   uint64_t GenerateFenceSyncRelease() override;
   bool IsFenceSyncRelease(uint64_t release) override;
   bool IsFenceSyncFlushed(uint64_t release) override;

@@ -162,17 +162,6 @@ uint32_t GpuChannelHost::OrderingBarrier(
   return 0;
 }
 
-void GpuChannelHost::FlushPendingStream(int32 stream_id) {
-  AutoLock lock(context_lock_);
-  auto flush_info_iter = stream_flush_info_.find(stream_id);
-  if (flush_info_iter == stream_flush_info_.end())
-    return;
-
-  StreamFlushInfo& flush_info = flush_info_iter->second;
-  if (flush_info.flush_pending)
-    InternalFlush(&flush_info);
-}
-
 void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) {
   context_lock_.AssertAcquired();
   DCHECK(flush_info);
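The removed helper only flushed a stream that actually had work pending. A simplified sketch of that lookup-then-flush pattern, with invented types standing in for GpuChannelHost's internals (locking omitted):

  #include <cstdint>
  #include <unordered_map>

  struct StreamFlushInfo {
    bool flush_pending = false;
    // put offset, flush id, etc. elided
  };

  class PendingStreamFlusher {
   public:
    // Flush only if the stream is known and has a pending flush queued.
    void FlushPendingStream(int32_t stream_id) {
      auto it = stream_flush_info_.find(stream_id);
      if (it == stream_flush_info_.end())
        return;
      if (it->second.flush_pending)
        InternalFlush(&it->second);
    }

   private:
    void InternalFlush(StreamFlushInfo* info) { info->flush_pending = false; }

    std::unordered_map<int32_t, StreamFlushInfo> stream_flush_info_;
  };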

@@ -116,8 +116,6 @@ class GpuChannelHost : public IPC::Sender,
                        bool put_offset_changed,
                        bool do_flush);
 
-  void FlushPendingStream(int32 stream_id);
-
   // Create and connect to a command buffer in the GPU process.
   scoped_ptr<CommandBufferProxyImpl> CreateViewCommandBuffer(
       int32 surface_id,

@@ -930,7 +930,7 @@ void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
     // We can simply use the global sync point number as the release count with
     // 0 for the command buffer ID (under normal circumstances 0 is invalid so
     // will not be used) until the old sync points are replaced.
-    gpu::SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0, 0,
+    gpu::SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0,
                              sync_point);
     mailbox_manager->PushTextureUpdates(sync_token);
   }
@@ -986,7 +986,7 @@ void GpuCommandBufferStub::PullTextureUpdates(
   gpu::gles2::MailboxManager* mailbox_manager =
       context_group_->mailbox_manager();
   if (mailbox_manager->UsesSync() && MakeCurrent()) {
-    gpu::SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
+    gpu::SyncToken sync_token(namespace_id, command_buffer_id, release);
     mailbox_manager->PullTextureUpdates(sync_token);
   }
 }
@@ -1044,7 +1044,7 @@ void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) {
   gpu::gles2::MailboxManager* mailbox_manager =
       context_group_->mailbox_manager();
   if (mailbox_manager->UsesSync() && MakeCurrent()) {
-    gpu::SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0,
+    gpu::SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO,
                              command_buffer_id_, release);
     mailbox_manager->PushTextureUpdates(sync_token);
   }

@@ -114,7 +114,6 @@ class MockClientGpuControl : public GpuControl {
   MOCK_METHOD0(IsGpuChannelLost, bool());
   MOCK_CONST_METHOD0(GetNamespaceID, CommandBufferNamespace());
   MOCK_CONST_METHOD0(GetCommandBufferID, uint64_t());
-  MOCK_CONST_METHOD0(GetExtraCommandBufferData, int32_t());
   MOCK_METHOD0(GenerateFenceSyncRelease, uint64_t());
   MOCK_METHOD1(IsFenceSyncRelease, bool(uint64_t release));
   MOCK_METHOD1(IsFenceSyncFlushed, bool(uint64_t release));

@@ -5552,7 +5552,6 @@ void GLES2Implementation::GenSyncTokenCHROMIUM(GLuint64 fence_sync,
 
   // Copy the data over after setting the data to ensure alignment.
   SyncToken sync_token_data(gpu_control_->GetNamespaceID(),
-                            gpu_control_->GetExtraCommandBufferData(),
                             gpu_control_->GetCommandBufferID(), fence_sync);
   sync_token_data.SetVerifyFlush();
   memcpy(sync_token, &sync_token_data, sizeof(sync_token_data));
@@ -5576,7 +5575,6 @@ void GLES2Implementation::GenUnverifiedSyncTokenCHROMIUM(GLuint64 fence_sync,
 
   // Copy the data over after setting the data to ensure alignment.
   SyncToken sync_token_data(gpu_control_->GetNamespaceID(),
-                            gpu_control_->GetExtraCommandBufferData(),
                             gpu_control_->GetCommandBufferID(), fence_sync);
   memcpy(sync_token, &sync_token_data, sizeof(sync_token_data));
 }
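After the revert, a client-side sync token is assembled from just the namespace, the command buffer id, and the fence release, then copied byte-for-byte into the caller's opaque byte array. A rough sketch with simplified stand-in types (the real code uses gpu::SyncToken and GLbyte):

  #include <cstdint>
  #include <cstring>

  enum Namespace : int8_t { GPU_IO = 0 };

  struct Token {  // simplified stand-in for gpu::SyncToken
    bool verified_flush;
    Namespace namespace_id;
    uint64_t command_buffer_id;
    uint64_t release_count;
  };

  // Mirrors the shape of GenSyncTokenCHROMIUM: build the token in a local,
  // then memcpy it into the destination buffer to keep alignment simple.
  // The caller must supply at least sizeof(Token) bytes.
  void GenSyncToken(uint64_t command_buffer_id, uint64_t fence_sync,
                    unsigned char* out_sync_token) {
    const Token token = {true /* verified */, GPU_IO, command_buffer_id,
                         fence_sync};
    std::memcpy(out_sync_token, &token, sizeof(token));
  }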

@@ -3782,8 +3782,6 @@ TEST_F(GLES2ImplementationTest, GenSyncTokenCHROMIUM) {
       .WillRepeatedly(testing::Return(kNamespaceId));
   EXPECT_CALL(*gpu_control_, GetCommandBufferID())
       .WillRepeatedly(testing::Return(kCommandBufferId));
-  EXPECT_CALL(*gpu_control_, GetExtraCommandBufferData())
-      .WillRepeatedly(testing::Return(0));
 
   gl_->GenSyncTokenCHROMIUM(kFenceSync, nullptr);
   EXPECT_EQ(GL_INVALID_VALUE, CheckError());
@@ -3827,8 +3825,6 @@ TEST_F(GLES2ImplementationTest, GenUnverifiedSyncTokenCHROMIUM) {
       .WillRepeatedly(testing::Return(kNamespaceId));
   EXPECT_CALL(*gpu_control_, GetCommandBufferID())
       .WillRepeatedly(testing::Return(kCommandBufferId));
-  EXPECT_CALL(*gpu_control_, GetExtraCommandBufferData())
-      .WillRepeatedly(testing::Return(0));
 
   gl_->GenUnverifiedSyncTokenCHROMIUM(kFenceSync, nullptr);
   EXPECT_EQ(GL_INVALID_VALUE, CheckError());
@@ -3876,8 +3872,6 @@ TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUM) {
       .WillOnce(testing::Return(kNamespaceId));
   EXPECT_CALL(*gpu_control_, GetCommandBufferID())
       .WillOnce(testing::Return(kCommandBufferId));
-  EXPECT_CALL(*gpu_control_, GetExtraCommandBufferData())
-      .WillOnce(testing::Return(0));
   gl_->GenSyncTokenCHROMIUM(kFenceSync, sync_token);
 
   struct Cmds {
@@ -3905,14 +3899,14 @@ TEST_F(GLES2ImplementationTest, WaitSyncTokenCHROMIUMErrors) {
 
   // Invalid sync tokens should produce no error and be a nop.
   ClearCommands();
-  gpu::SyncToken invalid_sync_token;
+  gpu::SyncToken invalid_sync_token(CommandBufferNamespace::INVALID, 0, 0);
   gl_->WaitSyncTokenCHROMIUM(invalid_sync_token.GetConstData());
   EXPECT_TRUE(NoCommandsWritten());
   EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), gl_->GetError());
 
   // Unverified sync token should produce INVALID_OPERATION.
   ClearCommands();
-  gpu::SyncToken unverified_sync_token(CommandBufferNamespace::GPU_IO, 0, 0, 0);
+  gpu::SyncToken unverified_sync_token(CommandBufferNamespace::GPU_IO, 0, 0);
   EXPECT_CALL(*gpu_control_, CanWaitUnverifiedSyncToken(_))
       .WillOnce(testing::Return(false));
   gl_->WaitSyncTokenCHROMIUM(unverified_sync_token.GetConstData());

@@ -87,13 +87,9 @@ class GPU_EXPORT GpuControl {
 
   // The namespace and command buffer ID forms a unique pair for all existing
   // GpuControl (on client) and matches for the corresponding command buffer
-  // (on server) in a single server process. The extra command buffer data can
-  // be used for extra identification purposes. One usage is to store some
-  // extra field to identify unverified sync tokens for the implementation of
-  // the CanWaitUnverifiedSyncToken() function.
+  // (on server) in a single server process.
   virtual CommandBufferNamespace GetNamespaceID() const = 0;
   virtual uint64_t GetCommandBufferID() const = 0;
-  virtual int32_t GetExtraCommandBufferData() const = 0;
 
   // Fence Syncs use release counters at a context level, these fence syncs
   // need to be flushed before they can be shared with other contexts across
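With the extra getter gone, a GpuControl implementation identifies itself through the namespace/command-buffer-id pair and a monotonically increasing fence release, which is what the GetCommandBufferID()/GenerateFenceSyncRelease() bodies elsewhere in this diff do. A minimal, illustrative implementer (names invented, not the real interface):

  #include <cstdint>

  enum class Namespace : int8_t { INVALID = -1, GPU_IO = 0 };

  class FakeGpuControl {
   public:
    explicit FakeGpuControl(uint64_t command_buffer_id)
        : command_buffer_id_(command_buffer_id) {}

    Namespace GetNamespaceID() const { return Namespace::GPU_IO; }
    uint64_t GetCommandBufferID() const { return command_buffer_id_; }
    uint64_t GenerateFenceSyncRelease() { return next_fence_sync_release_++; }

   private:
    const uint64_t command_buffer_id_;
    uint64_t next_fence_sync_release_ = 1;
  };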

@@ -67,7 +67,7 @@ const int32_t kCommandBufferSharedMemoryId = 4;
 const size_t kDefaultMaxProgramCacheMemoryBytes = 6 * 1024 * 1024;
 
 // Namespace used to separate various command buffer types.
-enum CommandBufferNamespace : int8_t {
+enum CommandBufferNamespace {
   INVALID = -1,
 
   GPU_IO,

@@ -26,7 +26,6 @@ struct GPU_EXPORT SyncToken {
   SyncToken()
       : verified_flush_(false),
         namespace_id_(CommandBufferNamespace::INVALID),
-        extra_data_field_(0),
         command_buffer_id_(0),
         release_count_(0) {}
 
@@ -37,26 +36,21 @@
       : verified_flush_(sync_point ? true : false),
         namespace_id_(sync_point ? gpu::CommandBufferNamespace::OLD_SYNC_POINTS
                                  : gpu::CommandBufferNamespace::INVALID),
-        extra_data_field_(0),
         command_buffer_id_(0),
         release_count_(sync_point) {}
 
   SyncToken(CommandBufferNamespace namespace_id,
-            int32_t extra_data_field,
             uint64_t command_buffer_id,
             uint64_t release_count)
       : verified_flush_(false),
         namespace_id_(namespace_id),
-        extra_data_field_(extra_data_field),
         command_buffer_id_(command_buffer_id),
         release_count_(release_count) {}
 
   void Set(CommandBufferNamespace namespace_id,
-           int32_t extra_data_field,
            uint64_t command_buffer_id,
            uint64_t release_count) {
     namespace_id_ = namespace_id;
-    extra_data_field_ = extra_data_field;
     command_buffer_id_ = command_buffer_id;
     release_count_ = release_count;
   }
@@ -64,7 +58,6 @@ struct GPU_EXPORT SyncToken {
   void Clear() {
     verified_flush_ = false;
     namespace_id_ = CommandBufferNamespace::INVALID;
-    extra_data_field_ = 0;
     command_buffer_id_ = 0;
     release_count_ = 0;
   }
@@ -88,12 +81,6 @@ struct GPU_EXPORT SyncToken {
   uint64_t command_buffer_id() const { return command_buffer_id_; }
   uint64_t release_count() const { return release_count_; }
 
-  // This extra data field can be used by command buffers to add extra
-  // information to identify unverified sync tokens. The current purpose
-  // of this field is only for unverified sync tokens which only exist within
-  // the same process so this information will not survive cross-process IPCs.
-  int32_t extra_data_field() const { return extra_data_field_; }
-
   bool operator<(const SyncToken& other) const {
     // TODO(dyen): Once all our compilers support c++11, we can replace this
     // long list of comparisons with std::tie().
@@ -107,7 +94,6 @@ struct GPU_EXPORT SyncToken {
   bool operator==(const SyncToken& other) const {
     return verified_flush_ == other.verified_flush() &&
            namespace_id_ == other.namespace_id() &&
-           extra_data_field_ == other.extra_data_field() &&
            command_buffer_id_ == other.command_buffer_id() &&
            release_count_ == other.release_count();
   }
@@ -117,7 +103,6 @@ struct GPU_EXPORT SyncToken {
  private:
   bool verified_flush_;
   CommandBufferNamespace namespace_id_;
-  int32_t extra_data_field_;
   uint64_t command_buffer_id_;
   uint64_t release_count_;
 };
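Assuming the post-revert SyncToken above, typical usage is to construct it from the three identifying values, mark it verified once the flush is known to have reached the service, and rely on operator< for ordered containers. A brief sketch (the numeric ids are placeholders):

  #include <set>

  #include "gpu/command_buffer/common/sync_token.h"

  void Example() {
    gpu::SyncToken token(gpu::CommandBufferNamespace::GPU_IO,
                         123 /* command_buffer_id, placeholder */,
                         7 /* release_count, placeholder */);
    token.SetVerifyFlush();            // safe to send across processes

    std::set<gpu::SyncToken> pending;  // operator< enables ordered containers
    pending.insert(token);
  }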

@@ -706,8 +706,7 @@ int32 InProcessCommandBuffer::CreateImage(ClientBuffer buffer,
 
   if (fence_sync) {
     flushed_fence_sync_release_ = fence_sync;
-    SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
-                         GetCommandBufferID(), fence_sync);
+    SyncToken sync_token(GetNamespaceID(), GetCommandBufferID(), fence_sync);
     sync_token.SetVerifyFlush();
     gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer,
                                                         sync_token);
@@ -853,8 +852,7 @@ void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
     // We can simply use the GPUIO namespace with 0 for the command buffer ID
     // (under normal circumstances 0 is invalid so will not be used) until
     // the old sync points are replaced.
-    SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0, 0,
-                         sync_point);
+    SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0, sync_point);
     mailbox_manager->PushTextureUpdates(sync_token);
   }
 }
@@ -875,7 +873,7 @@ bool InProcessCommandBuffer::WaitSyncPointOnGpuThread(unsigned sync_point) {
     // We can simply use the GPUIO namespace with 0 for the command buffer ID
     // (under normal circumstances 0 is invalid so will not be used) until
     // the old sync points are replaced.
-    SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0, 0, sync_point);
+    SyncToken sync_token(gpu::CommandBufferNamespace::GPU_IO, 0, sync_point);
     mailbox_manager->PullTextureUpdates(sync_token);
     return true;
   }
@@ -891,8 +889,7 @@ void InProcessCommandBuffer::FenceSyncReleaseOnGpuThread(uint64_t release) {
     make_current_success = MakeCurrent();
   }
   if (make_current_success) {
-    SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
-                         GetCommandBufferID(), release);
+    SyncToken sync_token(GetNamespaceID(), GetCommandBufferID(), release);
     mailbox_manager->PushTextureUpdates(sync_token);
   }
 }
@@ -925,7 +922,7 @@ bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread(
   gles2::MailboxManager* mailbox_manager =
       decoder_->GetContextGroup()->mailbox_manager();
 
-  SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
+  SyncToken sync_token(namespace_id, command_buffer_id, release);
   mailbox_manager->PullTextureUpdates(sync_token);
   return true;
 }
@@ -987,10 +984,6 @@ uint64_t InProcessCommandBuffer::GetCommandBufferID() const {
   return command_buffer_id_;
 }
 
-int32_t InProcessCommandBuffer::GetExtraCommandBufferData() const {
-  return 0;
-}
-
 uint64_t InProcessCommandBuffer::GenerateFenceSyncRelease() {
   return next_fence_sync_release_++;
 }

@@ -128,7 +128,6 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
   bool IsGpuChannelLost() override;
   CommandBufferNamespace GetNamespaceID() const override;
   uint64_t GetCommandBufferID() const override;
-  int32_t GetExtraCommandBufferData() const override;
   uint64_t GenerateFenceSyncRelease() override;
   bool IsFenceSyncRelease(uint64_t release) override;
   bool IsFenceSyncFlushed(uint64_t release) override;

@@ -19,7 +19,6 @@ namespace gles2 {
 
 using namespace ::testing;
 
 static const SyncToken g_sync_token(gpu::CommandBufferNamespace::GPU_IO,
-                                    0,
                                     123,
                                     0);

@@ -505,10 +505,6 @@ uint64_t GLManager::GetCommandBufferID() const {
   return command_buffer_id_;
 }
 
-int32_t GLManager::GetExtraCommandBufferData() const {
-  return 0;
-}
-
 uint64_t GLManager::GenerateFenceSyncRelease() {
   return next_fence_sync_release_++;
 }

@@ -132,7 +132,6 @@ class GLManager : private GpuControl {
   bool IsGpuChannelLost() override;
   gpu::CommandBufferNamespace GetNamespaceID() const override;
   uint64_t GetCommandBufferID() const override;
-  int32_t GetExtraCommandBufferData() const override;
   uint64_t GenerateFenceSyncRelease() override;
   bool IsFenceSyncRelease(uint64_t release) override;
   bool IsFenceSyncFlushed(uint64_t release) override;

@@ -345,10 +345,6 @@ uint64_t Display::GetCommandBufferID() const {
   return 0;
 }
 
-int32_t Display::GetExtraCommandBufferData() const {
-  return 0;
-}
-
 uint64_t Display::GenerateFenceSyncRelease() {
   return next_fence_sync_release_++;
 }

@@ -98,7 +98,6 @@ class Display : private gpu::GpuControl {
   bool IsGpuChannelLost() override;
   gpu::CommandBufferNamespace GetNamespaceID() const override;
   uint64_t GetCommandBufferID() const override;
-  int32_t GetExtraCommandBufferData() const override;
   uint64_t GenerateFenceSyncRelease() override;
   bool IsFenceSyncRelease(uint64_t release) override;
   bool IsFenceSyncFlushed(uint64_t release) override;

@@ -79,7 +79,7 @@ bool ParamTraits<gpu::SyncToken>::Read(const Message* m,
     return false;
   }
 
-  p->Set(namespace_id, 0, command_buffer_id, release_count);
+  p->Set(namespace_id, command_buffer_id, release_count);
   if (p->HasData()) {
     if (!verified_flush)
       return false;

@@ -269,8 +269,7 @@ static void TextureCallback(gpu::SyncToken* called_sync_token,
 // Verify the gpu::MailboxHolder::ReleaseCallback is called when VideoFrame is
 // destroyed with the default release sync point.
 TEST(VideoFrame, TextureNoLongerNeededCallbackIsCalled) {
-  gpu::SyncToken called_sync_token(gpu::CommandBufferNamespace::GPU_IO, 0, 1,
-                                   1);
+  gpu::SyncToken called_sync_token(gpu::CommandBufferNamespace::GPU_IO, 1, 1);
 
   {
     scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
@@ -322,10 +321,10 @@ TEST(VideoFrame,
     mailbox[i].name[0] = 50 + 1;
   }
 
-  gpu::SyncToken sync_token(kNamespace, 0, kCommandBufferId, 7);
+  gpu::SyncToken sync_token(kNamespace, kCommandBufferId, 7);
   sync_token.SetVerifyFlush();
   uint32 target = 9;
-  gpu::SyncToken release_sync_token(kNamespace, 0, kCommandBufferId, 111);
+  gpu::SyncToken release_sync_token(kNamespace, kCommandBufferId, 111);
   release_sync_token.SetVerifyFlush();
 
   gpu::SyncToken called_sync_token;

@@ -547,8 +547,8 @@ gpu::SyncToken TypeConverter<gpu::SyncToken, SyncTokenPtr>::Convert(
     const SyncTokenPtr& input) {
   const gpu::CommandBufferNamespace namespace_id =
       static_cast<gpu::CommandBufferNamespace>(input->namespace_id);
-  gpu::SyncToken sync_token(namespace_id, 0,
-                            input->command_buffer_id, input->release_count);
+  gpu::SyncToken sync_token(namespace_id, input->command_buffer_id,
+                            input->release_count);
 
   if (input->verified_flush)
     sync_token.SetVerifyFlush();

@@ -413,10 +413,6 @@ uint64_t CommandBufferClientImpl::GetCommandBufferID() const {
   return sync_client_impl_->GetCommandBufferID();
 }
 
-int32_t CommandBufferClientImpl::GetExtraCommandBufferData() const {
-  return 0;
-}
-
 uint64_t CommandBufferClientImpl::GenerateFenceSyncRelease() {
   return next_fence_sync_release_++;
 }

@@ -75,7 +75,6 @@ class CommandBufferClientImpl
   bool IsGpuChannelLost() override;
   gpu::CommandBufferNamespace GetNamespaceID() const override;
   uint64_t GetCommandBufferID() const override;
-  int32_t GetExtraCommandBufferData() const override;
   uint64_t GenerateFenceSyncRelease() override;
   bool IsFenceSyncRelease(uint64_t release) override;
   bool IsFenceSyncFlushed(uint64_t release) override;

@@ -213,10 +213,6 @@ bool PpapiCommandBufferProxy::CanWaitUnverifiedSyncToken(
   return false;
 }
 
-int32_t PpapiCommandBufferProxy::GetExtraCommandBufferData() const {
-  return 0;
-}
-
 uint32 PpapiCommandBufferProxy::InsertSyncPoint() {
   uint32 sync_point = 0;
   if (last_state_.error == gpu::error::kNoError) {

@@ -75,7 +75,6 @@ class PPAPI_PROXY_EXPORT PpapiCommandBufferProxy : public gpu::CommandBuffer,
   void SignalSyncToken(const gpu::SyncToken& sync_token,
                        const base::Closure& callback) override;
   bool CanWaitUnverifiedSyncToken(const gpu::SyncToken* sync_token) override;
-  int32_t GetExtraCommandBufferData() const override;
 
  private:
   bool Send(IPC::Message* msg);