GpuChannelHost: finer-grained locking for VerifyFlush.

This CL avoids holding the deferred message lock for the entire duration of
VerifyFlush().

Bug: 361002668
Change-Id: I167cb1afcb2cc65e865f23e6ab33d26c745c01b2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5838011
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Commit-Queue: Yuzhu Shen <yzshen@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1352082}
Committed by: Chromium LUCI CQ
Parent: 506414ad8f
Commit: 2c29e22bd9
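
The core of the change is a standard way to shrink a critical section: hold the
lock only long enough to enqueue the flush and snapshot the state needed
afterwards, then do the potentially blocking verification outside the lock. The
sketch below only illustrates that shape; the class and helper names are
hypothetical stand-ins (not the actual GpuChannelHost code), and it assumes
Chromium's base::Lock/base::AutoLock and thread-annotation macros.

// Illustrative sketch only (hypothetical FlushVerifier class): snapshot
// guarded state inside a small scope, then do blocking work outside the lock
// using the local copy.
#include <cstdint>

#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"

class FlushVerifier {
 public:
  void VerifyFlush(uint32_t deferred_message_id) {
    uint32_t cached_flushed_id;
    {
      base::AutoLock lock(deferred_message_lock_);
      InternalFlush(deferred_message_id);
      cached_flushed_id = flushed_deferred_message_id_;
    }
    // Potentially blocking (e.g. a synchronous IPC); runs without the lock
    // and uses the cached copy instead of the guarded member.
    WaitUntilServiceProcessed(cached_flushed_id);
  }

 private:
  void InternalFlush(uint32_t deferred_message_id)
      EXCLUSIVE_LOCKS_REQUIRED(deferred_message_lock_) {
    // Send pending messages up to |deferred_message_id| (elided).
    flushed_deferred_message_id_ = deferred_message_id;
  }

  void WaitUntilServiceProcessed(uint32_t /*flushed_id*/) {
    // Blocking wait on the service process (elided).
  }

  base::Lock deferred_message_lock_;
  uint32_t flushed_deferred_message_id_ GUARDED_BY(deferred_message_lock_) = 0;
};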
gpu/ipc/client
@@ -77,7 +77,7 @@ uint32_t GpuChannelHost::OrderingBarrier(
     int32_t put_offset,
     std::vector<SyncToken> sync_token_fences,
     uint64_t release_count) {
-  AutoLock lock(context_lock_);
+  AutoLock lock(deferred_message_lock_);
 
   if (pending_ordering_barrier_ &&
       pending_ordering_barrier_->route_id != route_id)
@@ -100,7 +100,7 @@ uint32_t GpuChannelHost::EnqueueDeferredMessage(
     mojom::DeferredRequestParamsPtr params,
     std::vector<SyncToken> sync_token_fences,
     uint64_t release_count) {
-  AutoLock lock(context_lock_);
+  AutoLock lock(deferred_message_lock_);
 
   EnqueuePendingOrderingBarrier();
   enqueued_deferred_message_id_ = next_deferred_message_id_++;
@@ -115,7 +115,7 @@ void GpuChannelHost::CopyToGpuMemoryBufferAsync(
     std::vector<SyncToken> sync_token_dependencies,
     uint64_t release_count,
     base::OnceCallback<void(bool)> callback) {
-  AutoLock lock(context_lock_);
+  AutoLock lock(deferred_message_lock_);
   InternalFlush(UINT32_MAX);
   GetGpuChannel().CopyToGpuMemoryBufferAsync(
       mailbox, std::move(sync_token_dependencies), release_count,
@@ -124,14 +124,17 @@ void GpuChannelHost::CopyToGpuMemoryBufferAsync(
 #endif
 
 void GpuChannelHost::EnsureFlush(uint32_t deferred_message_id) {
-  AutoLock lock(context_lock_);
+  AutoLock lock(deferred_message_lock_);
   InternalFlush(deferred_message_id);
 }
 
 void GpuChannelHost::VerifyFlush(uint32_t deferred_message_id) {
-  AutoLock lock(context_lock_);
-
-  InternalFlush(deferred_message_id);
+  uint32_t cached_flushed_deferred_message_id;
+  {
+    AutoLock lock(deferred_message_lock_);
+    InternalFlush(deferred_message_id);
+    cached_flushed_deferred_message_id = flushed_deferred_message_id_;
+  }
 
   if (sync_point_graph_validation_enabled_) {
     // No need to do synchronous flush when graph validation of sync points is
@@ -155,6 +158,8 @@ void GpuChannelHost::VerifyFlush(uint32_t deferred_message_id) {
   // is used.
   //
   if (skip_flush_if_possible) {
+    base::AutoLock lock(shared_memory_version_lock_);
+
     // If shared memory communication is not established, do so.
     if (!shared_memory_version_client_.has_value()) {
       mojo::SyncCallRestrictions::ScopedAllowSyncCall allow_sync;
@@ -166,7 +171,7 @@ void GpuChannelHost::VerifyFlush(uint32_t deferred_message_id) {
     // GPUChannel has not processed ids up to the ones that were flushed. IPC
     // needed.
    else if (shared_memory_version_client_->SharedVersionIsLessThan(
-                 flushed_deferred_message_id_)) {
+                 cached_flushed_deferred_message_id)) {
      ipc_needed = true;
    }
  } else {
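
For context on the skip_flush_if_possible branch above: the client compares the
highest flushed id against a progress counter the GPU process publishes through
shared memory, and only falls back to a synchronous IPC when the service is
behind. Below is a generic sketch of that idea using a plain atomic counter; it
is a hypothetical stand-in, not mojo::SharedMemoryVersionClient's actual API.

// Generic illustration of skipping a synchronous round trip when shared
// progress data already shows the service has caught up. Hypothetical types.
#include <atomic>
#include <cstdint>

struct SharedProgress {
  // Mapped into both processes; the service stores the highest deferred
  // message id it has processed so far.
  std::atomic<uint64_t> processed_id{0};
};

inline bool NeedsSyncIpc(const SharedProgress& progress,
                         uint64_t flushed_deferred_message_id) {
  // Only pay for the synchronous IPC if the service's published progress has
  // not yet reached what the client flushed.
  return progress.processed_id.load(std::memory_order_acquire) <
         flushed_deferred_message_id;
}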
@@ -181,7 +186,7 @@ void GpuChannelHost::VerifyFlush(uint32_t deferred_message_id) {
 }
 
 void GpuChannelHost::EnqueuePendingOrderingBarrier() {
-  context_lock_.AssertAcquired();
+  deferred_message_lock_.AssertAcquired();
   if (!pending_ordering_barrier_)
     return;
   DCHECK_LT(enqueued_deferred_message_id_,
@@ -212,7 +217,7 @@ void GpuChannelHost::EstablishSharedMemoryForFlushVerification() {
 }
 
 void GpuChannelHost::InternalFlush(uint32_t deferred_message_id) {
-  context_lock_.AssertAcquired();
+  deferred_message_lock_.AssertAcquired();
 
   EnqueuePendingOrderingBarrier();
   if (!deferred_messages_.empty() &&
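
The two helpers above keep their locking precondition explicit: the caller must
already hold the (now renamed) deferred_message_lock_, and AssertAcquired()
checks this at runtime in DCHECK-enabled builds. A minimal sketch of that
convention, using a hypothetical class with Chromium's base::Lock:

// Minimal sketch of a "caller must hold the lock" helper: the contract is
// stated with EXCLUSIVE_LOCKS_REQUIRED and double-checked at runtime with
// AssertAcquired(). Hypothetical MessageQueue class.
#include <vector>

#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"

class MessageQueue {
 public:
  void Enqueue(int message) {
    base::AutoLock lock(lock_);
    EnqueueLocked(message);  // OK: lock_ is held for the call.
  }

 private:
  void EnqueueLocked(int message) EXCLUSIVE_LOCKS_REQUIRED(lock_) {
    lock_.AssertAcquired();  // Runtime check mirroring the static annotation.
    pending_.push_back(message);
  }

  base::Lock lock_;
  std::vector<int> pending_ GUARDED_BY(lock_);
};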
@@ -181,7 +181,8 @@ class GPU_EXPORT GpuChannelHost
  private:
   // Establishes shared memory communication with the GPU process. This memory
   // is used to keep track of flushed items and avoid unnecessary IPCs.
-  void EstablishSharedMemoryForFlushVerification();
+  void EstablishSharedMemoryForFlushVerification()
+      EXCLUSIVE_LOCKS_REQUIRED(shared_memory_version_lock_);
 
   // Tracks whether we still have a working connection to the GPU process. This
   // is updated eaglerly from the IO thread if the connection is broken, but it
@@ -273,7 +274,7 @@ class GPU_EXPORT GpuChannelHost
   // - |next_image_id_|, atomic type
   // - |next_route_id_|, atomic type
   // - |deferred_messages_| and |*_deferred_message_id_| protected by
-  //   |context_lock_|
+  //   |deferred_message_lock_|
   const scoped_refptr<base::SingleThreadTaskRunner> io_thread_;
 
   const int channel_id_;
@@ -293,8 +294,10 @@ class GPU_EXPORT GpuChannelHost
   mojo::SharedAssociatedRemote<mojom::GpuChannel> gpu_channel_;
   SharedImageInterfaceProxy shared_image_interface_;
 
+  mutable base::Lock shared_memory_version_lock_;
   // Used to synchronize flushed request ids with the GPU process.
-  std::optional<mojo::SharedMemoryVersionClient> shared_memory_version_client_;
+  std::optional<mojo::SharedMemoryVersionClient> shared_memory_version_client_
+      GUARDED_BY(shared_memory_version_lock_);
 
   // A client-side helper to send image decode requests to the GPU process.
   ImageDecodeAcceleratorProxy image_decode_accelerator_proxy_;
@@ -310,16 +313,16 @@ class GPU_EXPORT GpuChannelHost
 
   // Protects |deferred_messages_|, |pending_ordering_barrier_| and
   // |*_deferred_message_id_|.
-  mutable base::Lock context_lock_;
+  mutable base::Lock deferred_message_lock_;
   std::vector<mojom::DeferredRequestPtr> deferred_messages_
-      GUARDED_BY(context_lock_);
+      GUARDED_BY(deferred_message_lock_);
   std::optional<OrderingBarrierInfo> pending_ordering_barrier_
-      GUARDED_BY(context_lock_);
-  uint32_t next_deferred_message_id_ GUARDED_BY(context_lock_) = 1;
+      GUARDED_BY(deferred_message_lock_);
+  uint32_t next_deferred_message_id_ GUARDED_BY(deferred_message_lock_) = 1;
   // Highest deferred message id in |deferred_messages_|.
-  uint32_t enqueued_deferred_message_id_ GUARDED_BY(context_lock_) = 0;
+  uint32_t enqueued_deferred_message_id_ GUARDED_BY(deferred_message_lock_) = 0;
   // Highest deferred message id sent to the channel.
-  uint32_t flushed_deferred_message_id_ GUARDED_BY(context_lock_) = 0;
+  uint32_t flushed_deferred_message_id_ GUARDED_BY(deferred_message_lock_) = 0;
 
   const bool sync_point_graph_validation_enabled_;
 };
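
These GUARDED_BY annotations are what force the cached copy in VerifyFlush():
once flushed_deferred_message_id_ is tied to deferred_message_lock_, Clang's
-Wthread-safety analysis rejects reading the member after the scoped lock is
released. A hedged sketch of the difference, using a hypothetical class:

// Sketch of the pattern the annotations enforce (hypothetical Example class;
// relies on Clang thread-safety analysis, which Chromium enables for
// base::Lock).
#include <cstdint>

#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"

class Example {
 public:
  uint32_t Bad() {
    { base::AutoLock lock(lock_); }
    // -Wthread-safety warns here: value_ is read without holding lock_.
    return value_;
  }

  uint32_t Good() {
    uint32_t cached;
    {
      base::AutoLock lock(lock_);
      cached = value_;  // Read under the lock...
    }
    return cached;  // ...then use the local copy afterwards.
  }

 private:
  base::Lock lock_;
  uint32_t value_ GUARDED_BY(lock_) = 0;
};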