gpu: Flow trace events for command buffer flush

Add flow trace events for command buffer flushes covering all stages:
ordering barrier, GPU channel flush, and command buffer stub. We use
(channel_id << 32 | flush_message_id) as the unique global identifier
for these flows (within a special event scope).

One small change is that we no longer increment the flush id for every
`OrderingBarrier` call if it doesn't switch contexts. This shouldn't
matter in practice, but it means we won't have gaps in the flush ids
anymore.

Bug: 394772735
Change-Id: Ic66c5a3c57e1e95fafd4539d94effb5b95745583
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6240514
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Auto-Submit: Sunny Sachanandani <sunnyps@chromium.org>
Reviewed-by: Nasko Oskov <nasko@chromium.org>
Commit-Queue: Nasko Oskov <nasko@chromium.org>
Commit-Queue: Sunny Sachanandani <sunnyps@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1417087}
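For illustration, a minimal sketch of the identifier described above (GlobalFlushTracingId is the helper added by this CL; the example values are hypothetical):

    // The channel id occupies the high 32 bits and the per-channel flush id
    // the low 32 bits, so flushes from different channels can never collide.
    uint64_t id = gpu::GlobalFlushTracingId(/*channel_id=*/5,
                                            /*local_flush_id=*/42);
    // (5ull << 32) | 42 == 0x50000002A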
committed by Chromium LUCI CQ
parent c3dc59cd97
commit bc69f9633e

gpu/ipc
@@ -17,6 +17,7 @@
 #include "gpu/config/gpu_finch_features.h"
 #include "gpu/ipc/client/client_shared_image_interface.h"
 #include "gpu/ipc/common/command_buffer_id.h"
+#include "gpu/ipc/common/command_buffer_trace_utils.h"
 #include "gpu/ipc/common/gpu_watchdog_timeout.h"
 #include "ipc/ipc_channel_mojo.h"
 #include "mojo/public/cpp/bindings/sync_call_restrictions.h"
@@ -80,12 +81,26 @@ uint32_t GpuChannelHost::OrderingBarrier(
   AutoLock lock(deferred_message_lock_);
 
   if (pending_ordering_barrier_ &&
-      pending_ordering_barrier_->route_id != route_id)
+      pending_ordering_barrier_->route_id != route_id) {
     EnqueuePendingOrderingBarrier();
-  if (!pending_ordering_barrier_)
-    pending_ordering_barrier_.emplace();
+  }
+
+  unsigned int trace_event_flags = TRACE_EVENT_FLAG_FLOW_OUT;
+  if (!pending_ordering_barrier_) {
+    pending_ordering_barrier_.emplace();
+    pending_ordering_barrier_->deferred_message_id =
+        next_deferred_message_id_++;
+  } else {
+    trace_event_flags |= TRACE_EVENT_FLAG_FLOW_IN;
+  }
+
+  const uint64_t global_flush_id = GlobalFlushTracingId(
+      channel_id_, pending_ordering_barrier_->deferred_message_id);
+  TRACE_EVENT_WITH_FLOW0(
+      "gpu,toplevel.flow", "CommandBuffer::OrderingBarrier",
+      TRACE_ID_WITH_SCOPE("CommandBuffer::Flush", global_flush_id),
+      trace_event_flags);
 
-  pending_ordering_barrier_->deferred_message_id = next_deferred_message_id_++;
   pending_ordering_barrier_->route_id = route_id;
   pending_ordering_barrier_->put_offset = put_offset;
   pending_ordering_barrier_->sync_token_fences.insert(
@@ -93,6 +108,7 @@ uint32_t GpuChannelHost::OrderingBarrier(
       std::make_move_iterator(sync_token_fences.begin()),
       std::make_move_iterator(sync_token_fences.end()));
   pending_ordering_barrier_->release_count = release_count;
+
   return pending_ordering_barrier_->deferred_message_id;
 }
 
@@ -215,14 +231,24 @@ void GpuChannelHost::EnqueuePendingOrderingBarrier() {
   deferred_message_lock_.AssertAcquired();
   if (!pending_ordering_barrier_)
     return;
+
+  const uint64_t global_flush_id = GlobalFlushTracingId(
+      channel_id_, pending_ordering_barrier_->deferred_message_id);
+  TRACE_EVENT_WITH_FLOW0(
+      "gpu,toplevel.flow", "CommandBuffer::OrderingBarrier",
+      TRACE_ID_WITH_SCOPE("CommandBuffer::Flush", global_flush_id),
+      TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
+
   DCHECK_LT(enqueued_deferred_message_id_,
             pending_ordering_barrier_->deferred_message_id);
   enqueued_deferred_message_id_ =
       pending_ordering_barrier_->deferred_message_id;
+
   auto params = mojom::AsyncFlushParams::New(
       pending_ordering_barrier_->put_offset,
       pending_ordering_barrier_->deferred_message_id,
       pending_ordering_barrier_->sync_token_fences);
+
   deferred_messages_.push_back(mojom::DeferredRequest::New(
       mojom::DeferredRequestParams::NewCommandBufferRequest(
           mojom::DeferredCommandBufferRequest::New(
@@ -231,6 +257,7 @@ void GpuChannelHost::EnqueuePendingOrderingBarrier() {
                   std::move(params)))),
       std::move(pending_ordering_barrier_->sync_token_fences),
       pending_ordering_barrier_->release_count));
+
   pending_ordering_barrier_.reset();
 }
 
@@ -246,8 +273,27 @@ void GpuChannelHost::InternalFlush(uint32_t deferred_message_id) {
   deferred_message_lock_.AssertAcquired();
 
   EnqueuePendingOrderingBarrier();
+
   if (!deferred_messages_.empty() &&
       deferred_message_id > flushed_deferred_message_id_) {
+    if (TRACE_EVENT_CATEGORY_ENABLED("gpu,toplevel.flow")) {
+      for (auto& message : deferred_messages_) {
+        if (message->params->is_command_buffer_request()) {
+          auto& command_buffer_request =
+              message->params->get_command_buffer_request();
+          if (command_buffer_request->params->is_async_flush()) {
+            auto& flush = command_buffer_request->params->get_async_flush();
+            const uint64_t global_flush_id =
+                GlobalFlushTracingId(channel_id_, flush->flush_id);
+            TRACE_EVENT_WITH_FLOW0(
+                "gpu,toplevel.flow", "GpuChannel::Flush",
+                TRACE_ID_WITH_SCOPE("CommandBuffer::Flush", global_flush_id),
+                TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
+          }
+        }
+      }
+    }
+
     DCHECK_EQ(enqueued_deferred_message_id_, next_deferred_message_id_ - 1);
     flushed_deferred_message_id_ = enqueued_deferred_message_id_;
 
@@ -78,6 +78,7 @@ source_set("ipc_common_sources") {
 
   sources = [
     "command_buffer_id.h",
+    "command_buffer_trace_utils.h",
     "gpu_client_ids.h",
     "gpu_disk_cache_type.cc",
     "gpu_disk_cache_type.h",
gpu/ipc/common/command_buffer_trace_utils.h (new file, 16 lines)
@@ -0,0 +1,16 @@
+// Copyright 2025 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_COMMAND_BUFFER_TRACE_UTILS_H_
+#define GPU_IPC_COMMON_COMMAND_BUFFER_TRACE_UTILS_H_
+
+namespace gpu {
+
+inline uint64_t GlobalFlushTracingId(int channel_id, uint32_t local_flush_id) {
+  return (static_cast<uint64_t>(channel_id) << 32) | local_flush_id;
+}
+
+}  // namespace gpu
+
+#endif  // GPU_IPC_COMMON_COMMAND_BUFFER_TRACE_UTILS_H_
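Usage sketch (not part of the CL; variable names and values are hypothetical): the client side passes its channel_id_ and the service side passes channel_->client_id(), which identify the same channel, so both ends derive the same flow ID for a given flush id carried in AsyncFlushParams.

    int channel_id = 5;      // same value seen as channel_id_ / client_id()
    uint32_t flush_id = 42;  // deferred message id carried in AsyncFlushParams
    uint64_t client_side = gpu::GlobalFlushTracingId(channel_id, flush_id);
    uint64_t service_side = gpu::GlobalFlushTracingId(channel_id, flush_id);
    // client_side == service_side, which is what ties the client-process
    // events to the GPU-process events on a single flow.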
@@ -32,6 +32,7 @@
 #include "gpu/command_buffer/service/service_utils.h"
 #include "gpu/command_buffer/service/sync_point_manager.h"
 #include "gpu/config/gpu_crash_keys.h"
+#include "gpu/ipc/common/command_buffer_trace_utils.h"
 #include "gpu/ipc/service/gpu_channel.h"
 #include "gpu/ipc/service/gpu_channel_manager.h"
 #include "gpu/ipc/service/gpu_channel_manager_delegate.h"
@@ -478,14 +479,22 @@ void CommandBufferStub::OnAsyncFlush(
     int32_t put_offset,
     uint32_t flush_id,
     const std::vector<SyncToken>& sync_token_fences) {
-  TRACE_EVENT1("gpu", "CommandBufferStub::OnAsyncFlush", "put_offset",
-               put_offset);
   DCHECK(command_buffer_);
   // We received this message out-of-order. This should not happen but is here
   // to catch regressions. Ignore the message.
   DVLOG_IF(0, flush_id - last_flush_id_ >= 0x8000000U)
       << "Received a Flush message out-of-order";
 
+  const uint64_t global_flush_id =
+      GlobalFlushTracingId(channel_->client_id(), flush_id);
+  TRACE_EVENT_WITH_FLOW0(
+      "gpu,toplevel.flow", "CommandBuffer::Flush",
+      TRACE_ID_WITH_SCOPE("CommandBuffer::Flush", global_flush_id),
+      TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
+
+  TRACE_EVENT1("gpu", "CommandBufferStub::OnAsyncFlush", "put_offset",
+               put_offset);
+
   last_flush_id_ = flush_id;
   gpu::CommandBuffer::State pre_state = command_buffer_->GetState();
   UpdateActiveUrl();
@@ -504,9 +513,15 @@ void CommandBufferStub::OnAsyncFlush(
   ReportState();
 
 #if BUILDFLAG(IS_ANDROID)
-  GpuChannelManager* manager = channel_->gpu_channel_manager();
-  manager->DidAccessGpu();
+  channel_->gpu_channel_manager()->DidAccessGpu();
 #endif
+
+  if (!HasUnprocessedCommands()) {
+    TRACE_EVENT_WITH_FLOW0(
+        "gpu,toplevel.flow", "CommandBuffer::FlushComplete",
+        TRACE_ID_WITH_SCOPE("CommandBuffer::Flush", global_flush_id),
+        TRACE_EVENT_FLAG_FLOW_IN);
+  }
 }
 
 void CommandBufferStub::RegisterTransferBuffer(