0

RawDraw: support skia read access for the raw draw backing

For some use cases, using raw draw can have a negative performance
impact. This CL allows some tiles to fall back to non raw draw, if we
know raw draw will regress performance. For example, when a tile needs
to be blended with the content behind it, raw draw has to raster the
tile to an offscreen texture first, and then draw the offscreen texture
to the framebuffer.

Bug: 1288524,b/214325586
Change-Id: I62854ad74d85f779c283e0fd4ed8e1798d13c84a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3399207
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Commit-Queue: Peng Huang <penghuang@chromium.org>
Cr-Commit-Position: refs/heads/main@{#961174}
This commit is contained in:
Peng Huang
2022-01-19 23:09:59 +00:00
committed by Chromium LUCI CQ
parent fd0e15d7d5
commit 8b9288f96a
12 changed files with 270 additions and 88 deletions

@ -129,7 +129,8 @@ DisplayResourceProviderSkia::LockSetForExternalUse::LockResource(
ResourceId id,
bool maybe_concurrent_reads,
bool is_video_plane,
const absl::optional<gfx::ColorSpace>& override_color_space) {
const absl::optional<gfx::ColorSpace>& override_color_space,
bool raw_draw_is_possible) {
auto it = resource_provider_->resources_.find(id);
DCHECK(it != resource_provider_->resources_.end());
@ -159,7 +160,8 @@ DisplayResourceProviderSkia::LockSetForExternalUse::LockResource(
resource_provider_->external_use_client_->CreateImageContext(
resource.transferable.mailbox_holder, resource.transferable.size,
resource.transferable.format, maybe_concurrent_reads,
resource.transferable.ycbcr_info, std::move(image_color_space));
resource.transferable.ycbcr_info, std::move(image_color_space),
raw_draw_is_possible);
}
resource.locked_for_external_use = true;

@ -59,7 +59,8 @@ class VIZ_SERVICE_EXPORT DisplayResourceProviderSkia
bool maybe_concurrent_reads,
bool is_video_plane,
const absl::optional<gfx::ColorSpace>& override_color_space =
absl::nullopt);
absl::nullopt,
bool raw_draw_if_possible = false);
// Unlock all locked resources with a |sync_token|. The |sync_token| should
// be waited on before reusing the resource's backing to ensure that any

@ -59,14 +59,15 @@ class MockExternalUseClient : public ExternalUseClient {
MOCK_METHOD1(ReleaseImageContexts,
gpu::SyncToken(
std::vector<std::unique_ptr<ImageContext>> image_contexts));
MOCK_METHOD6(CreateImageContext,
std::unique_ptr<ImageContext>(
const gpu::MailboxHolder&,
const gfx::Size&,
ResourceFormat,
bool,
const absl::optional<gpu::VulkanYCbCrInfo>& ycbcr_info,
sk_sp<SkColorSpace>));
MOCK_METHOD7(
CreateImageContext,
std::unique_ptr<ImageContext>(const gpu::MailboxHolder&,
const gfx::Size&,
ResourceFormat,
bool,
const absl::optional<gpu::VulkanYCbCrInfo>&,
sk_sp<SkColorSpace>,
bool));
};
class DisplayResourceProviderSkiaTest : public testing::Test {
@ -158,7 +159,7 @@ TEST_F(DisplayResourceProviderSkiaTest, LockForExternalUse) {
auto* image_context = owned_image_context.get();
gpu::MailboxHolder holder;
EXPECT_CALL(client_, CreateImageContext(_, _, _, _, _, _))
EXPECT_CALL(client_, CreateImageContext(_, _, _, _, _, _, _))
.WillOnce(DoAll(SaveArg<0>(&holder),
Return(ByMove(std::move(owned_image_context)))));
@ -238,7 +239,7 @@ TEST_F(DisplayResourceProviderSkiaTest, LockForExternalUseWebView) {
auto* image_context = owned_image_context.get();
gpu::MailboxHolder holder;
EXPECT_CALL(client_, CreateImageContext(_, _, _, _, _, _))
EXPECT_CALL(client_, CreateImageContext(_, _, _, _, _, _, _))
.WillOnce(DoAll(SaveArg<0>(&holder),
Return(ByMove(std::move(owned_image_context)))));

@ -121,7 +121,8 @@ class VIZ_SERVICE_EXPORT ExternalUseClient {
ResourceFormat format,
bool maybe_concurrent_reads,
const absl::optional<gpu::VulkanYCbCrInfo>& ycbcr_info,
sk_sp<SkColorSpace> color_space) = 0;
sk_sp<SkColorSpace> color_space,
bool raw_draw_if_possible) = 0;
virtual gpu::SyncToken ReleaseImageContexts(
std::vector<std::unique_ptr<ImageContext>> image_contexts) = 0;

@ -574,7 +574,8 @@ class SkiaRenderer::ScopedSkImageBuilder {
SkAlphaType alpha_type = kPremul_SkAlphaType,
GrSurfaceOrigin origin = kTopLeft_GrSurfaceOrigin,
const absl::optional<gfx::ColorSpace>&
override_colorspace = absl::nullopt);
override_colorspace = absl::nullopt,
bool raw_draw_if_possible = false);
ScopedSkImageBuilder(const ScopedSkImageBuilder&) = delete;
ScopedSkImageBuilder& operator=(const ScopedSkImageBuilder&) = delete;
@ -597,7 +598,8 @@ SkiaRenderer::ScopedSkImageBuilder::ScopedSkImageBuilder(
bool maybe_concurrent_reads,
SkAlphaType alpha_type,
GrSurfaceOrigin origin,
const absl::optional<gfx::ColorSpace>& override_color_space) {
const absl::optional<gfx::ColorSpace>& override_color_space,
bool raw_draw_if_possible) {
if (!resource_id)
return;
auto* resource_provider = skia_renderer->resource_provider();
@ -605,7 +607,7 @@ SkiaRenderer::ScopedSkImageBuilder::ScopedSkImageBuilder(
auto* image_context = skia_renderer->lock_set_for_external_use_->LockResource(
resource_id, maybe_concurrent_reads, /*is_video_plane=*/false,
override_color_space);
override_color_space, raw_draw_if_possible);
// |ImageContext::image| provides thread safety: (a) this ImageContext is
// only accessed by GPU thread after |image| is set and (b) the fields of
@ -2206,9 +2208,18 @@ void SkiaRenderer::DrawTileDrawQuad(const TileDrawQuad* quad,
// |resource_provider()| can be NULL in resourceless software draws, which
// should never produce tile quads in the first place.
DCHECK(resource_provider());
// If quad->ShouldDrawWithBlending() is true, we need to raster tile paint ops
// to an offscreen texture first, and then blend it with content behind the
// tile. Since a tile could be used across frames, it would be better to not
// use raw draw.
bool raw_draw_if_possible =
is_using_raw_draw_ && !quad->ShouldDrawWithBlending();
ScopedSkImageBuilder builder(
this, quad->resource_id(), /*maybe_concurrent_reads=*/false,
quad->is_premultiplied ? kPremul_SkAlphaType : kUnpremul_SkAlphaType);
quad->is_premultiplied ? kPremul_SkAlphaType : kUnpremul_SkAlphaType,
/*origin=*/kTopLeft_GrSurfaceOrigin,
/*override_colorspace=*/absl::nullopt, raw_draw_if_possible);
params->vis_tex_coords = cc::MathUtil::ScaleRectProportional(
quad->tex_coord_rect, gfx::RectF(quad->rect), params->visible_rect);

@ -26,14 +26,16 @@ ImageContextImpl::ImageContextImpl(
bool maybe_concurrent_reads,
const absl::optional<gpu::VulkanYCbCrInfo>& ycbcr_info,
sk_sp<SkColorSpace> color_space,
const bool allow_keeping_read_access)
bool allow_keeping_read_access,
bool raw_draw_if_possible)
: ImageContext(mailbox_holder,
size,
resource_format,
ycbcr_info,
color_space),
maybe_concurrent_reads_(maybe_concurrent_reads),
allow_keeping_read_access_(allow_keeping_read_access) {}
allow_keeping_read_access_(allow_keeping_read_access),
raw_draw_if_possible_(raw_draw_if_possible) {}
ImageContextImpl::~ImageContextImpl() {
if (fallback_context_state_)
@ -169,7 +171,10 @@ bool ImageContextImpl::BeginRasterAccess(
return true;
}
auto raster = representation_factory->ProduceRaster(mailbox_holder().mailbox);
auto raster =
raw_draw_if_possible_
? representation_factory->ProduceRaster(mailbox_holder().mailbox)
: nullptr;
if (!raster)
return false;

@ -52,7 +52,8 @@ class ImageContextImpl final : public ExternalUseClient::ImageContext {
bool maybe_concurrent_reads,
const absl::optional<gpu::VulkanYCbCrInfo>& ycbcr_info,
sk_sp<SkColorSpace> color_space,
const bool allow_keeping_read_access = true);
bool allow_keeping_read_access = true,
bool raw_draw_if_possible = false);
ImageContextImpl(const ImageContextImpl&) = delete;
ImageContextImpl& operator=(const ImageContextImpl&) = delete;
@ -104,6 +105,7 @@ class ImageContextImpl final : public ExternalUseClient::ImageContext {
const bool maybe_concurrent_reads_ = false;
const bool allow_keeping_read_access_ = true;
const bool raw_draw_if_possible_ = false;
// Fallback in case we cannot produce a |representation_|.
raw_ptr<gpu::SharedContextState> fallback_context_state_ = nullptr;

@ -495,10 +495,12 @@ SkiaOutputSurfaceImpl::CreateImageContext(
ResourceFormat format,
bool maybe_concurrent_reads,
const absl::optional<gpu::VulkanYCbCrInfo>& ycbcr_info,
sk_sp<SkColorSpace> color_space) {
return std::make_unique<ImageContextImpl>(holder, size, format,
maybe_concurrent_reads, ycbcr_info,
std::move(color_space));
sk_sp<SkColorSpace> color_space,
bool raw_draw_if_possible) {
return std::make_unique<ImageContextImpl>(
holder, size, format, maybe_concurrent_reads, ycbcr_info,
std::move(color_space),
/*allow_keeping_read_access=*/true, raw_draw_if_possible);
}
void SkiaOutputSurfaceImpl::SwapBuffers(OutputSurfaceFrame frame) {

@ -171,7 +171,8 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurfaceImpl : public SkiaOutputSurface {
ResourceFormat format,
bool maybe_concurrent_reads,
const absl::optional<gpu::VulkanYCbCrInfo>& ycbcr_info,
sk_sp<SkColorSpace> color_space) override;
sk_sp<SkColorSpace> color_space,
bool raw_draw_if_possible) override;
void InitDelegatedInkPointRendererReceiver(
mojo::PendingReceiver<gfx::mojom::DelegatedInkPointRenderer>

@ -184,7 +184,8 @@ FakeSkiaOutputSurface::CreateImageContext(
ResourceFormat format,
bool concurrent_reads,
const absl::optional<gpu::VulkanYCbCrInfo>& ycbcr_info,
sk_sp<SkColorSpace> color_space) {
sk_sp<SkColorSpace> color_space,
bool raw_draw_if_possible) {
return std::make_unique<ExternalUseClient::ImageContext>(
holder, size, format, ycbcr_info, std::move(color_space));
}

@ -131,7 +131,8 @@ class FakeSkiaOutputSurface : public SkiaOutputSurface {
ResourceFormat format,
bool concurrent_reads,
const absl::optional<gpu::VulkanYCbCrInfo>& ycbcr_info,
sk_sp<SkColorSpace> color_space) override;
sk_sp<SkColorSpace> color_space,
bool raw_draw_if_possible) override;
// If set true, callbacks triggering will be in a reverse order as SignalQuery
// calls.

@ -6,10 +6,15 @@
#include "base/logging.h"
#include "base/thread_annotations.h"
#include "base/threading/thread_checker.h"
#include "cc/paint/paint_op_buffer.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
namespace gpu {
@ -38,20 +43,22 @@ class RawDrawBacking : public ClearTrackingSharedImageBacking {
true /* is_thread_safe */) {}
~RawDrawBacking() override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
AutoLock auto_lock(this);
DCHECK_EQ(read_count_, 0);
DCHECK(!is_write_);
ResetPaintOpBuffer();
DestroyBackendTexture();
}
// SharedImageBacking implementation.
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
NOTREACHED() << "Not supported.";
NOTIMPLEMENTED();
return false;
}
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
NOTREACHED() << "Not supported.";
NOTIMPLEMENTED();
}
void OnMemoryDump(const std::string& dump_name,
@ -63,18 +70,24 @@ class RawDrawBacking : public ClearTrackingSharedImageBacking {
std::unique_ptr<SharedImageRepresentationRaster> ProduceRaster(
SharedImageManager* manager,
MemoryTypeTracker* tracker) override;
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
scoped_refptr<SharedContextState> context_state) override;
private:
class RepresentationRaster;
class RepresentationSkia;
void ResetPaintOpBuffer() {
if (!paint_op_buffer_) {
DCHECK(!clear_color_);
DCHECK(!paint_op_release_callback_);
DCHECK(!backend_texture_.isValid());
DCHECK(!promise_texture_);
return;
}
final_msaa_count_ = 0;
clear_color_.reset();
paint_op_buffer_->Reset();
@ -82,13 +95,155 @@ class RawDrawBacking : public ClearTrackingSharedImageBacking {
std::move(paint_op_release_callback_).Run();
}
void DestroyBackendTexture() {
if (backend_texture_.isValid()) {
DCHECK(context_state_);
DeleteGrBackendTexture(context_state_.get(), &backend_texture_);
backend_texture_ = {};
promise_texture_.reset();
}
}
cc::PaintOpBuffer* BeginRasterWriteAccess(
int final_msaa_count,
const SkSurfaceProps& surface_props,
const absl::optional<SkColor>& clear_color) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
AutoLock auto_lock(this);
if (read_count_) {
LOG(ERROR) << "The backing is being read.";
return nullptr;
}
if (is_write_) {
LOG(ERROR) << "The backing is being written.";
return nullptr;
}
is_write_ = true;
ResetPaintOpBuffer();
// Should we keep the backing?
DestroyBackendTexture();
if (!paint_op_buffer_)
paint_op_buffer_ = sk_make_sp<cc::PaintOpBuffer>();
final_msaa_count_ = final_msaa_count;
surface_props_ = surface_props;
clear_color_ = clear_color;
return paint_op_buffer_.get();
}
void EndRasterWriteAccess(base::OnceClosure callback) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
AutoLock auto_lock(this);
DCHECK_EQ(read_count_, 0);
DCHECK(is_write_);
is_write_ = false;
if (callback) {
DCHECK(!paint_op_release_callback_);
paint_op_release_callback_ = std::move(callback);
}
}
cc::PaintOpBuffer* BeginRasterReadAccess(
absl::optional<SkColor>& clear_color) {
// paint ops will be read on compositor thread, so do not check thread with
// DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
AutoLock auto_lock(this);
if (is_write_) {
LOG(ERROR) << "The backing is being written.";
return nullptr;
}
// If |backend_texture_| is valid, |paint_op_buffer_| should be played back
// to the |backend_texture_| already, and |paint_op_buffer_| could be
// released already. So we return nullptr here, and then SkiaRenderer will
// fallback to using |backend_texture_|.
if (backend_texture_.isValid())
return nullptr;
read_count_++;
if (!paint_op_buffer_) {
paint_op_buffer_ = sk_make_sp<cc::PaintOpBuffer>();
}
clear_color = clear_color_;
return paint_op_buffer_.get();
}
sk_sp<SkPromiseImageTexture> BeginSkiaReadAccess() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
AutoLock auto_lock(this);
if (backend_texture_.isValid()) {
DCHECK(promise_texture_);
read_count_++;
return promise_texture_;
}
auto mipmap = usage() & SHARED_IMAGE_USAGE_MIPMAP ? GrMipMapped::kYes
: GrMipMapped::kNo;
auto sk_color = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
backend_texture_ = context_state_->gr_context()->createBackendTexture(
size().width(), size().height(), sk_color, mipmap, GrRenderable::kYes,
GrProtected::kNo);
if (!backend_texture_.isValid()) {
DLOG(ERROR) << "createBackendTexture() failed with SkColorType:"
<< sk_color;
return nullptr;
}
promise_texture_ = SkPromiseImageTexture::Make(backend_texture_);
auto surface = SkSurface::MakeFromBackendTexture(
context_state_->gr_context(), backend_texture_, surface_origin(),
final_msaa_count_, sk_color, color_space().ToSkColorSpace(),
&surface_props_);
if (clear_color_)
surface->getCanvas()->clear(*clear_color_);
if (paint_op_buffer_) {
cc::PlaybackParams playback_params(nullptr, SkM44());
paint_op_buffer_->Playback(surface->getCanvas(), playback_params);
}
read_count_++;
return promise_texture_;
}
void EndReadAccess() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
AutoLock auto_lock(this);
DCHECK_GE(read_count_, 0);
DCHECK(!is_write_);
read_count_--;
// If the |backend_texture_| is valid, the |paint_op_buffer_| should have
// been played back to the |backend_texture_| already, so we can release
// the |paint_op_buffer_| now.
if (read_count_ == 0 && backend_texture_.isValid())
ResetPaintOpBuffer();
}
int32_t final_msaa_count_ = 0;
SkSurfaceProps surface_props_{};
absl::optional<SkColor> clear_color_;
sk_sp<cc::PaintOpBuffer> paint_op_buffer_;
base::OnceClosure paint_op_release_callback_;
scoped_refptr<SharedContextState> context_state_;
GrBackendTexture backend_texture_;
sk_sp<SkPromiseImageTexture> promise_texture_;
bool is_write_ GUARDED_BY(lock_) = false;
int read_count_ GUARDED_BY(lock_) = 0;
THREAD_CHECKER(thread_checker_);
};
class RawDrawBacking::RepresentationRaster
@ -100,71 +255,60 @@ class RawDrawBacking::RepresentationRaster
: SharedImageRepresentationRaster(manager, backing, tracker) {}
~RepresentationRaster() override = default;
cc::PaintOpBuffer* BeginReadAccess(
absl::optional<SkColor>& clear_color) override {
AutoLock auto_lock(raw_draw_backing());
if (raw_draw_backing()->is_write_) {
LOG(ERROR) << "The backing is being written.";
return nullptr;
}
raw_draw_backing()->read_count_++;
if (!raw_draw_backing()->paint_op_buffer_) {
raw_draw_backing()->paint_op_buffer_ = sk_make_sp<cc::PaintOpBuffer>();
}
clear_color = raw_draw_backing()->clear_color_;
return raw_draw_backing()->paint_op_buffer_.get();
}
void EndReadAccess() override {
AutoLock auto_lock(raw_draw_backing());
DCHECK_GE(raw_draw_backing()->read_count_, 0);
DCHECK(!raw_draw_backing()->is_write_);
raw_draw_backing()->read_count_--;
}
cc::PaintOpBuffer* BeginWriteAccess(
int final_msaa_count,
const SkSurfaceProps& surface_props,
const absl::optional<SkColor>& clear_color) override {
AutoLock auto_lock(raw_draw_backing());
if (raw_draw_backing()->read_count_) {
LOG(ERROR) << "The backing is being read.";
return nullptr;
}
if (raw_draw_backing()->is_write_) {
LOG(ERROR) << "The backing is being written.";
return nullptr;
}
raw_draw_backing()->is_write_ = true;
raw_draw_backing()->ResetPaintOpBuffer();
if (!raw_draw_backing()->paint_op_buffer_) {
raw_draw_backing()->paint_op_buffer_ = sk_make_sp<cc::PaintOpBuffer>();
}
raw_draw_backing()->final_msaa_count_ = final_msaa_count;
raw_draw_backing()->clear_color_ = clear_color;
return raw_draw_backing()->paint_op_buffer_.get();
return raw_draw_backing()->BeginRasterWriteAccess(
final_msaa_count, surface_props, clear_color);
}
void EndWriteAccess(base::OnceClosure callback) override {
AutoLock auto_lock(raw_draw_backing());
DCHECK_EQ(raw_draw_backing()->read_count_, 0);
DCHECK(raw_draw_backing()->is_write_);
raw_draw_backing()->is_write_ = false;
if (callback) {
DCHECK(!raw_draw_backing()->paint_op_release_callback_);
raw_draw_backing()->paint_op_release_callback_ = std::move(callback);
}
raw_draw_backing()->EndRasterWriteAccess(std::move(callback));
}
cc::PaintOpBuffer* BeginReadAccess(
absl::optional<SkColor>& clear_color) override {
return raw_draw_backing()->BeginRasterReadAccess(clear_color);
}
void EndReadAccess() override { raw_draw_backing()->EndReadAccess(); }
private:
RawDrawBacking* raw_draw_backing() {
return static_cast<RawDrawBacking*>(backing());
}
};
class RawDrawBacking::RepresentationSkia
: public SharedImageRepresentationSkia {
public:
RepresentationSkia(SharedImageManager* manager,
SharedImageBacking* backing,
MemoryTypeTracker* tracker)
: SharedImageRepresentationSkia(manager, backing, tracker) {}
bool SupportsMultipleConcurrentReadAccess() override { return true; }
sk_sp<SkPromiseImageTexture> BeginWriteAccess(
std::vector<GrBackendSemaphore>* begin_semaphores,
std::vector<GrBackendSemaphore>* end_semaphores,
std::unique_ptr<GrBackendSurfaceMutableState>* end_state) override {
NOTIMPLEMENTED();
return nullptr;
}
void EndWriteAccess(sk_sp<SkSurface> surface) override { NOTIMPLEMENTED(); }
sk_sp<SkPromiseImageTexture> BeginReadAccess(
std::vector<GrBackendSemaphore>* begin_semaphores,
std::vector<GrBackendSemaphore>* end_semaphores,
std::unique_ptr<GrBackendSurfaceMutableState>* end_state) override {
return raw_draw_backing()->BeginSkiaReadAccess();
}
void EndReadAccess() override { raw_draw_backing()->EndReadAccess(); }
private:
RawDrawBacking* raw_draw_backing() {
return static_cast<RawDrawBacking*>(backing());
@ -177,6 +321,16 @@ std::unique_ptr<SharedImageRepresentationRaster> RawDrawBacking::ProduceRaster(
return std::make_unique<RepresentationRaster>(manager, this, tracker);
}
std::unique_ptr<SharedImageRepresentationSkia> RawDrawBacking::ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
scoped_refptr<SharedContextState> context_state) {
if (!context_state_)
context_state_ = context_state;
DCHECK(context_state_ == context_state);
return std::make_unique<RepresentationSkia>(manager, this, tracker);
}
} // namespace
SharedImageBackingFactoryRawDraw::SharedImageBackingFactoryRawDraw() = default;