[media] Delete VTVDA
This CL removes VTVideoDecodeAccelerator along with a few bits of supporting code, including the VideoToolboxVideoDecoder feature flag. The VideoToolboxAv1Decoding flag is also removed, as it has been enabled for (more than) a full release cycle.

Bug: 40227557
Change-Id: I16668424ac22964af9d3bbe18fc43d347d1bb5d9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5597779
Commit-Queue: Dan Sanders <sandersd@chromium.org>
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1310769}
Committed by: Chromium LUCI CQ
Parent: 6adaf20209
Commit: 5119c40eb4
Changed directories: content/gpu, media, testing/variations
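For orientation, the net effect of retiring the VideoToolboxVideoDecoder flag is easiest to see in the GpuMojoMediaClientMac hunks near the end of this diff, where the feature-flag branch and the VdaVideoDecoder fallback disappear. The sketch below condenses those hunks (the flag check is inlined and some arguments are abbreviated); it is illustrative only, not the exact file contents.

// Before this CL (abridged): the new decoder sat behind a feature flag, with the
// legacy VTVideoDecodeAccelerator path (wrapped by VdaVideoDecoder) as fallback.
std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(VideoDecoderTraits& traits) {
  // In the real code this check lived in a UseVTVD() helper.
  if (base::FeatureList::IsEnabled(kVideoToolboxVideoDecoder)) {
    return std::make_unique<VideoToolboxVideoDecoder>(
        traits.task_runner, traits.media_log->Clone(), gpu_workarounds_,
        gpu_task_runner_, traits.get_command_buffer_stub_cb);
  }
  return VdaVideoDecoder::Create(/* ...legacy VTVDA arguments elided... */);
}

// After this CL: the flag check and the fallback are gone; macOS always uses
// VideoToolboxVideoDecoder.
std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(VideoDecoderTraits& traits) {
  return std::make_unique<VideoToolboxVideoDecoder>(
      traits.task_runner, traits.media_log->Clone(), gpu_workarounds_,
      gpu_task_runner_, traits.get_command_buffer_stub_cb);
}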
@@ -105,7 +105,6 @@
#include "base/message_loop/message_pump_apple.h"
#include "components/metal_util/device_removal.h"
#include "gpu/ipc/service/built_in_shader_cache_loader.h"
#include "media/gpu/mac/vt_video_decode_accelerator_mac.h"
#include "sandbox/mac/seatbelt.h"
#endif

@@ -167,13 +166,6 @@ class ContentSandboxHelper : public gpu::GpuSandboxHelper {
media::PreSandboxMediaFoundationInitialization();
#endif

#if BUILDFLAG(IS_MAC)
{
TRACE_EVENT0("gpu", "Initialize VideoToolbox");
media::InitializeVideoToolbox();
}
#endif

// On Linux, reading system memory doesn't work through the GPU sandbox.
// This value is cached, so access it here to populate the cache.
base::SysInfo::AmountOfPhysicalMemory();
@@ -876,22 +876,6 @@ BASE_FEATURE(kVideoPictureInPictureMinimizeButton,
"VideoPictureInPictureMinimizeButton",
base::FEATURE_DISABLED_BY_DEFAULT);

#if BUILDFLAG(IS_APPLE)
// Use VideoToolbox for AV1 hardware decoding.
// Owner: dalecurtis@chromium.org, sandersd@chromium.org
// Expiry: When enabled by default for a full release cycle
BASE_FEATURE(kVideoToolboxAv1Decoding,
"VideoToolboxAv1Decoding",
base::FEATURE_ENABLED_BY_DEFAULT);

// Use the new VideoToolboxVideoDecoder for hardware decoding.
// Owner: sandersd@chromium.org
// Expiry: When VTVideoDecodeAccelerator is deleted
BASE_FEATURE(kVideoToolboxVideoDecoder,
"VideoToolboxVideoDecoder",
base::FEATURE_ENABLED_BY_DEFAULT);
#endif // BUILDFLAG(IS_APPLE)

// A video encoder is allowed to drop a frame in cast mirroring.
BASE_FEATURE(kCastVideoEncoderFrameDrop,
"CastVideoEncoderFrameDrop",
@@ -372,10 +372,6 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kV4L2FlatStatefulVideoDecoder);
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVideoBlitColorAccuracy);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVideoPictureInPictureMinimizeButton);
#if BUILDFLAG(IS_APPLE)
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVideoToolboxAv1Decoding);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVideoToolboxVideoDecoder);
#endif // BUILDFLAG(IS_APPLE)
MEDIA_EXPORT BASE_DECLARE_FEATURE(kCastVideoEncoderFrameDrop);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kWebCodecsVideoEncoderFrameDrop);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kWebRTCHardwareVideoEncoderFrameDrop);
@@ -48,16 +48,7 @@ class CommandBufferHelperImpl

stub_->AddDestructionObserver(this);
wait_sequence_id_ = stub_->channel()->scheduler()->CreateSequence(
#if BUILDFLAG(IS_MAC)
// Workaround for crbug.com/1035750.
// TODO(sandersd): Investigate whether there is a deeper scheduling
// problem that can be resolved.
gpu::SchedulingPriority::kHigh
#else
gpu::SchedulingPriority::kNormal
#endif // BUILDFLAG(IS_MAC)
,
stub_->channel()->task_runner());
gpu::SchedulingPriority::kNormal, stub_->channel()->task_runner());
#if !BUILDFLAG(IS_ANDROID)
decoder_helper_ = GLES2DecoderHelper::Create(stub_->decoder_context());
#endif
@@ -16,9 +16,6 @@
#include "media/gpu/media_gpu_export.h"
#include "media/media_buildflags.h"

#if BUILDFLAG(IS_APPLE)
#include "media/gpu/mac/vt_video_decode_accelerator_mac.h"
#endif
#if BUILDFLAG(USE_V4L2_CODEC) && \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH))
#include "media/gpu/v4l2/legacy/v4l2_video_decode_accelerator.h"
@@ -44,17 +41,12 @@ gpu::VideoDecodeAcceleratorCapabilities GetDecoderCapabilitiesInternal(
// TODO(posciak,henryhsu): improve this so that we choose a superset of
// resolutions and other supported profile parameters.
VideoDecodeAccelerator::Capabilities capabilities;
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
#if BUILDFLAG(USE_V4L2_CODEC) && \
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION) && BUILDFLAG(USE_V4L2_CODEC) && \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH))
GpuVideoAcceleratorUtil::InsertUniqueDecodeProfiles(
V4L2VideoDecodeAccelerator::GetSupportedProfiles(),
&capabilities.supported_profiles);
#endif
#elif BUILDFLAG(IS_APPLE)
capabilities.supported_profiles =
VTVideoDecodeAccelerator::GetSupportedProfiles(workarounds);
#endif

return GpuVideoAcceleratorUtil::ConvertMediaToGpuDecodeCapabilities(
capabilities);
@@ -122,10 +114,6 @@ GpuVideoDecodeAcceleratorFactory::CreateVDA(
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH))
&GpuVideoDecodeAcceleratorFactory::CreateV4L2VDA,
#endif

#if BUILDFLAG(IS_APPLE)
&GpuVideoDecodeAcceleratorFactory::CreateVTVDA,
#endif
};

std::unique_ptr<VideoDecodeAccelerator> vda;
@@ -152,18 +140,6 @@ GpuVideoDecodeAcceleratorFactory::CreateV4L2VDA(
}
#endif

#if BUILDFLAG(IS_APPLE)
std::unique_ptr<VideoDecodeAccelerator>
GpuVideoDecodeAcceleratorFactory::CreateVTVDA(
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuPreferences& gpu_preferences,
MediaLog* media_log) const {
std::unique_ptr<VideoDecodeAccelerator> decoder;
decoder.reset(new VTVideoDecodeAccelerator(workarounds, media_log));
return decoder;
}
#endif

GpuVideoDecodeAcceleratorFactory::GpuVideoDecodeAcceleratorFactory() = default;
GpuVideoDecodeAcceleratorFactory::~GpuVideoDecodeAcceleratorFactory() = default;
@@ -22,8 +22,8 @@ target(link_target_type, "service") {
"media_gpu_channel_manager.h",
]

# The legacy VDA API is not supported on Android or Windows.
if (!is_android && !is_win) {
# The legacy VDA API is not supported on Android, Windows, or macOS.
if (!is_android && !is_win && !is_apple) {
sources += [
"picture_buffer_manager.cc",
"picture_buffer_manager.h",
@@ -62,8 +62,8 @@ source_set("unit_tests") {
testonly = true
sources = []

# The legacy VDA API is not supported on Android or Windows.
if (!is_android && !is_win) {
# The legacy VDA API is not supported on Android, Windows, or macOS.
if (!is_android && !is_win && !is_apple) {
sources += [
"picture_buffer_manager_unittest.cc",
"vda_video_decoder_unittest.cc",
@@ -147,9 +147,7 @@ VdaVideoDecoder::VdaVideoDecoder(
DCHECK_EQ(vda_capabilities_.flags, 0U);
DCHECK(media_log_);

#if !BUILDFLAG(IS_APPLE)
CHECK_EQ(output_mode, VideoDecodeAccelerator::Config::OutputMode::kImport);
#endif

gpu_weak_this_ = gpu_weak_this_factory_.GetWeakPtr();
parent_weak_this_ = parent_weak_this_factory_.GetWeakPtr();
@@ -335,23 +333,8 @@ void VdaVideoDecoder::InitializeOnGpuThread() {

// Set up |command_buffer_helper_|.
if (!reinitializing_) {
#if BUILDFLAG(IS_APPLE)
CHECK_EQ(output_mode_,
VideoDecodeAccelerator::Config::OutputMode::kAllocate);
command_buffer_helper_ = std::move(create_command_buffer_helper_cb_).Run();
if (!command_buffer_helper_) {
parent_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&VdaVideoDecoder::InitializeDone, parent_weak_this_,
DecoderStatus::Codes::kFailed));
return;
}
picture_buffer_manager_->Initialize(gpu_task_runner_,
command_buffer_helper_);
#else
CHECK_EQ(output_mode_, VideoDecodeAccelerator::Config::OutputMode::kImport);
picture_buffer_manager_->Initialize(gpu_task_runner_, nullptr);
#endif
}

// Convert the configuration.
@@ -796,13 +779,6 @@ void VdaVideoDecoder::NotifyError(VideoDecodeAccelerator::Error error) {
parent_weak_this_, error));
}

#if BUILDFLAG(IS_APPLE)
gpu::SharedImageStub* VdaVideoDecoder::GetSharedImageStub() const {
DCHECK_EQ(output_mode_, VideoDecodeAccelerator::Config::OutputMode::kAllocate);
return command_buffer_helper_->GetSharedImageStub();
}
#endif

void VdaVideoDecoder::NotifyErrorOnParentThread(
VideoDecodeAccelerator::Error error) {
DVLOG(1) << __func__ << "(" << error << ")";
@@ -146,10 +146,6 @@ class VdaVideoDecoder : public VideoDecoder,
void NotifyResetDone() override;
void NotifyError(VideoDecodeAccelerator::Error error) override;

#if BUILDFLAG(IS_APPLE)
gpu::SharedImageStub* GetSharedImageStub() const override;
#endif

// Tasks and thread hopping.
static void CleanupOnGpuThread(std::unique_ptr<VdaVideoDecoder>);
void InitializeOnGpuThread();
@@ -212,9 +208,6 @@ class VdaVideoDecoder : public VideoDecoder,
// Only written on the GPU thread during initialization, which is mutually
// exclusive with reads on the parent thread.
std::unique_ptr<VideoDecodeAccelerator> vda_;
#if BUILDFLAG(IS_APPLE)
scoped_refptr<CommandBufferHelper> command_buffer_helper_;
#endif
bool vda_initialized_ = false;
bool decode_on_parent_thread_ = false;
bool reinitializing_ = false;
@@ -350,13 +350,8 @@ class VdaVideoDecoderTest : public testing::TestWithParam<bool> {
raw_ptr<VideoDecodeAccelerator::Client, AcrossTasksDanglingUntriaged> client_;
uint64_t next_release_count_ = 1;

#if BUILDFLAG(IS_APPLE)
static constexpr auto output_mode_ =
VideoDecodeAccelerator::Config::OutputMode::kAllocate;
#else
static constexpr auto output_mode_ =
VideoDecodeAccelerator::Config::OutputMode::kImport;
#endif
};

TEST_P(VdaVideoDecoderTest, CreateAndDestroy) {}
@@ -431,7 +426,6 @@ TEST_P(VdaVideoDecoderTest, Decode_NotifyError) {

// The below tests rely on creation of video frames from GL textures, which is
// not supported on Apple platforms.
#if !BUILDFLAG(IS_APPLE)
TEST_P(VdaVideoDecoderTest, Decode_OutputAndReuse) {
Initialize();
int32_t bitstream_id = Decode(base::TimeDelta());
@@ -487,7 +481,6 @@ TEST_P(VdaVideoDecoderTest, Decode_Output_MaintainsAspect) {

UpdateSyncTokenAndDropFrame(std::move(frame), picture_buffer_id);
}
#endif // !BUILDFLAG(IS_APPLE)

TEST_P(VdaVideoDecoderTest, Flush) {
Initialize();
@@ -37,12 +37,8 @@ source_set("mac") {
"video_toolbox_video_decoder.h",
"video_toolbox_vp9_accelerator.cc",
"video_toolbox_vp9_accelerator.h",
"vp9_super_frame_bitstream_filter.cc",
"vp9_super_frame_bitstream_filter.h",
"vt_config_util.h",
"vt_config_util.mm",
"vt_video_decode_accelerator_mac.h",
"vt_video_decode_accelerator_mac.mm",
"vt_video_encode_accelerator_mac.h",
"vt_video_encode_accelerator_mac.mm",
]
@@ -99,7 +95,6 @@ source_set("unit_tests") {
"video_toolbox_h264_accelerator_unittest.cc",
"video_toolbox_output_queue_unittest.cc",
"video_toolbox_vp9_accelerator_unittest.cc",
"vp9_super_frame_bitstream_filter_unittest.cc",
"vt_config_util_unittest.mm",
]
if (enable_hevc_parser_and_hw_decoder) {
@@ -493,8 +493,7 @@ VideoToolboxVideoDecoder::GetSupportedVideoDecoderConfigs(
}
}

if (base::FeatureList::IsEnabled(kVideoToolboxAv1Decoding) &&
!gpu_workarounds.disable_accelerated_av1_decode && SupportsAV1()) {
if (!gpu_workarounds.disable_accelerated_av1_decode && SupportsAV1()) {
supported.emplace_back(
/*profile_min=*/AV1PROFILE_PROFILE_MAIN,
/*profile_max=*/AV1PROFILE_PROFILE_MAIN,
@@ -1,223 +0,0 @@
|
||||
// Copyright 2020 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/gpu/mac/vp9_super_frame_bitstream_filter.h"
|
||||
|
||||
#include "base/apple/osstatus_logging.h"
|
||||
#include "base/bits.h"
|
||||
#include "base/check.h"
|
||||
#include "base/containers/heap_array.h"
|
||||
#include "base/logging.h"
|
||||
#include "media/filters/vp9_raw_bits_reader.h"
|
||||
|
||||
namespace {
|
||||
|
||||
void ReleaseDecoderBuffer(void* refcon,
|
||||
void* doomed_memory_block,
|
||||
size_t size_in_bytes) {
|
||||
if (refcon)
|
||||
static_cast<media::DecoderBuffer*>(refcon)->Release();
|
||||
}
|
||||
|
||||
// See Annex B of the VP9 specification for details.
|
||||
// https://www.webmproject.org/vp9/
|
||||
constexpr uint8_t kSuperFrameMarker = 0b11000000;
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace media {
|
||||
|
||||
VP9SuperFrameBitstreamFilter::VP9SuperFrameBitstreamFilter() = default;
|
||||
VP9SuperFrameBitstreamFilter::~VP9SuperFrameBitstreamFilter() = default;
|
||||
|
||||
bool VP9SuperFrameBitstreamFilter::EnqueueBuffer(
|
||||
scoped_refptr<DecoderBuffer> buffer) {
|
||||
DCHECK(!buffer->end_of_stream());
|
||||
|
||||
Vp9RawBitsReader reader;
|
||||
reader.Initialize(buffer->data(), buffer->size());
|
||||
const bool show_frame = ShouldShowFrame(&reader);
|
||||
if (!reader.IsValid()) {
|
||||
DLOG(ERROR) << "Bitstream reading failed.";
|
||||
return false;
|
||||
}
|
||||
|
||||
// See Vp9Parser::ParseSuperframe() for more details.
|
||||
const bool is_superframe =
|
||||
(buffer->data()[buffer->size() - 1] & 0xE0) == kSuperFrameMarker;
|
||||
if (is_superframe && data_) {
|
||||
DLOG(WARNING) << "Mixing of superframe and raw frames not supported";
|
||||
return false;
|
||||
}
|
||||
|
||||
// Passthrough.
|
||||
if ((show_frame || is_superframe) && partial_buffers_.empty()) {
|
||||
DCHECK(!data_);
|
||||
return PreparePassthroughBuffer(std::move(buffer));
|
||||
}
|
||||
|
||||
partial_buffers_.emplace_back(std::move(buffer));
|
||||
if (!show_frame)
|
||||
return true;
|
||||
|
||||
// Time to merge buffers into one superframe.
|
||||
return BuildSuperFrame();
|
||||
}
|
||||
|
||||
base::apple::ScopedCFTypeRef<CMBlockBufferRef>
|
||||
VP9SuperFrameBitstreamFilter::CreatePassthroughBuffer(
|
||||
scoped_refptr<DecoderBuffer> buffer) {
|
||||
base::apple::ScopedCFTypeRef<CMBlockBufferRef> data;
|
||||
|
||||
// The created CMBlockBuffer owns a ref on DecoderBuffer to avoid a copy.
|
||||
CMBlockBufferCustomBlockSource source = {0};
|
||||
source.refCon = buffer.get();
|
||||
source.FreeBlock = &ReleaseDecoderBuffer;
|
||||
|
||||
// Create a memory-backed CMBlockBuffer for the translated data.
|
||||
OSStatus status = CMBlockBufferCreateWithMemoryBlock(
|
||||
kCFAllocatorDefault,
|
||||
static_cast<void*>(const_cast<uint8_t*>(buffer->data())), buffer->size(),
|
||||
kCFAllocatorDefault, &source, 0, buffer->size(), 0,
|
||||
data.InitializeInto());
|
||||
if (status != noErr) {
|
||||
OSSTATUS_DLOG(ERROR, status)
|
||||
<< "CMBlockBufferCreateWithMemoryBlock failed.";
|
||||
data.reset();
|
||||
return data;
|
||||
}
|
||||
buffer->AddRef();
|
||||
return data;
|
||||
}
|
||||
|
||||
void VP9SuperFrameBitstreamFilter::Flush() {
|
||||
partial_buffers_.clear();
|
||||
data_.reset();
|
||||
}
|
||||
|
||||
bool VP9SuperFrameBitstreamFilter::ShouldShowFrame(Vp9RawBitsReader* reader) {
|
||||
// See section 6.2 of the VP9 specification.
|
||||
reader->ReadLiteral(2); // frame_marker
|
||||
|
||||
uint8_t profile = 0;
|
||||
if (reader->ReadBool()) // profile_low_bit
|
||||
profile |= 1;
|
||||
if (reader->ReadBool()) // profile_high_bit
|
||||
profile |= 2;
|
||||
if (profile > 2 && reader->ReadBool()) // reserved_zero
|
||||
profile += 1;
|
||||
|
||||
if (reader->ReadBool()) // show_existing_frame
|
||||
return true;
|
||||
|
||||
reader->ReadBool(); // frame_type
|
||||
return reader->ReadBool(); // show_frame
|
||||
}
|
||||
|
||||
bool VP9SuperFrameBitstreamFilter::PreparePassthroughBuffer(
|
||||
scoped_refptr<DecoderBuffer> buffer) {
|
||||
data_ = CreatePassthroughBuffer(std::move(buffer));
|
||||
return !!data_;
|
||||
}
|
||||
|
||||
bool VP9SuperFrameBitstreamFilter::AllocateCombinedBlock(size_t total_size) {
|
||||
DCHECK(!data_);
|
||||
|
||||
OSStatus status = CMBlockBufferCreateWithMemoryBlock(
|
||||
kCFAllocatorDefault, nullptr, total_size, kCFAllocatorDefault, nullptr, 0,
|
||||
total_size, 0, data_.InitializeInto());
|
||||
if (status != noErr) {
|
||||
OSSTATUS_DLOG(ERROR, status)
|
||||
<< "CMBlockBufferCreateWithMemoryBlock failed.";
|
||||
return false;
|
||||
}
|
||||
|
||||
status = CMBlockBufferAssureBlockMemory(data_.get());
|
||||
if (status != noErr) {
|
||||
OSSTATUS_DLOG(ERROR, status) << "CMBlockBufferAssureBlockMemory failed.";
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool VP9SuperFrameBitstreamFilter::MergeBuffer(const DecoderBuffer& buffer,
|
||||
size_t offset) {
|
||||
OSStatus status = CMBlockBufferReplaceDataBytes(buffer.data(), data_.get(),
|
||||
offset, buffer.size());
|
||||
if (status != noErr) {
|
||||
OSSTATUS_DLOG(ERROR, status) << "CMBlockBufferReplaceDataBytes failed.";
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool VP9SuperFrameBitstreamFilter::BuildSuperFrame() {
|
||||
DCHECK(!partial_buffers_.empty());
|
||||
|
||||
// See Annex B of the VP9 specification for details on this process.
|
||||
|
||||
// Calculate maximum and total size.
|
||||
size_t total_size = 0, max_size = 0;
|
||||
for (const auto& b : partial_buffers_) {
|
||||
total_size += b->size();
|
||||
if (b->size() > max_size) {
|
||||
max_size = b->size();
|
||||
}
|
||||
}
|
||||
|
||||
const uint8_t bytes_per_frame_size =
|
||||
base::bits::AlignUpDeprecatedDoNotUse(
|
||||
base::bits::Log2Ceiling(base::checked_cast<uint32_t>(max_size)), 8) /
|
||||
8;
|
||||
DCHECK_GT(bytes_per_frame_size, 0);
|
||||
DCHECK_LE(bytes_per_frame_size, 4u);
|
||||
|
||||
// A leading and trailing marker byte plus storage for each frame size.
|
||||
total_size += 2 + bytes_per_frame_size * partial_buffers_.size();
|
||||
|
||||
// Allocate a block to hold the superframe.
|
||||
if (!AllocateCombinedBlock(total_size))
|
||||
return false;
|
||||
|
||||
// Merge each buffer into our superframe.
|
||||
size_t offset = 0;
|
||||
for (const auto& b : partial_buffers_) {
|
||||
if (!MergeBuffer(*b, offset))
|
||||
return false;
|
||||
offset += b->size();
|
||||
}
|
||||
|
||||
// Write superframe trailer which has size information for each buffer.
|
||||
size_t trailer_offset = 0;
|
||||
const size_t trailer_size = total_size - offset;
|
||||
auto trailer = base::HeapArray<uint8_t>::Uninit(trailer_size);
|
||||
|
||||
const uint8_t marker = kSuperFrameMarker + ((bytes_per_frame_size - 1) << 3) +
|
||||
(partial_buffers_.size() - 1);
|
||||
|
||||
trailer[trailer_offset++] = marker;
|
||||
for (const auto& b : partial_buffers_) {
|
||||
const uint32_t s = base::checked_cast<uint32_t>(b->size());
|
||||
DCHECK_LE(s, (1ULL << (bytes_per_frame_size * 8)) - 1);
|
||||
|
||||
memcpy(&trailer[trailer_offset], &s, bytes_per_frame_size);
|
||||
trailer_offset += bytes_per_frame_size;
|
||||
}
|
||||
DCHECK_EQ(trailer_offset, trailer_size - 1);
|
||||
trailer[trailer_offset] = marker;
|
||||
|
||||
OSStatus status = CMBlockBufferReplaceDataBytes(trailer.data(), data_.get(),
|
||||
offset, trailer_size);
|
||||
if (status != noErr) {
|
||||
OSSTATUS_DLOG(ERROR, status) << "CMBlockBufferReplaceDataBytes failed.";
|
||||
return false;
|
||||
}
|
||||
|
||||
partial_buffers_.clear();
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace media
|
@@ -1,64 +0,0 @@
|
||||
// Copyright 2020 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef MEDIA_GPU_MAC_VP9_SUPER_FRAME_BITSTREAM_FILTER_H_
|
||||
#define MEDIA_GPU_MAC_VP9_SUPER_FRAME_BITSTREAM_FILTER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include <CoreMedia/CoreMedia.h>
|
||||
|
||||
#include "base/apple/scoped_cftyperef.h"
|
||||
#include "media/base/decoder_buffer.h"
|
||||
#include "media/gpu/media_gpu_export.h"
|
||||
|
||||
namespace media {
|
||||
class Vp9RawBitsReader;
|
||||
|
||||
// Combines alt-ref VP9 buffers into super frames and passes through non-alt-ref
|
||||
// buffers without modification.
|
||||
class MEDIA_GPU_EXPORT VP9SuperFrameBitstreamFilter {
|
||||
public:
|
||||
VP9SuperFrameBitstreamFilter();
|
||||
~VP9SuperFrameBitstreamFilter();
|
||||
|
||||
// Adds a buffer for processing. Clients must call take_buffer() after this
|
||||
// to see if a buffer is ready for processing.
|
||||
bool EnqueueBuffer(scoped_refptr<DecoderBuffer> buffer);
|
||||
|
||||
// Releases any pending data.
|
||||
void Flush();
|
||||
|
||||
// Releases any prepared buffer. Returns null if no buffers are available.
|
||||
base::apple::ScopedCFTypeRef<CMBlockBufferRef> take_buffer() {
|
||||
return std::move(data_);
|
||||
}
|
||||
|
||||
bool has_buffers_for_testing() const {
|
||||
return data_ || !partial_buffers_.empty();
|
||||
}
|
||||
|
||||
// Creates a CMBlockBufferRef which points into `buffer` and owns a reference
|
||||
// on it that will be released when the block buffer is destroyed.
|
||||
static base::apple::ScopedCFTypeRef<CMBlockBufferRef> CreatePassthroughBuffer(
|
||||
scoped_refptr<DecoderBuffer> buffer);
|
||||
|
||||
private:
|
||||
bool ShouldShowFrame(Vp9RawBitsReader* reader);
|
||||
bool PreparePassthroughBuffer(scoped_refptr<DecoderBuffer> buffer);
|
||||
bool AllocateCombinedBlock(size_t total_size);
|
||||
bool MergeBuffer(const DecoderBuffer& buffer, size_t offset);
|
||||
bool BuildSuperFrame();
|
||||
|
||||
// Prepared CMBlockBuffer -- either by assembling |partial_buffers_| or when
|
||||
// a super frame is unnecessary, just by passing through DecoderBuffer.
|
||||
base::apple::ScopedCFTypeRef<CMBlockBufferRef> data_;
|
||||
|
||||
// Partial buffers which need to be assembled into a super frame.
|
||||
std::vector<scoped_refptr<DecoderBuffer>> partial_buffers_;
|
||||
};
|
||||
|
||||
} // namespace media
|
||||
|
||||
#endif // MEDIA_GPU_MAC_VP9_SUPER_FRAME_BITSTREAM_FILTER_H_
|
@@ -1,182 +0,0 @@
|
||||
// Copyright 2020 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/gpu/mac/vp9_super_frame_bitstream_filter.h"
|
||||
|
||||
#include <CoreMedia/CoreMedia.h>
|
||||
|
||||
#include "media/base/media.h"
|
||||
#include "media/base/test_data_util.h"
|
||||
#include "media/ffmpeg/ffmpeg_common.h"
|
||||
#include "media/ffmpeg/scoped_av_packet.h"
|
||||
#include "media/filters/ffmpeg_glue.h"
|
||||
#include "media/filters/in_memory_url_protocol.h"
|
||||
#include "media/filters/vp9_parser.h"
|
||||
#include "media/media_buildflags.h"
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
|
||||
namespace media {
|
||||
|
||||
#if BUILDFLAG(ENABLE_FFMPEG)
|
||||
|
||||
class VP9SuperFrameBitstreamFilterTest : public testing::Test {
|
||||
public:
|
||||
VP9SuperFrameBitstreamFilterTest()
|
||||
: parser_(/*parsing_compressed_header=*/false) {
|
||||
InitializeMediaLibrary();
|
||||
}
|
||||
|
||||
~VP9SuperFrameBitstreamFilterTest() override = default;
|
||||
|
||||
void LoadTestData(const char* file_name) {
|
||||
buffer_ = ReadTestDataFile(file_name);
|
||||
ASSERT_TRUE(buffer_);
|
||||
|
||||
// Initialize ffmpeg with the file data.
|
||||
protocol_ = std::make_unique<InMemoryUrlProtocol>(buffer_->data(),
|
||||
buffer_->size(), false);
|
||||
glue_ = std::make_unique<FFmpegGlue>(protocol_.get());
|
||||
ASSERT_TRUE(glue_->OpenContext());
|
||||
}
|
||||
|
||||
scoped_refptr<DecoderBuffer> ReadPacket(int stream_index = 0) {
|
||||
auto packet = ScopedAVPacket::Allocate();
|
||||
while (av_read_frame(glue_->format_context(), packet.get()) >= 0) {
|
||||
if (packet->stream_index == stream_index) {
|
||||
auto buffer = DecoderBuffer::CopyFrom(AVPacketData(*packet));
|
||||
av_packet_unref(packet.get());
|
||||
return buffer;
|
||||
}
|
||||
av_packet_unref(packet.get());
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Vp9Parser::Result ParseNextFrame() {
|
||||
// Temporaries for the Vp9Parser.
|
||||
Vp9FrameHeader fhdr;
|
||||
gfx::Size coded_size;
|
||||
std::unique_ptr<DecryptConfig> null_config;
|
||||
return parser_.ParseNextFrame(&fhdr, &coded_size, &null_config);
|
||||
}
|
||||
|
||||
protected:
|
||||
Vp9Parser parser_;
|
||||
|
||||
private:
|
||||
scoped_refptr<DecoderBuffer> buffer_;
|
||||
std::unique_ptr<InMemoryUrlProtocol> protocol_;
|
||||
std::unique_ptr<FFmpegGlue> glue_;
|
||||
};
|
||||
|
||||
TEST_F(VP9SuperFrameBitstreamFilterTest, Passthrough) {
|
||||
// This test file has no super frames.
|
||||
ASSERT_NO_FATAL_FAILURE(LoadTestData("bear-vp9.webm"));
|
||||
|
||||
// Run through a few packets for good measure.
|
||||
VP9SuperFrameBitstreamFilter bsf;
|
||||
for (int i = 0; i < 16; ++i) {
|
||||
auto buffer = ReadPacket();
|
||||
EXPECT_TRUE(buffer->HasOneRef());
|
||||
|
||||
// Passthrough buffers should be zero-copy, so a ref should be added.
|
||||
bsf.EnqueueBuffer(buffer);
|
||||
EXPECT_FALSE(buffer->HasOneRef());
|
||||
|
||||
auto cm_block = bsf.take_buffer();
|
||||
ASSERT_TRUE(cm_block);
|
||||
|
||||
ASSERT_EQ(buffer->size(), CMBlockBufferGetDataLength(cm_block.get()));
|
||||
|
||||
std::unique_ptr<uint8_t> block_data(new uint8_t[buffer->size()]);
|
||||
ASSERT_EQ(noErr, CMBlockBufferCopyDataBytes(
|
||||
cm_block.get(), 0, buffer->size(), block_data.get()));
|
||||
|
||||
// Verify that the block is valid.
|
||||
parser_.SetStream(block_data.get(), buffer->size(), nullptr);
|
||||
EXPECT_EQ(Vp9Parser::kOk, ParseNextFrame());
|
||||
EXPECT_EQ(Vp9Parser::kEOStream, ParseNextFrame());
|
||||
|
||||
// Releasing the block should bring our ref count back down.
|
||||
cm_block.reset();
|
||||
ASSERT_TRUE(buffer->HasOneRef());
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(VP9SuperFrameBitstreamFilterTest, Superframe) {
|
||||
ASSERT_NO_FATAL_FAILURE(LoadTestData("buck-1280x720-vp9.webm"));
|
||||
|
||||
VP9SuperFrameBitstreamFilter bsf;
|
||||
|
||||
// The first packet in this file is not part of a super frame. We still need
|
||||
// to send it to the VP9 parser so that the superframe can reference it.
|
||||
auto buffer = ReadPacket();
|
||||
parser_.SetStream(buffer->data(), buffer->size(), nullptr);
|
||||
EXPECT_EQ(Vp9Parser::kOk, ParseNextFrame());
|
||||
bsf.EnqueueBuffer(std::move(buffer));
|
||||
ASSERT_TRUE(bsf.take_buffer());
|
||||
|
||||
// The second and third belong to a super frame.
|
||||
buffer = ReadPacket();
|
||||
size_t total_size = buffer->size();
|
||||
bsf.EnqueueBuffer(std::move(buffer));
|
||||
ASSERT_FALSE(bsf.take_buffer());
|
||||
buffer = ReadPacket();
|
||||
total_size += buffer->size();
|
||||
bsf.EnqueueBuffer(std::move(buffer));
|
||||
|
||||
auto cm_block = bsf.take_buffer();
|
||||
ASSERT_TRUE(cm_block);
|
||||
|
||||
// Two marker bytes and 2x 16-bit sizes.
|
||||
const size_t kExpectedTotalSize = 1 + 2 + 2 + 1 + total_size;
|
||||
EXPECT_EQ(kExpectedTotalSize, CMBlockBufferGetDataLength(cm_block.get()));
|
||||
|
||||
std::unique_ptr<uint8_t> block_data(new uint8_t[kExpectedTotalSize]);
|
||||
ASSERT_EQ(noErr,
|
||||
CMBlockBufferCopyDataBytes(cm_block.get(), 0, kExpectedTotalSize,
|
||||
block_data.get()));
|
||||
|
||||
parser_.SetStream(block_data.get(), kExpectedTotalSize, nullptr);
|
||||
EXPECT_EQ(Vp9Parser::kOk, ParseNextFrame());
|
||||
EXPECT_EQ(Vp9Parser::kOk, ParseNextFrame());
|
||||
EXPECT_EQ(Vp9Parser::kEOStream, ParseNextFrame());
|
||||
}
|
||||
|
||||
TEST_F(VP9SuperFrameBitstreamFilterTest, FlushPassthroughFrame) {
|
||||
ASSERT_NO_FATAL_FAILURE(LoadTestData("buck-1280x720-vp9.webm"));
|
||||
|
||||
VP9SuperFrameBitstreamFilter bsf;
|
||||
|
||||
// The first packet in this file is not part of a super frame.
|
||||
bsf.EnqueueBuffer(ReadPacket());
|
||||
ASSERT_TRUE(bsf.has_buffers_for_testing());
|
||||
bsf.Flush();
|
||||
ASSERT_FALSE(bsf.has_buffers_for_testing());
|
||||
ASSERT_FALSE(bsf.take_buffer());
|
||||
}
|
||||
|
||||
TEST_F(VP9SuperFrameBitstreamFilterTest, FlushPartialSuperFrame) {
|
||||
ASSERT_NO_FATAL_FAILURE(LoadTestData("buck-1280x720-vp9.webm"));
|
||||
|
||||
VP9SuperFrameBitstreamFilter bsf;
|
||||
|
||||
// The first packet in this file is not part of a super frame.
|
||||
bsf.EnqueueBuffer(ReadPacket());
|
||||
ASSERT_TRUE(bsf.has_buffers_for_testing());
|
||||
ASSERT_TRUE(bsf.take_buffer());
|
||||
|
||||
// The second and third belong to a super frame.
|
||||
bsf.EnqueueBuffer(ReadPacket());
|
||||
ASSERT_FALSE(bsf.take_buffer());
|
||||
ASSERT_TRUE(bsf.has_buffers_for_testing());
|
||||
|
||||
bsf.Flush();
|
||||
ASSERT_FALSE(bsf.has_buffers_for_testing());
|
||||
ASSERT_FALSE(bsf.take_buffer());
|
||||
}
|
||||
|
||||
#endif // BUILDFLAG(ENABLE_FFMPEG)
|
||||
|
||||
} // namespace media
|
@@ -1,376 +0,0 @@
|
||||
// Copyright 2014 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef MEDIA_GPU_MAC_VT_VIDEO_DECODE_ACCELERATOR_MAC_H_
|
||||
#define MEDIA_GPU_MAC_VT_VIDEO_DECODE_ACCELERATOR_MAC_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include <deque>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <queue>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "base/apple/scoped_cftyperef.h"
|
||||
#include "base/containers/queue.h"
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/memory/weak_ptr.h"
|
||||
#include "base/task/sequenced_task_runner.h"
|
||||
#include "base/task/single_thread_task_runner.h"
|
||||
#include "base/threading/thread_checker.h"
|
||||
#include "base/trace_event/memory_dump_provider.h"
|
||||
#include "components/viz/common/resources/shared_image_format.h"
|
||||
#include "gpu/config/gpu_driver_bug_workarounds.h"
|
||||
#include "media/base/media_log.h"
|
||||
#include "media/gpu/media_gpu_export.h"
|
||||
#include "media/video/h264_parser.h"
|
||||
#include "media/video/h264_poc.h"
|
||||
#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
|
||||
#include "media/video/h265_parser.h"
|
||||
#include "media/video/h265_poc.h"
|
||||
#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
|
||||
#include "media/video/video_decode_accelerator.h"
|
||||
#include "ui/gfx/geometry/size.h"
|
||||
#include "ui/gl/gl_bindings.h"
|
||||
|
||||
// This must be included after gl_bindings.h, or the various GL headers on the
|
||||
// system and in the source tree will conflict with each other.
|
||||
#include <VideoToolbox/VideoToolbox.h>
|
||||
|
||||
namespace base {
|
||||
class SequencedTaskRunner;
|
||||
class SingleThreadTaskRunner;
|
||||
} // namespace base
|
||||
|
||||
namespace media {
|
||||
class AV1ConfigChangeDetector;
|
||||
class VP9ConfigChangeDetector;
|
||||
class VP9SuperFrameBitstreamFilter;
|
||||
|
||||
// Preload VideoToolbox libraries, needed for sandbox warmup.
|
||||
MEDIA_GPU_EXPORT void InitializeVideoToolbox();
|
||||
|
||||
// VideoToolbox.framework implementation of the VideoDecodeAccelerator
|
||||
// interface for macOS.
|
||||
class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
|
||||
public base::trace_event::MemoryDumpProvider {
|
||||
public:
|
||||
VTVideoDecodeAccelerator(const gpu::GpuDriverBugWorkarounds& workarounds,
|
||||
MediaLog* media_log);
|
||||
|
||||
VTVideoDecodeAccelerator(const VTVideoDecodeAccelerator&) = delete;
|
||||
VTVideoDecodeAccelerator& operator=(const VTVideoDecodeAccelerator&) = delete;
|
||||
|
||||
~VTVideoDecodeAccelerator() override;
|
||||
|
||||
// VideoDecodeAccelerator implementation.
|
||||
bool Initialize(const Config& config, Client* client) override;
|
||||
void Decode(BitstreamBuffer bitstream) override;
|
||||
void Decode(scoped_refptr<DecoderBuffer> buffer,
|
||||
int32_t bitstream_id) override;
|
||||
void AssignPictureBuffers(
|
||||
const std::vector<PictureBuffer>& pictures) override;
|
||||
void ReusePictureBuffer(int32_t picture_id) override;
|
||||
void Flush() override;
|
||||
void Reset() override;
|
||||
void Destroy() override;
|
||||
bool TryToSetupDecodeOnSeparateSequence(
|
||||
const base::WeakPtr<Client>& decode_client,
|
||||
const scoped_refptr<base::SequencedTaskRunner>& decode_task_runner)
|
||||
override;
|
||||
|
||||
// MemoryDumpProvider implementation.
|
||||
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
|
||||
base::trace_event::ProcessMemoryDump* pmd) override;
|
||||
|
||||
// Called by OutputThunk() when VideoToolbox finishes decoding a frame.
|
||||
void Output(void* source_frame_refcon,
|
||||
OSStatus status,
|
||||
CVImageBufferRef image_buffer);
|
||||
|
||||
static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles(
|
||||
const gpu::GpuDriverBugWorkarounds& workarounds);
|
||||
|
||||
private:
|
||||
// Logged to UMA, so never reuse values. Make sure to update
|
||||
// VTVDASessionFailureType in histograms.xml to match.
|
||||
enum VTVDASessionFailureType {
|
||||
SFT_SUCCESSFULLY_INITIALIZED = 0,
|
||||
SFT_PLATFORM_ERROR = 1,
|
||||
SFT_INVALID_STREAM = 2,
|
||||
SFT_UNSUPPORTED_STREAM_PARAMETERS = 3,
|
||||
SFT_DECODE_ERROR = 4,
|
||||
SFT_UNSUPPORTED_STREAM = 5,
|
||||
// Must always be equal to largest entry logged.
|
||||
SFT_MAX = SFT_UNSUPPORTED_STREAM
|
||||
};
|
||||
|
||||
enum State {
|
||||
STATE_DECODING,
|
||||
STATE_ERROR,
|
||||
STATE_DESTROYING,
|
||||
};
|
||||
|
||||
enum TaskType {
|
||||
TASK_FRAME,
|
||||
TASK_FLUSH,
|
||||
TASK_RESET,
|
||||
TASK_DESTROY,
|
||||
};
|
||||
|
||||
struct Frame {
|
||||
explicit Frame(int32_t bitstream_id);
|
||||
~Frame();
|
||||
|
||||
// Associated bitstream buffer.
|
||||
int32_t bitstream_id;
|
||||
|
||||
// Slice header information.
|
||||
bool has_slice = false;
|
||||
bool is_idr = false;
|
||||
bool has_recovery_point = false;
|
||||
bool has_mmco5 = false;
|
||||
int32_t pic_order_cnt = 0;
|
||||
int32_t reorder_window = 0;
|
||||
|
||||
// Clean aperture size, as computed by CoreMedia.
|
||||
gfx::Size image_size;
|
||||
|
||||
// Decoded image, if decoding was successful.
|
||||
base::apple::ScopedCFTypeRef<CVImageBufferRef> image;
|
||||
|
||||
// Dynamic HDR metadata, if any.
|
||||
std::optional<gfx::HDRMetadata> hdr_metadata;
|
||||
};
|
||||
|
||||
struct Task {
|
||||
Task(TaskType type);
|
||||
Task(Task&& other);
|
||||
~Task();
|
||||
|
||||
TaskType type;
|
||||
std::unique_ptr<Frame> frame;
|
||||
};
|
||||
|
||||
struct PictureInfo {
|
||||
// A PictureInfo that specifies no texture IDs will be used for shared
|
||||
// images.
|
||||
PictureInfo();
|
||||
|
||||
PictureInfo(const PictureInfo&) = delete;
|
||||
PictureInfo& operator=(const PictureInfo&) = delete;
|
||||
|
||||
~PictureInfo();
|
||||
|
||||
int32_t bitstream_id = 0;
|
||||
|
||||
// The shared image holder that will be passed to the client.
|
||||
scoped_refptr<Picture::ScopedSharedImage> scoped_shared_image;
|
||||
};
|
||||
|
||||
struct FrameOrder {
|
||||
bool operator()(const std::unique_ptr<Frame>& lhs,
|
||||
const std::unique_ptr<Frame>& rhs) const;
|
||||
};
|
||||
|
||||
//
|
||||
// Methods for interacting with VideoToolbox. Run on |decoder_thread_|.
|
||||
//
|
||||
|
||||
// Set up VideoToolbox using the current VPS (if codec is HEVC), SPS and PPS.
|
||||
// Returns true or calls NotifyError() before returning false.
|
||||
bool ConfigureDecoder();
|
||||
|
||||
// Wait for VideoToolbox to output all pending frames. Returns true or calls
|
||||
// NotifyError() before returning false.
|
||||
bool FinishDelayedFrames();
|
||||
|
||||
// |frame| is owned by |pending_frames_|.
|
||||
void DecodeTaskH264(scoped_refptr<DecoderBuffer> buffer, Frame* frame);
|
||||
void DecodeTaskAv1(scoped_refptr<DecoderBuffer> buffer, Frame* frame);
|
||||
void DecodeTaskVp9(scoped_refptr<DecoderBuffer> buffer, Frame* frame);
|
||||
#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
|
||||
void DecodeTaskHEVC(scoped_refptr<DecoderBuffer> buffer, Frame* frame);
|
||||
#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
|
||||
void DecodeDone(Frame* frame);
|
||||
|
||||
//
|
||||
// Methods for interacting with |client_|. Run on |gpu_task_runner_|.
|
||||
//
|
||||
void NotifyError(Error vda_error_type,
|
||||
VTVDASessionFailureType session_failure_type);
|
||||
|
||||
// Since |media_log_| is invalidated in Destroy() on the GPU thread, the easy
|
||||
// thing to do is post to the GPU thread to use it. This helper handles the
|
||||
// thread hop if necessary.
|
||||
void WriteToMediaLog(MediaLogMessageLevel level, const std::string& message);
|
||||
|
||||
// |type| is the type of task that the flush will complete, one of TASK_FLUSH,
|
||||
// TASK_RESET, or TASK_DESTROY.
|
||||
void QueueFlush(TaskType type);
|
||||
void FlushTask(TaskType type);
|
||||
void FlushDone(TaskType type);
|
||||
|
||||
// Try to make progress on tasks in the |task_queue_| or sending frames in the
|
||||
// |reorder_queue_|.
|
||||
void ProcessWorkQueues();
|
||||
|
||||
// These methods returns true if a task was completed, false otherwise.
|
||||
bool ProcessTaskQueue();
|
||||
bool ProcessReorderQueue();
|
||||
bool ProcessOutputQueue();
|
||||
bool ProcessFrame(const Frame& frame);
|
||||
bool SendFrame(const Frame& frame);
|
||||
|
||||
//
|
||||
// GPU thread state.
|
||||
//
|
||||
const gpu::GpuDriverBugWorkarounds workarounds_;
|
||||
std::unique_ptr<MediaLog> media_log_;
|
||||
|
||||
raw_ptr<VideoDecodeAccelerator::Client, AcrossTasksDanglingUntriaged>
|
||||
client_ = nullptr;
|
||||
State state_ = STATE_DECODING;
|
||||
|
||||
// Queue of pending flush tasks. This is used to drop frames when a reset
|
||||
// is pending.
|
||||
base::queue<TaskType> pending_flush_tasks_;
|
||||
|
||||
// Queue of tasks to complete in the GPU thread.
|
||||
base::queue<Task> task_queue_;
|
||||
|
||||
// Queue of decoded frames in presentation order.
|
||||
std::priority_queue<std::unique_ptr<Frame>,
|
||||
std::vector<std::unique_ptr<Frame>>,
|
||||
FrameOrder>
|
||||
reorder_queue_;
|
||||
|
||||
// Queue of decoded frames in presentation order. Used by codecs which don't
|
||||
// require reordering (VP9 only at the moment).
|
||||
std::deque<std::unique_ptr<Frame>> output_queue_;
|
||||
|
||||
std::unique_ptr<VP9ConfigChangeDetector> vp9_cc_detector_;
|
||||
std::unique_ptr<VP9SuperFrameBitstreamFilter> vp9_bsf_;
|
||||
|
||||
std::unique_ptr<AV1ConfigChangeDetector> av1_cc_detector_;
|
||||
|
||||
// Size of assigned picture buffers.
|
||||
gfx::Size picture_size_;
|
||||
|
||||
// Format of the assigned picture buffers.
|
||||
VideoPixelFormat picture_format_ = PIXEL_FORMAT_UNKNOWN;
|
||||
|
||||
// Corresponding SharedImageFormat.
|
||||
viz::SharedImageFormat si_format_ = viz::MultiPlaneFormat::kNV12;
|
||||
|
||||
// Frames that have not yet been decoded, keyed by bitstream ID; maintains
|
||||
// ownership of Frame objects while they flow through VideoToolbox.
|
||||
base::flat_map<int32_t, std::unique_ptr<Frame>> pending_frames_;
|
||||
|
||||
// Set of assigned bitstream IDs, so that Destroy() can release them all.
|
||||
std::set<int32_t> assigned_bitstream_ids_;
|
||||
|
||||
// All picture buffers assigned to us. Used to check if reused picture buffers
|
||||
// should be added back to the available list or released. (They are not
|
||||
// released immediately because we need the reuse event to free the binding.)
|
||||
std::set<int32_t> assigned_picture_ids_;
|
||||
|
||||
// Texture IDs and image buffers of assigned pictures.
|
||||
base::flat_map<int32_t, std::unique_ptr<PictureInfo>> picture_info_map_;
|
||||
|
||||
// Pictures ready to be rendered to.
|
||||
std::vector<int32_t> available_picture_ids_;
|
||||
|
||||
//
|
||||
// Decoder thread state.
|
||||
//
|
||||
VTDecompressionOutputCallbackRecord callback_;
|
||||
base::apple::ScopedCFTypeRef<CMFormatDescriptionRef> format_;
|
||||
base::apple::ScopedCFTypeRef<VTDecompressionSessionRef> session_;
|
||||
H264Parser h264_parser_;
|
||||
|
||||
// SPSs and PPSs seen in the bitstream.
|
||||
base::flat_map<int, std::vector<uint8_t>> seen_sps_;
|
||||
base::flat_map<int, std::vector<uint8_t>> seen_spsext_;
|
||||
base::flat_map<int, std::vector<uint8_t>> seen_pps_;
|
||||
|
||||
// SPS and PPS most recently activated by an IDR.
|
||||
// TODO(sandersd): Enable configuring with multiple PPSs.
|
||||
std::vector<uint8_t> active_sps_;
|
||||
std::vector<uint8_t> active_spsext_;
|
||||
std::vector<uint8_t> active_pps_;
|
||||
|
||||
// SPS and PPS the decoder is currently confgured with.
|
||||
std::vector<uint8_t> configured_sps_;
|
||||
std::vector<uint8_t> configured_spsext_;
|
||||
std::vector<uint8_t> configured_pps_;
|
||||
|
||||
H264POC h264_poc_;
|
||||
#if BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
|
||||
H265Parser hevc_parser_;
|
||||
|
||||
// VPSs seen in the bitstream.
|
||||
base::flat_map<int, std::vector<uint8_t>> seen_vps_;
|
||||
// VPS most recently activated by an IDR.
|
||||
std::vector<uint8_t> active_vps_;
|
||||
|
||||
// VPSs the decoder is currently confgured with.
|
||||
base::flat_map<int, std::vector<uint8_t>> configured_vpss_;
|
||||
// SPSs the decoder is currently confgured with.
|
||||
base::flat_map<int, std::vector<uint8_t>> configured_spss_;
|
||||
// PPSs the decoder is currently confgured with.
|
||||
base::flat_map<int, std::vector<uint8_t>> configured_ppss_;
|
||||
|
||||
H265POC hevc_poc_;
|
||||
#endif // BUILDFLAG(ENABLE_HEVC_PARSER_AND_HW_DECODER)
|
||||
|
||||
Config config_;
|
||||
VideoCodec codec_;
|
||||
|
||||
// Visible rect the decoder is configured to use.
|
||||
gfx::Size configured_size_;
|
||||
|
||||
bool waiting_for_idr_ = true;
|
||||
bool missing_idr_logged_ = false;
|
||||
|
||||
// currently only HEVC is supported, VideoToolbox doesn't
|
||||
// support VP9 with alpha for now.
|
||||
bool has_alpha_ = false;
|
||||
|
||||
uint8_t bit_depth_ = 0;
|
||||
|
||||
// Texture target to use with IOSurfaces.
|
||||
uint32_t texture_target_ = 0;
|
||||
|
||||
// Used to accumulate the output picture count as a workaround to solve
|
||||
// the VT CRA/RASL bug
|
||||
uint64_t output_count_for_cra_rasl_workaround_ = 0;
|
||||
|
||||
// Id number for this instance for memory dumps.
|
||||
int memory_dump_id_ = 0;
|
||||
|
||||
//
|
||||
// Shared state (set up and torn down on GPU thread).
|
||||
//
|
||||
scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner_;
|
||||
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner_;
|
||||
|
||||
// WeakPtr to |this| for tasks on the |decoder_task_runner_|. Invalidated
|
||||
// on the |decoder_task_runner_| during FlushTask(TASK_DESTROY).
|
||||
base::WeakPtr<VTVideoDecodeAccelerator> decoder_weak_this_;
|
||||
|
||||
base::WeakPtr<VTVideoDecodeAccelerator> weak_this_;
|
||||
|
||||
// Declared last to ensure that all weak pointers are invalidated before
|
||||
// other destructors run.
|
||||
base::WeakPtrFactory<VTVideoDecodeAccelerator> decoder_weak_this_factory_;
|
||||
base::WeakPtrFactory<VTVideoDecodeAccelerator> weak_this_factory_;
|
||||
};
|
||||
|
||||
} // namespace media
|
||||
|
||||
#endif // MEDIA_GPU_MAC_VT_VIDEO_DECODE_ACCELERATOR_MAC_H_
|
File diff suppressed because it is too large
@@ -5,27 +5,16 @@
#include <memory>

#include "base/task/sequenced_task_runner.h"
#include "base/task/single_thread_task_runner.h"
#include "base/task/thread_pool.h"
#include "media/base/audio_decoder.h"
#include "media/base/media_switches.h"
#include "media/base/offloading_audio_encoder.h"
#include "media/filters/mac/audio_toolbox_audio_decoder.h"
#include "media/filters/mac/audio_toolbox_audio_encoder.h"
#include "media/gpu/ipc/service/vda_video_decoder.h"
#include "media/gpu/mac/video_toolbox_video_decoder.h"
#include "media/mojo/services/gpu_mojo_media_client.h"

namespace media {

namespace {

bool UseVTVD() {
return base::FeatureList::IsEnabled(kVideoToolboxVideoDecoder);
}

} // namespace

class GpuMojoMediaClientMac final : public GpuMojoMediaClient {
public:
GpuMojoMediaClientMac(GpuMojoMediaClientTraits& traits)
@@ -35,17 +24,9 @@ class GpuMojoMediaClientMac final : public GpuMojoMediaClient {
protected:
std::unique_ptr<VideoDecoder> CreatePlatformVideoDecoder(
VideoDecoderTraits& traits) final {
if (UseVTVD()) {
return std::make_unique<VideoToolboxVideoDecoder>(
traits.task_runner, traits.media_log->Clone(), gpu_workarounds_,
gpu_task_runner_, traits.get_command_buffer_stub_cb);
}

return VdaVideoDecoder::Create(
traits.task_runner, gpu_task_runner_, traits.media_log->Clone(),
*traits.target_color_space, gpu_preferences_, gpu_workarounds_,
traits.get_command_buffer_stub_cb,
VideoDecodeAccelerator::Config::OutputMode::kAllocate);
return std::make_unique<VideoToolboxVideoDecoder>(
traits.task_runner, traits.media_log->Clone(), gpu_workarounds_,
gpu_task_runner_, traits.get_command_buffer_stub_cb);
}

std::optional<SupportedAudioDecoderConfigs>
@@ -62,11 +43,8 @@ class GpuMojoMediaClientMac final : public GpuMojoMediaClient {
std::optional<SupportedVideoDecoderConfigs>
GetPlatformSupportedVideoDecoderConfigs(
GetVdaConfigsCB get_vda_configs) final {
if (UseVTVD()) {
return VideoToolboxVideoDecoder::GetSupportedVideoDecoderConfigs(
gpu_workarounds_);
}
return std::move(get_vda_configs).Run();
return VideoToolboxVideoDecoder::GetSupportedVideoDecoderConfigs(
gpu_workarounds_);
}

std::unique_ptr<AudioDecoder> CreatePlatformAudioDecoder(
@@ -84,10 +62,7 @@ class GpuMojoMediaClientMac final : public GpuMojoMediaClient {
}

VideoDecoderType GetPlatformDecoderImplementationType() final {
if (UseVTVD()) {
return VideoDecoderType::kVideoToolbox;
}
return VideoDecoderType::kVda;
return VideoDecoderType::kVideoToolbox;
}
};
@@ -34,13 +34,6 @@ void VideoDecodeAccelerator::Client::NotifyInitializationComplete(
<< "By default deferred initialization is not supported.";
}

#if BUILDFLAG(IS_APPLE)
gpu::SharedImageStub* VideoDecodeAccelerator::Client::GetSharedImageStub()
const {
return nullptr;
}
#endif

VideoDecodeAccelerator::~VideoDecodeAccelerator() = default;

void VideoDecodeAccelerator::Decode(scoped_refptr<DecoderBuffer> buffer,
@@ -32,10 +32,6 @@ namespace base {
class SequencedTaskRunner;
}

namespace gpu {
class SharedImageStub;
}

namespace media {

// Video decoder interface.
@@ -245,12 +241,6 @@ class MEDIA_EXPORT VideoDecodeAccelerator {
// a false return value there.
virtual void NotifyError(Error error) = 0;

#if BUILDFLAG(IS_APPLE)
// Return the SharedImageStub through which SharedImages may be created.
// Default implementation returns nullptr.
virtual gpu::SharedImageStub* GetSharedImageStub() const;
#endif

protected:
virtual ~Client() {}
};
@@ -23302,36 +23302,6 @@
]
}
],
"VideoToolboxAv1Decoding": [
{
"platforms": [
"mac"
],
"experiments": [
{
"name": "Enabled",
"enable_features": [
"VideoToolboxAv1Decoding"
]
}
]
}
],
"VideoToolboxVideoDecoder": [
{
"platforms": [
"mac"
],
"experiments": [
{
"name": "Enabled",
"enable_features": [
"VideoToolboxVideoDecoder"
]
}
]
}
],
"ViewTransitionOnNavigation": [
{
"platforms": [