
Move key frame flag from StreamParserBuffer to DecoderBuffer

This change is the first in a sequence of changes to enable eventual
removal of the "BrowserSeek" behavior in Chrome for Android by letting
demuxer reads indicate whether each buffer is a key frame.

This change (see the usage sketch after this list)
* moves |is_keyframe_| from StreamParserBuffer to DecoderBuffer, renaming
  it |is_key_frame_|
* moves StreamParserBuffer::IsKeyframe() to DecoderBuffer::is_key_frame()
* adds DecoderBuffer::set_is_key_frame() to allow updating the flag after
  buffer construction
* populates this field in DemuxerStream subclasses and related code
* updates unit tests, especially ChunkDemuxerTest and FFmpegDemuxerTest,
  so that most of their stream read checks now verify key frame
  expectations
* updates the media/mojo DecoderBuffer type converter and its unit tests
  to round-trip the key frame flag.
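
For illustration only, a minimal sketch of the resulting DecoderBuffer
key frame API (the accessors match the diff below; |data| and
|data_size| are placeholder inputs for this sketch):

  scoped_refptr<media::DecoderBuffer> buffer =
      media::DecoderBuffer::CopyFrom(data, data_size);
  // |is_key_frame_| defaults to false; demuxers set it from container
  // metadata after construction.
  buffer->set_is_key_frame(true);
  if (!buffer->end_of_stream() && buffer->is_key_frame()) {
    // Readers (e.g. a future Android seek path) can key off this flag
    // directly instead of relying on BrowserSeek.
  }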

Later changes will do the actual removal of the Chrome for Android
BrowserSeek logic and its tests.

BUG=304234
TEST=Updated unit tests pass, including mojo_media_lib_unittests

Review URL: https://codereview.chromium.org/712593003

Cr-Commit-Position: refs/heads/master@{#303764}
Author: wolenetz
Date: 2014-11-11 16:55:44 -08:00
Committed by: Commit bot
Commit: 02357c0df3 (parent: e635738fc3)
27 changed files with 254 additions and 149 deletions

@@ -12,14 +12,16 @@ namespace media {
DecoderBuffer::DecoderBuffer(int size)
: size_(size),
side_data_size_(0) {
side_data_size_(0),
is_key_frame_(false) {
Initialize();
}
DecoderBuffer::DecoderBuffer(const uint8* data, int size,
const uint8* side_data, int side_data_size)
: size_(size),
side_data_size_(side_data_size) {
side_data_size_(side_data_size),
is_key_frame_(false) {
if (!data) {
CHECK_EQ(size_, 0);
CHECK(!side_data);
@@ -82,6 +84,7 @@ std::string DecoderBuffer::AsHumanReadableString() {
<< " duration: " << duration_.InMicroseconds()
<< " size: " << size_
<< " side_data_size: " << side_data_size_
<< " is_key_frame: " << is_key_frame_
<< " encrypted: " << (decrypt_config_ != NULL)
<< " discard_padding (ms): (" << discard_padding_.first.InMilliseconds()
<< ", " << discard_padding_.second.InMilliseconds() << ")";

@@ -42,16 +42,18 @@ class MEDIA_EXPORT DecoderBuffer
};
// Allocates buffer with |size| >= 0. Buffer will be padded and aligned
// as necessary.
// as necessary, and |is_key_frame_| will default to false.
explicit DecoderBuffer(int size);
// Create a DecoderBuffer whose |data_| is copied from |data|. Buffer will be
// padded and aligned as necessary. |data| must not be NULL and |size| >= 0.
// The buffer's |is_key_frame_| will default to false.
static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size);
// Create a DecoderBuffer whose |data_| is copied from |data| and |side_data_|
// is copied from |side_data|. Buffers will be padded and aligned as necessary
// Data pointers must not be NULL and sizes must be >= 0.
// Data pointers must not be NULL and sizes must be >= 0. The buffer's
// |is_key_frame_| will default to false.
static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size,
const uint8* side_data,
int side_data_size);
@@ -154,6 +156,16 @@ class MEDIA_EXPORT DecoderBuffer
splice_timestamp_ = splice_timestamp;
}
bool is_key_frame() const {
DCHECK(!end_of_stream());
return is_key_frame_;
}
void set_is_key_frame(bool is_key_frame) {
DCHECK(!end_of_stream());
is_key_frame_ = is_key_frame;
}
// Returns a human-readable string describing |*this|.
std::string AsHumanReadableString();
@@ -162,7 +174,8 @@ class MEDIA_EXPORT DecoderBuffer
// Allocates a buffer of size |size| >= 0 and copies |data| into it. Buffer
// will be padded and aligned as necessary. If |data| is NULL then |data_| is
// set to NULL and |buffer_size_| to 0.
// set to NULL and |buffer_size_| to 0. |is_key_frame_| will default to
// false.
DecoderBuffer(const uint8* data, int size,
const uint8* side_data, int side_data_size);
virtual ~DecoderBuffer();
@@ -178,6 +191,7 @@ class MEDIA_EXPORT DecoderBuffer
scoped_ptr<DecryptConfig> decrypt_config_;
DiscardPadding discard_padding_;
base::TimeDelta splice_timestamp_;
bool is_key_frame_;
// Constructor helper method for memory allocations.
void Initialize();

@@ -13,6 +13,7 @@ TEST(DecoderBufferTest, Constructors) {
EXPECT_TRUE(buffer->data());
EXPECT_EQ(0, buffer->data_size());
EXPECT_FALSE(buffer->end_of_stream());
EXPECT_FALSE(buffer->is_key_frame());
const int kTestSize = 10;
scoped_refptr<DecoderBuffer> buffer3(new DecoderBuffer(kTestSize));
@@ -28,6 +29,7 @@ TEST(DecoderBufferTest, CreateEOSBuffer) {
TEST(DecoderBufferTest, CopyFrom) {
const uint8 kData[] = "hello";
const int kDataSize = arraysize(kData);
scoped_refptr<DecoderBuffer> buffer2(DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8*>(&kData), kDataSize));
ASSERT_TRUE(buffer2.get());
@@ -35,6 +37,8 @@ TEST(DecoderBufferTest, CopyFrom) {
EXPECT_EQ(buffer2->data_size(), kDataSize);
EXPECT_EQ(0, memcmp(buffer2->data(), kData, kDataSize));
EXPECT_FALSE(buffer2->end_of_stream());
EXPECT_FALSE(buffer2->is_key_frame());
scoped_refptr<DecoderBuffer> buffer3(DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8*>(&kData), kDataSize,
reinterpret_cast<const uint8*>(&kData), kDataSize));
@@ -46,6 +50,7 @@ TEST(DecoderBufferTest, CopyFrom) {
EXPECT_EQ(buffer3->side_data_size(), kDataSize);
EXPECT_EQ(0, memcmp(buffer3->side_data(), kData, kDataSize));
EXPECT_FALSE(buffer3->end_of_stream());
EXPECT_FALSE(buffer3->is_key_frame());
}
#if !defined(OS_ANDROID)
@@ -72,6 +77,8 @@ TEST(DecoderBufferTest, PaddingAlignment) {
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
buffer2->data()) & (DecoderBuffer::kAlignmentSize - 1));
EXPECT_FALSE(buffer2->is_key_frame());
}
#endif
@@ -97,4 +104,15 @@ TEST(DecoderBufferTest, GetDecryptConfig) {
EXPECT_FALSE(buffer->decrypt_config());
}
TEST(DecoderBufferTest, IsKeyFrame) {
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(0));
EXPECT_FALSE(buffer->is_key_frame());
buffer->set_is_key_frame(false);
EXPECT_FALSE(buffer->is_key_frame());
buffer->set_is_key_frame(true);
EXPECT_TRUE(buffer->is_key_frame());
}
} // namespace media

@@ -69,6 +69,9 @@ class MEDIA_EXPORT Decryptor {
// decrypted buffer must be NULL.
// - This parameter should not be set to kNeedMoreData.
// Second parameter: The decrypted buffer.
// - Only |data|, |data_size| and |timestamp| are set in the returned
// DecoderBuffer. The callback handler is responsible for setting other
// fields as appropriate.
typedef base::Callback<void(Status,
const scoped_refptr<DecoderBuffer>&)> DecryptCB;
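
The DecryptCB comment above implies that a handler which cares about the
key frame flag must copy it from the encrypted buffer itself; a hedged
sketch of that pattern, mirroring the DecryptingDemuxerStream change
further below (the function and variable names here are hypothetical):

  // Hypothetical DecryptCB handler. The decryptor only sets |data|,
  // |data_size| and |timestamp| on |decrypted|, so other fields must be
  // copied from |encrypted| by the caller.
  void OnBufferDecrypted(
      const scoped_refptr<media::DecoderBuffer>& encrypted,
      media::Decryptor::Status status,
      const scoped_refptr<media::DecoderBuffer>& decrypted) {
    if (status != media::Decryptor::kSuccess)
      return;
    decrypted->set_duration(encrypted->duration());
    if (encrypted->is_key_frame())
      decrypted->set_is_key_frame(true);
  }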

@@ -67,6 +67,9 @@ void FakeTextTrackStream::SatisfyPendingRead(
buffer->set_timestamp(start);
buffer->set_duration(duration);
// Assume all fake text buffers are keyframes.
buffer->set_is_key_frame(true);
base::ResetAndReturn(&read_cb_).Run(kOk, buffer);
}

@@ -19,7 +19,7 @@ static scoped_refptr<StreamParserBuffer> CopyBuffer(
buffer.data_size(),
buffer.side_data(),
buffer.side_data_size(),
buffer.IsKeyframe(),
buffer.is_key_frame(),
buffer.type(),
buffer.track_id());
copied_buffer->SetDecodeTimestamp(buffer.GetDecodeTimestamp());
@@ -45,20 +45,20 @@ scoped_refptr<StreamParserBuffer> StreamParserBuffer::CreateEOSBuffer() {
}
scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
const uint8* data, int data_size, bool is_keyframe, Type type,
const uint8* data, int data_size, bool is_key_frame, Type type,
TrackId track_id) {
return make_scoped_refptr(
new StreamParserBuffer(data, data_size, NULL, 0, is_keyframe, type,
new StreamParserBuffer(data, data_size, NULL, 0, is_key_frame, type,
track_id));
}
scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
const uint8* data, int data_size,
const uint8* side_data, int side_data_size,
bool is_keyframe, Type type, TrackId track_id) {
bool is_key_frame, Type type, TrackId track_id) {
return make_scoped_refptr(
new StreamParserBuffer(data, data_size, side_data, side_data_size,
is_keyframe, type, track_id));
is_key_frame, type, track_id));
}
DecodeTimestamp StreamParserBuffer::GetDecodeTimestamp() const {
@@ -75,10 +75,9 @@ void StreamParserBuffer::SetDecodeTimestamp(DecodeTimestamp timestamp) {
StreamParserBuffer::StreamParserBuffer(const uint8* data, int data_size,
const uint8* side_data,
int side_data_size, bool is_keyframe,
int side_data_size, bool is_key_frame,
Type type, TrackId track_id)
: DecoderBuffer(data, data_size, side_data, side_data_size),
is_keyframe_(is_keyframe),
decode_timestamp_(kNoDecodeTimestamp()),
config_id_(kInvalidConfigId),
type_(type),
@@ -89,6 +88,9 @@ StreamParserBuffer::StreamParserBuffer(const uint8* data, int data_size,
if (data) {
set_duration(kNoTimestamp());
}
if (is_key_frame)
set_is_key_frame(true);
}
StreamParserBuffer::~StreamParserBuffer() {}
@@ -143,7 +145,7 @@ void StreamParserBuffer::ConvertToSpliceBuffer(
SetDecodeTimestamp(first_splice_buffer->GetDecodeTimestamp());
SetConfigId(first_splice_buffer->GetConfigId());
set_timestamp(first_splice_buffer->timestamp());
is_keyframe_ = first_splice_buffer->IsKeyframe();
set_is_key_frame(first_splice_buffer->is_key_frame());
type_ = first_splice_buffer->type();
track_id_ = first_splice_buffer->track_id();
set_splice_timestamp(overlapping_buffer->timestamp());

@@ -110,13 +110,12 @@ class MEDIA_EXPORT StreamParserBuffer : public DecoderBuffer {
static scoped_refptr<StreamParserBuffer> CreateEOSBuffer();
static scoped_refptr<StreamParserBuffer> CopyFrom(
const uint8* data, int data_size, bool is_keyframe, Type type,
const uint8* data, int data_size, bool is_key_frame, Type type,
TrackId track_id);
static scoped_refptr<StreamParserBuffer> CopyFrom(
const uint8* data, int data_size,
const uint8* side_data, int side_data_size, bool is_keyframe, Type type,
const uint8* side_data, int side_data_size, bool is_key_frame, Type type,
TrackId track_id);
bool IsKeyframe() const { return is_keyframe_; }
// Decode timestamp. If not explicitly set, or set to kNoTimestamp(), the
// value will be taken from the normal timestamp.
@@ -174,11 +173,10 @@ class MEDIA_EXPORT StreamParserBuffer : public DecoderBuffer {
private:
StreamParserBuffer(const uint8* data, int data_size,
const uint8* side_data, int side_data_size,
bool is_keyframe, Type type,
bool is_key_frame, Type type,
TrackId track_id);
~StreamParserBuffer() override;
bool is_keyframe_;
DecodeTimestamp decode_timestamp_;
int config_id_;
Type type_;

@@ -222,6 +222,7 @@ scoped_refptr<DecoderBuffer> CreateFakeVideoBufferForTest(
static_cast<int>(pickle.size()));
buffer->set_timestamp(timestamp);
buffer->set_duration(duration);
buffer->set_is_key_frame(true);
return buffer;
}

@@ -186,6 +186,8 @@ class AudioDecoderTest : public testing::TestWithParam<DecoderTestData> {
reader_->GetAVStreamForTesting()->time_base, packet.pts));
buffer->set_duration(ConvertFromTimeBase(
reader_->GetAVStreamForTesting()->time_base, packet.duration));
if (packet.flags & AV_PKT_FLAG_KEY)
buffer->set_is_key_frame(true);
// Don't set discard padding for Opus, it already has discard behavior set
// based on the codec delay in the AudioDecoderConfig.

@@ -37,7 +37,7 @@ const uint8 kTracksHeader[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
};
// WebM Block bytes that represent a VP8 keyframe.
// WebM Block bytes that represent a VP8 key frame.
const uint8 kVP8Keyframe[] = {
0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
};
@@ -429,9 +429,9 @@ class ChunkDemuxerTest : public ::testing::Test {
// |block_descriptions| - A space delimited string of block info that
// is used to populate |blocks|. Each block info has a timestamp in
// milliseconds and optionally followed by a 'K' to indicate that a block
// should be marked as a keyframe. For example "0K 30 60" should populate
// |blocks| with 3 BlockInfo objects: a keyframe with timestamp 0 and 2
// non-keyframes at 30ms and 60ms.
// should be marked as a key frame. For example "0K 30 60" should populate
// |blocks| with 3 BlockInfo objects: a key frame with timestamp 0 and 2
// non-key-frames at 30ms and 60ms.
void ParseBlockDescriptions(int track_number,
const std::string block_descriptions,
std::vector<BlockInfo>* blocks) {
@@ -457,8 +457,8 @@ class ChunkDemuxerTest : public ::testing::Test {
block_info.duration = kTextBlockDuration;
ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
<< "Text block with timestamp " << block_info.timestamp_in_ms
<< " was not marked as a keyframe."
<< " All text blocks must be keyframes";
<< " was not marked as a key frame."
<< " All text blocks must be key frames";
}
if (track_number == kAudioTrackNum)
@@ -841,7 +841,7 @@ class ChunkDemuxerTest : public ::testing::Test {
int video_timecode = first_video_timecode;
// Create simple blocks for everything except the last 2 blocks.
// The first video frame must be a keyframe.
// The first video frame must be a key frame.
uint8 video_flag = kWebMFlagKeyframe;
for (int i = 0; i < block_count - 2; i++) {
if (audio_timecode <= video_timecode) {
@@ -1074,6 +1074,9 @@ class ChunkDemuxerTest : public ::testing::Test {
ss << " ";
ss << buffer->timestamp().InMilliseconds();
if (buffer->is_key_frame())
ss << "K";
// Handle preroll buffers.
if (EndsWith(timestamps[i], "P", true)) {
ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
@@ -1384,15 +1387,15 @@ TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
CheckExpectedRanges(kSourceId, "{ [0,92) }");
CheckExpectedBuffers(audio_stream, "0 23 46 69");
CheckExpectedBuffers(video_stream, "0 30 60");
CheckExpectedBuffers(text_stream, "10 45");
CheckExpectedBuffers(audio_stream, "0K 23K 46K 69K");
CheckExpectedBuffers(video_stream, "0K 30 60K");
CheckExpectedBuffers(text_stream, "10K 45K");
ShutdownDemuxer();
}
TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
// Tests that non-keyframes following an init segment are allowed
// Tests that non-key-frames following an init segment are allowed
// and dropped, as expected if the initialization segment received
// algorithm correctly sets the needs random access point flag to true for all
// track buffers. Note that the first initialization segment is insufficient
@@ -1422,9 +1425,9 @@ TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
MuxedStreamInfo(kTextTrackNum, "80K 90K"));
CheckExpectedRanges(kSourceId, "{ [23,92) }");
CheckExpectedBuffers(audio_stream, "23 46 69");
CheckExpectedBuffers(video_stream, "30 90");
CheckExpectedBuffers(text_stream, "25 40 80 90");
CheckExpectedBuffers(audio_stream, "23K 46K 69K");
CheckExpectedBuffers(video_stream, "30K 90K");
CheckExpectedBuffers(text_stream, "25K 40K 80K 90K");
}
// Make sure that the demuxer reports an error if Shutdown()
@@ -3409,13 +3412,13 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
// in the buffer. Also verify that buffers that start inside the
// window and extend beyond the end of the window are not included.
CheckExpectedRanges(kSourceId, "{ [120,270) }");
CheckExpectedBuffers(stream, "120 150 180 210 240");
CheckExpectedBuffers(stream, "120K 150 180 210 240K");
// Extend the append window to [50,650).
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
// Append more data and verify that adding buffers start at the next
// keyframe.
// key frame.
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"360 390 420K 450 480 510 540K 570 600 630K");
CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
@@ -3444,7 +3447,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
// The "50P" buffer is the "0" buffer marked for complete discard. The next
// "50" buffer is the "30" buffer marked with 20ms of start discard.
CheckExpectedBuffers(stream, "50P 50 60 90 120 150 180 210 240");
CheckExpectedBuffers(stream, "50KP 50K 60K 90K 120K 150K 180K 210K 240K");
// Extend the append window to [50,650).
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
@@ -3491,7 +3494,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
CheckExpectedBuffers(stream, "50P 50 62 86 109 122 125 128");
CheckExpectedBuffers(stream, "50KP 50K 62K 86K 109K 122K 125K 128K");
}
TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
@@ -3533,7 +3536,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
Seek(duration_1);
ExpectConfigChanged(DemuxerStream::AUDIO);
ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
CheckExpectedBuffers(stream, "2746 2767 2789 2810");
CheckExpectedBuffers(stream, "2746K 2767K 2789K 2810K");
}
TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
@@ -3558,8 +3561,8 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
// in the buffer. Also verify that cues that extend beyond the
// window are not included.
CheckExpectedRanges(kSourceId, "{ [100,270) }");
CheckExpectedBuffers(video_stream, "120 150 180 210 240");
CheckExpectedBuffers(text_stream, "100");
CheckExpectedBuffers(video_stream, "120K 150 180 210 240K");
CheckExpectedBuffers(text_stream, "100K");
// Extend the append window to [20,650).
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
@@ -3573,8 +3576,8 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
// Seek to the new range and verify that the expected buffers are returned.
Seek(base::TimeDelta::FromMilliseconds(420));
CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
CheckExpectedBuffers(text_stream, "400 500");
CheckExpectedBuffers(video_stream, "420K 450 480 510 540K 570 600");
CheckExpectedBuffers(text_stream, "400K 500K");
}
TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
@@ -3599,9 +3602,9 @@ TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));
CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
CheckExpectedBuffers(text_stream, "0 100 200");
CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
CheckExpectedBuffers(video_stream, "0K 30 60 90 120K 150 180");
CheckExpectedBuffers(text_stream, "0K 100K 200K");
// Remove the buffers that were added.
demuxer_->Remove(kSourceId, base::TimeDelta(),
@@ -3618,9 +3621,9 @@ TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));
Seek(base::TimeDelta());
CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
CheckExpectedBuffers(text_stream, "1 101 201");
CheckExpectedBuffers(audio_stream, "1K 21K 41K 61K 81K 101K 121K 141K");
CheckExpectedBuffers(video_stream, "1K 31 61 91 121K 151 181");
CheckExpectedBuffers(text_stream, "1K 101K 201K");
}
TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
@@ -3639,7 +3642,7 @@ TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
"0K 20K 40K 60K 80K 100K 120K 140K");
CheckExpectedRanges(kSourceId, "{ [0,160) }");
CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
demuxer_->Remove(kSourceId,
base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
@@ -3647,7 +3650,7 @@ TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
Seek(base::TimeDelta());
CheckExpectedRanges(kSourceId, "{ [0,160) }");
CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
}
// Verifies that a Seek() will complete without text cues for
@@ -3687,8 +3690,8 @@ TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
EXPECT_FALSE(text_read_done);
// Read some audio & video buffers to further verify seek completion.
CheckExpectedBuffers(audio_stream, "120 140");
CheckExpectedBuffers(video_stream, "120 150");
CheckExpectedBuffers(audio_stream, "120K 140K");
CheckExpectedBuffers(video_stream, "120K 150");
EXPECT_FALSE(text_read_done);
@@ -3704,10 +3707,10 @@ TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
// NOTE: we start at 275 here because the buffer at 225 was returned
// to the pending read initiated above.
CheckExpectedBuffers(text_stream, "275 325");
CheckExpectedBuffers(text_stream, "275K 325K");
// Verify that audio & video streams continue to return expected values.
CheckExpectedBuffers(audio_stream, "160 180");
CheckExpectedBuffers(audio_stream, "160K 180K");
CheckExpectedBuffers(video_stream, "180 210");
}

@@ -244,6 +244,9 @@ void DecryptingDemuxerStream::DecryptBuffer(
buffer->data(), buffer->data_size());
decrypted->set_timestamp(buffer->timestamp());
decrypted->set_duration(buffer->duration());
if (buffer->is_key_frame())
decrypted->set_is_key_frame(true);
state_ = kIdle;
base::ResetAndReturn(&read_cb_).Run(kOk, decrypted);
return;
@@ -307,6 +310,12 @@ void DecryptingDemuxerStream::DeliverBuffer(
}
DCHECK_EQ(status, Decryptor::kSuccess);
// Copy the key frame flag from the encrypted to decrypted buffer, assuming
// that the decryptor initialized the flag to false.
if (pending_buffer_to_decrypt_->is_key_frame())
decrypted_buffer->set_is_key_frame(true);
pending_buffer_to_decrypt_ = NULL;
state_ = kIdle;
base::ResetAndReturn(&read_cb_).Run(kOk, decrypted_buffer);

@@ -360,6 +360,9 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
}
}
if (packet.get()->flags & AV_PKT_FLAG_KEY)
buffer->set_is_key_frame(true);
last_packet_timestamp_ = buffer->timestamp();
last_packet_duration_ = buffer->duration();
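
Both the FFmpegDemuxerStream hunk above and the earlier AudioDecoderTest
hunk translate libavformat's AV_PKT_FLAG_KEY into the new flag. A
condensed sketch of that mapping (assuming |packet| is an AVPacket* and
|buffer| a media::DecoderBuffer already built from it):

  // Only the true case is set explicitly; |is_key_frame_| already
  // defaults to false, so non-key packets need no extra work.
  if (packet->flags & AV_PKT_FLAG_KEY)
    buffer->set_is_key_frame(true);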

@@ -108,13 +108,28 @@ class FFmpegDemuxerTest : public testing::Test {
MOCK_METHOD2(OnReadDoneCalled, void(int, int64));
struct ReadExpectation {
ReadExpectation(int size,
int64 timestamp_us,
const base::TimeDelta& discard_front_padding,
bool is_key_frame)
: size(size),
timestamp_us(timestamp_us),
discard_front_padding(discard_front_padding),
is_key_frame(is_key_frame) {
}
int size;
int64 timestamp_us;
base::TimeDelta discard_front_padding;
bool is_key_frame;
};
// Verifies that |buffer| has a specific |size| and |timestamp|.
// |location| simply indicates where the call to this function was made.
// This makes it easier to track down where test failures occur.
void OnReadDone(const tracked_objects::Location& location,
int size,
int64 timestamp_us,
base::TimeDelta discard_front_padding,
const ReadExpectation& read_expectation,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
std::string location_str;
@@ -122,39 +137,46 @@ class FFmpegDemuxerTest : public testing::Test {
location_str += "\n";
SCOPED_TRACE(location_str);
EXPECT_EQ(status, DemuxerStream::kOk);
OnReadDoneCalled(size, timestamp_us);
EXPECT_TRUE(buffer.get() != NULL);
EXPECT_EQ(size, buffer->data_size());
EXPECT_EQ(timestamp_us, buffer->timestamp().InMicroseconds());
EXPECT_EQ(discard_front_padding, buffer->discard_padding().first);
EXPECT_EQ(read_expectation.size, buffer->data_size());
EXPECT_EQ(read_expectation.timestamp_us,
buffer->timestamp().InMicroseconds());
EXPECT_EQ(read_expectation.discard_front_padding,
buffer->discard_padding().first);
EXPECT_EQ(read_expectation.is_key_frame, buffer->is_key_frame());
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
OnReadDoneCalled(read_expectation.size, read_expectation.timestamp_us);
message_loop_.PostTask(FROM_HERE, base::MessageLoop::QuitWhenIdleClosure());
}
DemuxerStream::ReadCB NewReadCB(const tracked_objects::Location& location,
int size,
int64 timestamp_us) {
EXPECT_CALL(*this, OnReadDoneCalled(size, timestamp_us));
return base::Bind(&FFmpegDemuxerTest::OnReadDone,
base::Unretained(this),
location,
size,
timestamp_us,
base::TimeDelta());
int64 timestamp_us,
bool is_key_frame) {
return NewReadCBWithCheckedDiscard(location,
size,
timestamp_us,
base::TimeDelta(),
is_key_frame);
}
DemuxerStream::ReadCB NewReadCBWithCheckedDiscard(
const tracked_objects::Location& location,
int size,
int64 timestamp_us,
base::TimeDelta discard_front_padding) {
base::TimeDelta discard_front_padding,
bool is_key_frame) {
EXPECT_CALL(*this, OnReadDoneCalled(size, timestamp_us));
struct ReadExpectation read_expectation(size,
timestamp_us,
discard_front_padding,
is_key_frame);
return base::Bind(&FFmpegDemuxerTest::OnReadDone,
base::Unretained(this),
location,
size,
timestamp_us,
discard_front_padding);
read_expectation);
}
// TODO(xhwang): This is a workaround of the issue that move-only parameters
@@ -374,10 +396,10 @@ TEST_F(FFmpegDemuxerTest, Read_Audio) {
// Attempt a read from the audio stream and run the message loop until done.
DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
audio->Read(NewReadCB(FROM_HERE, 29, 0));
audio->Read(NewReadCB(FROM_HERE, 29, 0, true));
message_loop_.Run();
audio->Read(NewReadCB(FROM_HERE, 27, 3000));
audio->Read(NewReadCB(FROM_HERE, 27, 3000, true));
message_loop_.Run();
}
@@ -389,10 +411,10 @@ TEST_F(FFmpegDemuxerTest, Read_Video) {
// Attempt a read from the video stream and run the message loop until done.
DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
video->Read(NewReadCB(FROM_HERE, 22084, 0));
video->Read(NewReadCB(FROM_HERE, 22084, 0, true));
message_loop_.Run();
video->Read(NewReadCB(FROM_HERE, 1057, 33000));
video->Read(NewReadCB(FROM_HERE, 1057, 33000, false));
message_loop_.Run();
}
@@ -406,10 +428,10 @@ TEST_F(FFmpegDemuxerTest, Read_Text) {
ASSERT_TRUE(text_stream);
EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
text_stream->Read(NewReadCB(FROM_HERE, 31, 0));
text_stream->Read(NewReadCB(FROM_HERE, 31, 0, true));
message_loop_.Run();
text_stream->Read(NewReadCB(FROM_HERE, 19, 500000));
text_stream->Read(NewReadCB(FROM_HERE, 19, 500000, true));
message_loop_.Run();
}
@@ -438,9 +460,11 @@ TEST_F(FFmpegDemuxerTest, Read_VideoPositiveStartTime) {
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
video->Read(NewReadCB(FROM_HERE, 5636, video_start_time.InMicroseconds()));
video->Read(NewReadCB(FROM_HERE, 5636, video_start_time.InMicroseconds(),
true));
message_loop_.Run();
audio->Read(NewReadCB(FROM_HERE, 165, audio_start_time.InMicroseconds()));
audio->Read(NewReadCB(FROM_HERE, 165, audio_start_time.InMicroseconds(),
true));
message_loop_.Run();
// Verify that the start time is equal to the lowest timestamp (ie the
@@ -466,7 +490,7 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNoStartTime) {
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
demuxer_->GetStream(DemuxerStream::AUDIO)
->Read(NewReadCB(FROM_HERE, 4095, 0));
->Read(NewReadCB(FROM_HERE, 4095, 0, true));
message_loop_.Run();
EXPECT_EQ(base::TimeDelta(), demuxer_->start_time());
@@ -478,8 +502,9 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNoStartTime) {
}
// TODO(dalecurtis): Test is disabled since FFmpeg does not currently guarantee
// the order of demuxed packets in OGG containers. Re-enable once we decide to
// either workaround it or attempt a fix upstream. See http://crbug.com/387996.
// the order of demuxed packets in OGG containers. Re-enable and fix key frame
// expectations once we decide to either workaround it or attempt a fix
// upstream. See http://crbug.com/387996.
TEST_F(FFmpegDemuxerTest,
DISABLED_Read_AudioNegativeStartTimeAndOggDiscard_Bear) {
// Many ogg files have negative starting timestamps, so ensure demuxing and
@@ -494,27 +519,29 @@ TEST_F(FFmpegDemuxerTest,
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
audio->Read(
NewReadCBWithCheckedDiscard(FROM_HERE, 40, 0, kInfiniteDuration()));
NewReadCBWithCheckedDiscard(FROM_HERE, 40, 0, kInfiniteDuration(),
true));
message_loop_.Run();
audio->Read(
NewReadCBWithCheckedDiscard(FROM_HERE, 41, 2903, kInfiniteDuration()));
NewReadCBWithCheckedDiscard(FROM_HERE, 41, 2903, kInfiniteDuration(),
true));
message_loop_.Run();
audio->Read(NewReadCBWithCheckedDiscard(
FROM_HERE, 173, 5805, base::TimeDelta::FromMicroseconds(10159)));
FROM_HERE, 173, 5805, base::TimeDelta::FromMicroseconds(10159), true));
message_loop_.Run();
audio->Read(NewReadCB(FROM_HERE, 148, 18866));
audio->Read(NewReadCB(FROM_HERE, 148, 18866, true));
message_loop_.Run();
EXPECT_EQ(base::TimeDelta::FromMicroseconds(-15964),
demuxer_->start_time());
video->Read(NewReadCB(FROM_HERE, 5751, 0));
video->Read(NewReadCB(FROM_HERE, 5751, 0, true));
message_loop_.Run();
video->Read(NewReadCB(FROM_HERE, 846, 33367));
video->Read(NewReadCB(FROM_HERE, 846, 33367, true));
message_loop_.Run();
video->Read(NewReadCB(FROM_HERE, 1255, 66733));
video->Read(NewReadCB(FROM_HERE, 1255, 66733, true));
message_loop_.Run();
// Seek back to the beginning and repeat the test.
@@ -540,10 +567,10 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOggDiscard_Sync) {
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
audio->Read(NewReadCBWithCheckedDiscard(
FROM_HERE, 1, 0, base::TimeDelta::FromMicroseconds(2902)));
FROM_HERE, 1, 0, base::TimeDelta::FromMicroseconds(2902), true));
message_loop_.Run();
audio->Read(NewReadCB(FROM_HERE, 1, 2902));
audio->Read(NewReadCB(FROM_HERE, 1, 2902, true));
message_loop_.Run();
EXPECT_EQ(base::TimeDelta::FromMicroseconds(-2902),
demuxer_->start_time());
@@ -552,13 +579,13 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOggDiscard_Sync) {
// must always be greater than zero.
EXPECT_EQ(base::TimeDelta(), demuxer_->GetStartTime());
video->Read(NewReadCB(FROM_HERE, 9997, 0));
video->Read(NewReadCB(FROM_HERE, 9997, 0, true));
message_loop_.Run();
video->Read(NewReadCB(FROM_HERE, 16, 33241));
video->Read(NewReadCB(FROM_HERE, 16, 33241, false));
message_loop_.Run();
video->Read(NewReadCB(FROM_HERE, 631, 66482));
video->Read(NewReadCB(FROM_HERE, 631, 66482, false));
message_loop_.Run();
// Seek back to the beginning and repeat the test.
@@ -646,7 +673,7 @@ TEST_F(FFmpegDemuxerTest, Seek) {
ASSERT_TRUE(audio);
// Read a video packet and release it.
video->Read(NewReadCB(FROM_HERE, 22084, 0));
video->Read(NewReadCB(FROM_HERE, 22084, 0, true));
message_loop_.Run();
// Issue a simple forward seek, which should discard queued packets.
@@ -656,19 +683,19 @@ TEST_F(FFmpegDemuxerTest, Seek) {
event.RunAndWaitForStatus(PIPELINE_OK);
// Audio read #1.
audio->Read(NewReadCB(FROM_HERE, 145, 803000));
audio->Read(NewReadCB(FROM_HERE, 145, 803000, true));
message_loop_.Run();
// Audio read #2.
audio->Read(NewReadCB(FROM_HERE, 148, 826000));
audio->Read(NewReadCB(FROM_HERE, 148, 826000, true));
message_loop_.Run();
// Video read #1.
video->Read(NewReadCB(FROM_HERE, 5425, 801000));
video->Read(NewReadCB(FROM_HERE, 5425, 801000, true));
message_loop_.Run();
// Video read #2.
video->Read(NewReadCB(FROM_HERE, 1906, 834000));
video->Read(NewReadCB(FROM_HERE, 1906, 834000, false));
message_loop_.Run();
}
@@ -690,7 +717,7 @@ TEST_F(FFmpegDemuxerTest, SeekText) {
ASSERT_TRUE(audio);
// Read a text packet and release it.
text_stream->Read(NewReadCB(FROM_HERE, 31, 0));
text_stream->Read(NewReadCB(FROM_HERE, 31, 0, true));
message_loop_.Run();
// Issue a simple forward seek, which should discard queued packets.
@@ -700,27 +727,27 @@ TEST_F(FFmpegDemuxerTest, SeekText) {
event.RunAndWaitForStatus(PIPELINE_OK);
// Audio read #1.
audio->Read(NewReadCB(FROM_HERE, 145, 803000));
audio->Read(NewReadCB(FROM_HERE, 145, 803000, true));
message_loop_.Run();
// Audio read #2.
audio->Read(NewReadCB(FROM_HERE, 148, 826000));
audio->Read(NewReadCB(FROM_HERE, 148, 826000, true));
message_loop_.Run();
// Video read #1.
video->Read(NewReadCB(FROM_HERE, 5425, 801000));
video->Read(NewReadCB(FROM_HERE, 5425, 801000, true));
message_loop_.Run();
// Video read #2.
video->Read(NewReadCB(FROM_HERE, 1906, 834000));
video->Read(NewReadCB(FROM_HERE, 1906, 834000, false));
message_loop_.Run();
// Text read #1.
text_stream->Read(NewReadCB(FROM_HERE, 19, 500000));
text_stream->Read(NewReadCB(FROM_HERE, 19, 500000, true));
message_loop_.Run();
// Text read #2.
text_stream->Read(NewReadCB(FROM_HERE, 19, 1000000));
text_stream->Read(NewReadCB(FROM_HERE, 19, 1000000, true));
message_loop_.Run();
}
@@ -772,7 +799,7 @@ TEST_F(FFmpegDemuxerTest, SeekWithCuesBeforeFirstCluster) {
ASSERT_TRUE(audio);
// Read a video packet and release it.
video->Read(NewReadCB(FROM_HERE, 22084, 0));
video->Read(NewReadCB(FROM_HERE, 22084, 0, true));
message_loop_.Run();
// Issue a simple forward seek, which should discard queued packets.
@@ -782,19 +809,19 @@ TEST_F(FFmpegDemuxerTest, SeekWithCuesBeforeFirstCluster) {
event.RunAndWaitForStatus(PIPELINE_OK);
// Audio read #1.
audio->Read(NewReadCB(FROM_HERE, 40, 2403000));
audio->Read(NewReadCB(FROM_HERE, 40, 2403000, true));
message_loop_.Run();
// Audio read #2.
audio->Read(NewReadCB(FROM_HERE, 42, 2406000));
audio->Read(NewReadCB(FROM_HERE, 42, 2406000, true));
message_loop_.Run();
// Video read #1.
video->Read(NewReadCB(FROM_HERE, 5276, 2402000));
video->Read(NewReadCB(FROM_HERE, 5276, 2402000, true));
message_loop_.Run();
// Video read #2.
video->Read(NewReadCB(FROM_HERE, 1740, 2436000));
video->Read(NewReadCB(FROM_HERE, 1740, 2436000, false));
message_loop_.Run();
}

@@ -332,7 +332,7 @@ bool FrameProcessor::HandlePartialAppendWindowTrimming(
const scoped_refptr<StreamParserBuffer>& buffer) {
DCHECK(buffer->duration() > base::TimeDelta());
DCHECK_EQ(DemuxerStream::AUDIO, buffer->type());
DCHECK(buffer->IsKeyframe());
DCHECK(buffer->is_key_frame());
const base::TimeDelta frame_end_timestamp =
buffer->timestamp() + buffer->duration();
@@ -446,7 +446,7 @@ bool FrameProcessor::ProcessFrame(
<< ", PTS=" << presentation_timestamp.InSecondsF()
<< ", DTS=" << decode_timestamp.InSecondsF()
<< ", DUR=" << frame_duration.InSecondsF()
<< ", RAP=" << frame->IsKeyframe();
<< ", RAP=" << frame->is_key_frame();
// Sanity check the timestamps.
if (presentation_timestamp == kNoTimestamp()) {
@@ -638,7 +638,7 @@ bool FrameProcessor::ProcessFrame(
// 12.1. If the coded frame is not a random access point, then drop the
// coded frame and jump to the top of the loop to start processing
// the next coded frame.
if (!frame->IsKeyframe()) {
if (!frame->is_key_frame()) {
DVLOG(3) << __FUNCTION__
<< ": Dropping frame that is not a random access point";
return true;

@@ -36,7 +36,7 @@ SourceBufferRange::SourceBufferRange(
interbuffer_distance_cb_(interbuffer_distance_cb),
size_in_bytes_(0) {
CHECK(!new_buffers.empty());
DCHECK(new_buffers.front()->IsKeyframe());
DCHECK(new_buffers.front()->is_key_frame());
DCHECK(!interbuffer_distance_cb.is_null());
AppendBuffersToEnd(new_buffers);
}
@@ -55,7 +55,7 @@ void SourceBufferRange::AppendBuffersToEnd(const BufferQueue& new_buffers) {
buffers_.push_back(*itr);
size_in_bytes_ += (*itr)->data_size();
if ((*itr)->IsKeyframe()) {
if ((*itr)->is_key_frame()) {
keyframe_map_.insert(
std::make_pair((*itr)->GetDecodeTimestamp(),
buffers_.size() - 1 + keyframe_map_index_base_));
@@ -449,7 +449,7 @@ bool SourceBufferRange::CanAppendBuffersToEnd(
const BufferQueue& buffers) const {
DCHECK(!buffers_.empty());
return IsNextInSequence(buffers.front()->GetDecodeTimestamp(),
buffers.front()->IsKeyframe());
buffers.front()->is_key_frame());
}
bool SourceBufferRange::BelongsToRange(DecodeTimestamp timestamp) const {
@@ -532,7 +532,7 @@ DecodeTimestamp SourceBufferRange::KeyframeBeforeTimestamp(
}
bool SourceBufferRange::IsNextInSequence(
DecodeTimestamp timestamp, bool is_keyframe) const {
DecodeTimestamp timestamp, bool is_key_frame) const {
DecodeTimestamp end = buffers_.back()->GetDecodeTimestamp();
if (end < timestamp &&
(gap_policy_ == ALLOW_GAPS ||
@@ -541,7 +541,7 @@ bool SourceBufferRange::IsNextInSequence(
}
return timestamp == end && AllowSameTimestamp(
buffers_.back()->IsKeyframe(), is_keyframe);
buffers_.back()->is_key_frame(), is_key_frame);
}
base::TimeDelta SourceBufferRange::GetFudgeRoom() const {

@@ -195,7 +195,7 @@ class SourceBufferRange {
// Returns true if |timestamp| is the timestamp of the next buffer in
// sequence after |buffers_.back()|, false otherwise.
bool IsNextInSequence(DecodeTimestamp timestamp, bool is_keyframe) const;
bool IsNextInSequence(DecodeTimestamp timestamp, bool is_key_frame) const;
// Adds all buffers which overlap [start, end) to the end of |buffers|. If
// no buffers exist in the range returns false, true otherwise.

@@ -189,8 +189,8 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
DCHECK(!end_of_stream_);
// New media segments must begin with a keyframe.
if (new_media_segment_ && !buffers.front()->IsKeyframe()) {
MEDIA_LOG(log_cb_) << "Media segment did not begin with keyframe.";
if (new_media_segment_ && !buffers.front()->is_key_frame()) {
MEDIA_LOG(log_cb_) << "Media segment did not begin with key frame.";
return false;
}
@@ -206,7 +206,7 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
}
if (!IsNextTimestampValid(buffers.front()->GetDecodeTimestamp(),
buffers.front()->IsKeyframe())) {
buffers.front()->is_key_frame())) {
MEDIA_LOG(log_cb_) << "Invalid same timestamp construct detected at time "
<< buffers.front()->GetDecodeTimestamp().InSecondsF();
@@ -227,7 +227,7 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
if (range_for_next_append_ != ranges_.end()) {
(*range_for_next_append_)->AppendBuffersToEnd(buffers);
last_appended_buffer_timestamp_ = buffers.back()->GetDecodeTimestamp();
last_appended_buffer_is_keyframe_ = buffers.back()->IsKeyframe();
last_appended_buffer_is_keyframe_ = buffers.back()->is_key_frame();
} else {
DecodeTimestamp new_range_start_time = std::min(
media_segment_start_time_, buffers.front()->GetDecodeTimestamp());
@@ -235,25 +235,25 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
BufferQueue trimmed_buffers;
// If the new range is not being created because of a new media
// segment, then we must make sure that we start with a keyframe.
// segment, then we must make sure that we start with a key frame.
// This can happen if the GOP in the previous append gets destroyed
// by a Remove() call.
if (!new_media_segment_) {
BufferQueue::const_iterator itr = buffers.begin();
// Scan past all the non-keyframes.
while (itr != buffers.end() && !(*itr)->IsKeyframe()) {
// Scan past all the non-key-frames.
while (itr != buffers.end() && !(*itr)->is_key_frame()) {
++itr;
}
// If we didn't find a keyframe, then update the last appended
// If we didn't find a key frame, then update the last appended
// buffer state and return.
if (itr == buffers.end()) {
last_appended_buffer_timestamp_ = buffers.back()->GetDecodeTimestamp();
last_appended_buffer_is_keyframe_ = buffers.back()->IsKeyframe();
last_appended_buffer_is_keyframe_ = buffers.back()->is_key_frame();
return true;
} else if (itr != buffers.begin()) {
// Copy the first keyframe and everything after it into
// Copy the first key frame and everything after it into
// |trimmed_buffers|.
trimmed_buffers.assign(itr, buffers.end());
buffers_for_new_range = &trimmed_buffers;
@@ -272,7 +272,7 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
last_appended_buffer_timestamp_ =
buffers_for_new_range->back()->GetDecodeTimestamp();
last_appended_buffer_is_keyframe_ =
buffers_for_new_range->back()->IsKeyframe();
buffers_for_new_range->back()->is_key_frame();
}
new_media_segment_ = false;
@@ -456,7 +456,7 @@ bool SourceBufferStream::IsMonotonicallyIncreasing(
for (BufferQueue::const_iterator itr = buffers.begin();
itr != buffers.end(); ++itr) {
DecodeTimestamp current_timestamp = (*itr)->GetDecodeTimestamp();
bool current_is_keyframe = (*itr)->IsKeyframe();
bool current_is_keyframe = (*itr)->is_key_frame();
DCHECK(current_timestamp != kNoDecodeTimestamp());
DCHECK((*itr)->duration() >= base::TimeDelta())
<< "Packet with invalid duration."
@@ -740,7 +740,7 @@ void SourceBufferStream::PrepareRangesForNextAppend(
DecodeTimestamp prev_timestamp = last_appended_buffer_timestamp_;
bool prev_is_keyframe = last_appended_buffer_is_keyframe_;
DecodeTimestamp next_timestamp = new_buffers.front()->GetDecodeTimestamp();
bool next_is_keyframe = new_buffers.front()->IsKeyframe();
bool next_is_keyframe = new_buffers.front()->is_key_frame();
if (prev_timestamp != kNoDecodeTimestamp() &&
prev_timestamp != next_timestamp) {

@@ -232,7 +232,7 @@ class SourceBufferStreamTest : public testing::Test {
break;
if (expect_keyframe && current_position == starting_position)
EXPECT_TRUE(buffer->IsKeyframe());
EXPECT_TRUE(buffer->is_key_frame());
if (expected_data) {
const uint8* actual_data = buffer->data();
@@ -295,7 +295,7 @@ class SourceBufferStreamTest : public testing::Test {
// Handle preroll buffers.
if (EndsWith(timestamps[i], "P", true)) {
ASSERT_TRUE(buffer->IsKeyframe());
ASSERT_TRUE(buffer->is_key_frame());
scoped_refptr<StreamParserBuffer> preroll_buffer;
preroll_buffer.swap(buffer);
@@ -312,10 +312,10 @@ class SourceBufferStreamTest : public testing::Test {
preroll_buffer->GetDecodeTimestamp());
ASSERT_EQ(kInfiniteDuration(), preroll_buffer->discard_padding().first);
ASSERT_EQ(base::TimeDelta(), preroll_buffer->discard_padding().second);
ASSERT_TRUE(buffer->IsKeyframe());
ASSERT_TRUE(buffer->is_key_frame());
ss << "P";
} else if (buffer->IsKeyframe()) {
} else if (buffer->is_key_frame()) {
ss << "K";
}
@@ -2301,7 +2301,7 @@ TEST_F(SourceBufferStreamTest, PresentationTimestampIndependence) {
scoped_refptr<StreamParserBuffer> buffer;
ASSERT_EQ(stream_->GetNextBuffer(&buffer), SourceBufferStream::kSuccess);
if (buffer->IsKeyframe()) {
if (buffer->is_key_frame()) {
EXPECT_EQ(DecodeTimestamp::FromPresentationTime(buffer->timestamp()),
buffer->GetDecodeTimestamp());
last_keyframe_idx = i;

@@ -153,6 +153,8 @@ class VideoFrameStreamTest
DCHECK_EQ(stream_type, Decryptor::kVideo);
scoped_refptr<DecoderBuffer> decrypted =
DecoderBuffer::CopyFrom(encrypted->data(), encrypted->data_size());
if (encrypted->is_key_frame())
decrypted->set_is_key_frame(true);
decrypted->set_timestamp(encrypted->timestamp());
decrypted->set_duration(encrypted->duration());
decrypt_cb.Run(Decryptor::kSuccess, decrypted);

@@ -19,7 +19,7 @@ static std::string BufferQueueToString(
itr != buffers.end();
++itr) {
ss << " " << (*itr)->timestamp().InMilliseconds();
if ((*itr)->IsKeyframe())
if ((*itr)->is_key_frame())
ss << "K";
}
ss << " }";

@@ -112,7 +112,7 @@ bool EsAdapterVideo::OnNewBuffer(
// - if it is not associated with any config,
// - or if no valid key frame has been found so far.
if (!has_valid_config_ ||
(!has_valid_frame_ && !stream_parser_buffer->IsKeyframe())) {
(!has_valid_frame_ && !stream_parser_buffer->is_key_frame())) {
discarded_frame_count_++;
return true;
}
@@ -196,7 +196,7 @@ base::TimeDelta EsAdapterVideo::GetNextFramePts(base::TimeDelta current_pts) {
void EsAdapterVideo::ReplaceDiscardedFrames(
const scoped_refptr<StreamParserBuffer>& stream_parser_buffer) {
DCHECK_GT(discarded_frame_count_, 0);
DCHECK(stream_parser_buffer->IsKeyframe());
DCHECK(stream_parser_buffer->is_key_frame());
// PTS/DTS are interpolated between the min PTS/DTS of discarded frames
// and the PTS/DTS of the first valid buffer.
@@ -219,7 +219,7 @@ void EsAdapterVideo::ReplaceDiscardedFrames(
StreamParserBuffer::CopyFrom(
stream_parser_buffer->data(),
stream_parser_buffer->data_size(),
stream_parser_buffer->IsKeyframe(),
stream_parser_buffer->is_key_frame(),
stream_parser_buffer->type(),
stream_parser_buffer->track_id());
frame->SetDecodeTimestamp(dts);

@@ -92,7 +92,7 @@ void EsAdapterVideoTest::OnNewConfig(const VideoDecoderConfig& video_config) {
void EsAdapterVideoTest::OnNewBuffer(
scoped_refptr<StreamParserBuffer> buffer) {
buffer_descriptors_ << "(" << buffer->duration().InMilliseconds() << ","
<< (buffer->IsKeyframe() ? "Y" : "N") << ") ";
<< (buffer->is_key_frame() ? "Y" : "N") << ") ";
}
std::string EsAdapterVideoTest::RunAdapterTest(

@@ -559,8 +559,8 @@ void Mp2tStreamParser::OnEmitVideoBuffer(
<< stream_parser_buffer->timestamp().InMilliseconds()
<< " dur="
<< stream_parser_buffer->duration().InMilliseconds()
<< " IsKeyframe="
<< stream_parser_buffer->IsKeyframe();
<< " is_key_frame="
<< stream_parser_buffer->is_key_frame();
// Ignore the incoming buffer if it is not associated with any config.
if (buffer_queue_chain_.empty()) {

@@ -486,7 +486,7 @@ bool WebMClusterParser::Track::AddBuffer(
DVLOG(2) << "AddBuffer() : " << track_num_
<< " ts " << buffer->timestamp().InSecondsF()
<< " dur " << buffer->duration().InSecondsF()
<< " kf " << buffer->IsKeyframe()
<< " kf " << buffer->is_key_frame()
<< " size " << buffer->data_size();
if (last_added_buffer_missing_duration_.get()) {
@@ -499,7 +499,7 @@ bool WebMClusterParser::Track::AddBuffer(
<< last_added_buffer_missing_duration_->timestamp().InSecondsF()
<< " dur "
<< last_added_buffer_missing_duration_->duration().InSecondsF()
<< " kf " << last_added_buffer_missing_duration_->IsKeyframe()
<< " kf " << last_added_buffer_missing_duration_->is_key_frame()
<< " size " << last_added_buffer_missing_duration_->data_size();
scoped_refptr<StreamParserBuffer> updated_buffer =
last_added_buffer_missing_duration_;
@@ -528,7 +528,7 @@ void WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
<< last_added_buffer_missing_duration_->timestamp().InSecondsF()
<< " dur "
<< last_added_buffer_missing_duration_->duration().InSecondsF()
<< " kf " << last_added_buffer_missing_duration_->IsKeyframe()
<< " kf " << last_added_buffer_missing_duration_->is_key_frame()
<< " size " << last_added_buffer_missing_duration_->data_size();
// Don't use the applied duration as a future estimation (don't use

@@ -178,6 +178,9 @@ struct MediaDecoderBuffer {
// The number of bytes in |data|.
uint32 data_size;
// Indicates whether or not this buffer is a random access point.
bool is_key_frame;
// This is backed by an std::vector and results in a few copies.
// Into the vector, onto and off the MessagePipe, back into a vector.
array<uint8>? side_data;

@@ -220,6 +220,7 @@ MediaDecoderBufferPtr TypeConverter<MediaDecoderBufferPtr,
mojo_buffer->timestamp_usec = input->timestamp().InMicroseconds();
mojo_buffer->duration_usec = input->duration().InMicroseconds();
mojo_buffer->is_key_frame = input->is_key_frame();
mojo_buffer->data_size = input->data_size();
mojo_buffer->side_data_size = input->side_data_size();
mojo_buffer->front_discard_usec =
@@ -290,6 +291,10 @@ scoped_refptr<media::DecoderBuffer> TypeConverter<
base::TimeDelta::FromMicroseconds(input->timestamp_usec));
buffer->set_duration(
base::TimeDelta::FromMicroseconds(input->duration_usec));
if (input->is_key_frame)
buffer->set_is_key_frame(true);
media::DecoderBuffer::DiscardPadding discard_padding(
base::TimeDelta::FromMicroseconds(input->front_discard_usec),
base::TimeDelta::FromMicroseconds(input->back_discard_usec));

@@ -25,6 +25,7 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_Normal) {
reinterpret_cast<const uint8*>(&kSideData), kSideDataSize));
buffer->set_timestamp(base::TimeDelta::FromMilliseconds(123));
buffer->set_duration(base::TimeDelta::FromMilliseconds(456));
buffer->set_is_key_frame(true);
buffer->set_splice_timestamp(base::TimeDelta::FromMilliseconds(200));
buffer->set_discard_padding(media::DecoderBuffer::DiscardPadding(
base::TimeDelta::FromMilliseconds(5),
@@ -41,8 +42,16 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_Normal) {
EXPECT_EQ(0, memcmp(result->side_data(), kSideData, kSideDataSize));
EXPECT_EQ(buffer->timestamp(), result->timestamp());
EXPECT_EQ(buffer->duration(), result->duration());
EXPECT_EQ(buffer->is_key_frame(), result->is_key_frame());
EXPECT_EQ(buffer->splice_timestamp(), result->splice_timestamp());
EXPECT_EQ(buffer->discard_padding(), result->discard_padding());
// Verify a false |is_key_frame| round-trips.
buffer->set_is_key_frame(false);
MediaDecoderBufferPtr ptr2(MediaDecoderBuffer::From(buffer));
scoped_refptr<DecoderBuffer> result2(ptr2.To<scoped_refptr<DecoderBuffer>>());
EXPECT_EQ(0, memcmp(result2->data(), kData, kDataSize));
EXPECT_EQ(buffer->is_key_frame(), result2->is_key_frame());
}
TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_EOS) {