[Cast Streaming] Remove legacy frame sender.
This removes the legacy frame sender used to bridge the streaming sender API to the pre-libcast Cast sender. This will allow a large portion of the legacy Cast transport to be removed in a follow-up CL. This required updating several unit tests which were never ported to use libcast. Either they are updated to use libcast objects, or test cases are deleted if they test functionality that is internal to libcast. Bug: b/41481348 Fixed: b/40226049 Change-Id: I2ecbda8295ca5dcf9cce818a6e7139a1434e22a1 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5173522 Commit-Queue: Mark Foltz <mfoltz@chromium.org> Reviewed-by: Dirk Pranke <dpranke@google.com> Reviewed-by: Jordan Bayles <jophba@chromium.org> Cr-Commit-Position: refs/heads/main@{#1280912}
This commit is contained in:

committed by
Chromium LUCI CQ

parent
2395b68c9c
commit
1f06886ef9
BUILD.gn
components/mirroring/service
media/cast
1
BUILD.gn
1
BUILD.gn
@ -612,7 +612,6 @@ group("gn_all") {
|
||||
if (is_linux || is_chromeos_lacros) {
|
||||
deps += [
|
||||
"//gpu/khronos_glcts_support:khronos_glcts_test",
|
||||
"//media/cast:cast_benchmarks",
|
||||
"//media/cast:tap_proxy",
|
||||
"//skia:filter_fuzz_stub",
|
||||
"//skia:image_operations_bench",
|
||||
|
@ -132,23 +132,6 @@ class RemotingSender::SenderEncodedFrameFactory {
|
||||
int64_t frames_created_ = 0;
|
||||
};
|
||||
|
||||
RemotingSender::RemotingSender(
|
||||
scoped_refptr<media::cast::CastEnvironment> cast_environment,
|
||||
media::cast::CastTransport* transport,
|
||||
const media::cast::FrameSenderConfig& config,
|
||||
mojo::ScopedDataPipeConsumerHandle pipe,
|
||||
mojo::PendingReceiver<media::mojom::RemotingDataStreamSender> stream_sender,
|
||||
base::OnceClosure error_callback)
|
||||
: RemotingSender(cast_environment,
|
||||
media::cast::FrameSender::Create(cast_environment,
|
||||
config,
|
||||
transport,
|
||||
*this),
|
||||
config,
|
||||
std::move(pipe),
|
||||
std::move(stream_sender),
|
||||
std::move(error_callback)) {}
|
||||
|
||||
RemotingSender::RemotingSender(
|
||||
scoped_refptr<media::cast::CastEnvironment> cast_environment,
|
||||
std::unique_ptr<openscreen::cast::Sender> sender,
|
||||
|
@ -43,18 +43,6 @@ class COMPONENT_EXPORT(MIRRORING_SERVICE) RemotingSender final
|
||||
: public media::mojom::RemotingDataStreamSender,
|
||||
public media::cast::FrameSender::Client {
|
||||
public:
|
||||
// Old way of instantiating using a cast transport. |transport| is expected to
|
||||
// outlive this class.
|
||||
// TODO(https://crbug.com/1316434): should be removed once libcast sender is
|
||||
// successfully launched.
|
||||
RemotingSender(scoped_refptr<media::cast::CastEnvironment> cast_environment,
|
||||
media::cast::CastTransport* transport,
|
||||
const media::cast::FrameSenderConfig& config,
|
||||
mojo::ScopedDataPipeConsumerHandle pipe,
|
||||
mojo::PendingReceiver<media::mojom::RemotingDataStreamSender>
|
||||
stream_sender,
|
||||
base::OnceClosure error_callback);
|
||||
|
||||
// New way of instantiating using an openscreen::cast::Sender. Since the
|
||||
// |Sender| instance is destroyed when renegotiation is complete, |this|
|
||||
// is also invalid and should be immediately torn down.
|
||||
@ -72,6 +60,9 @@ class COMPONENT_EXPORT(MIRRORING_SERVICE) RemotingSender final
|
||||
~RemotingSender() override;
|
||||
|
||||
private:
|
||||
// Ctor that takes a media::cast::FrameSender for unit tests.
|
||||
// TODO(issues.chromium.org/329781397): Remove unnecessary wrapper objects in
|
||||
// Chrome's implementation of the Cast sender.
|
||||
RemotingSender(scoped_refptr<media::cast::CastEnvironment> cast_environment,
|
||||
std::unique_ptr<media::cast::FrameSender> sender,
|
||||
const media::cast::FrameSenderConfig& config,
|
||||
|
@ -55,6 +55,11 @@ class COMPONENT_EXPORT(MIRRORING_SERVICE) RtpStreamClient {
|
||||
// intervals `refresh_interval` apart for a short period of time. This provides
|
||||
// the video encoder, downstream, several copies of the last frame so that it
|
||||
// may clear up lossy encoding artifacts.
|
||||
//
|
||||
// Note that this mostly calls through to the media::cast::VideoSender, and the
|
||||
// refresh frame logic could be factored out into a separate object.
|
||||
// TODO(issues.chromium.org/329781397): Remove unnecessary wrapper objects in
|
||||
// Chrome's implementation of the Cast sender.
|
||||
class COMPONENT_EXPORT(MIRRORING_SERVICE) VideoRtpStream final {
|
||||
public:
|
||||
VideoRtpStream(std::unique_ptr<media::cast::VideoSender> video_sender,
|
||||
@ -101,6 +106,12 @@ class COMPONENT_EXPORT(MIRRORING_SERVICE) VideoRtpStream final {
|
||||
};
|
||||
|
||||
// Receives audio data and submits the data to media::cast::AudioSender.
|
||||
// Note that this mostly calls through to the media::cast::VideoSender, and the
|
||||
// refresh frame logic could be factored out into a separate object.
|
||||
//
|
||||
// NOTE: This is a do-nothing wrapper over the underlying AudioSender.
|
||||
// TODO(issues.chromium.org/329781397): Remove unnecessary wrapper objects in
|
||||
// Chrome's implementation of the Cast sender.
|
||||
class COMPONENT_EXPORT(MIRRORING_SERVICE) AudioRtpStream final {
|
||||
public:
|
||||
AudioRtpStream(std::unique_ptr<media::cast::AudioSender> audio_sender,
|
||||
|
@ -25,9 +25,10 @@
|
||||
#include "testing/gmock/include/gmock/gmock.h"
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
|
||||
using ::testing::InvokeWithoutArgs;
|
||||
using ::testing::_;
|
||||
using media::cast::TestAudioBusFactory;
|
||||
using ::testing::_;
|
||||
using ::testing::InvokeWithoutArgs;
|
||||
using ::testing::StrictMock;
|
||||
|
||||
namespace mirroring {
|
||||
|
||||
@ -89,6 +90,24 @@ class StreamClient final : public RtpStreamClient {
|
||||
base::WeakPtrFactory<StreamClient> weak_factory_{this};
|
||||
};
|
||||
|
||||
class MockVideoSender : public media::cast::VideoSender {
|
||||
public:
|
||||
MockVideoSender() = default;
|
||||
MOCK_METHOD(void,
|
||||
InsertRawVideoFrame,
|
||||
(scoped_refptr<media::VideoFrame>, base::TimeTicks),
|
||||
(override));
|
||||
};
|
||||
|
||||
class MockAudioSender : public media::cast::AudioSender {
|
||||
public:
|
||||
MockAudioSender() = default;
|
||||
MOCK_METHOD(void,
|
||||
InsertAudio,
|
||||
(std::unique_ptr<media::AudioBus>, base::TimeTicks),
|
||||
(override));
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
class RtpStreamTest : public ::testing::Test {
|
||||
@ -109,25 +128,6 @@ class RtpStreamTest : public ::testing::Test {
|
||||
~RtpStreamTest() override { task_environment_.RunUntilIdle(); }
|
||||
|
||||
protected:
|
||||
void ExpectVideoFrames(VideoRtpStream& video_stream, int num_frames) {
|
||||
base::RunLoop run_loop;
|
||||
int loop_count = 0;
|
||||
// Expect the video frame is sent to video sender for encoding, and the
|
||||
// encoded frame is sent to the transport.
|
||||
EXPECT_CALL(transport_, InsertFrame(_, _))
|
||||
.WillRepeatedly(
|
||||
InvokeWithoutArgs([&run_loop, &loop_count, &num_frames] {
|
||||
if (++loop_count == num_frames) {
|
||||
run_loop.Quit();
|
||||
}
|
||||
}));
|
||||
|
||||
// We insert the first frame; the remaining frames (if any) will be update
|
||||
// requests.
|
||||
video_stream.InsertVideoFrame(client_.CreateVideoFrame());
|
||||
run_loop.Run();
|
||||
}
|
||||
|
||||
void ExpectTimerRunning(const VideoRtpStream& video_stream) {
|
||||
EXPECT_TRUE(video_stream.refresh_timer_.IsRunning());
|
||||
}
|
||||
@ -141,60 +141,47 @@ class RtpStreamTest : public ::testing::Test {
|
||||
base::SimpleTestTickClock testing_clock_;
|
||||
const scoped_refptr<media::cast::CastEnvironment> cast_environment_;
|
||||
StreamClient client_;
|
||||
|
||||
// We currently don't care about sender reports, so we use a nice mock here.
|
||||
testing::NiceMock<media::cast::MockCastTransport> transport_;
|
||||
};
|
||||
|
||||
// Test the video streaming pipeline.
|
||||
TEST_F(RtpStreamTest, VideoStreaming) {
|
||||
auto video_sender = std::make_unique<media::cast::VideoSender>(
|
||||
cast_environment_, media::cast::GetDefaultVideoSenderConfig(),
|
||||
base::DoNothing(), base::DoNothing(), &transport_,
|
||||
std::make_unique<media::MockVideoEncoderMetricsProvider>(),
|
||||
base::DoNothing(), base::DoNothing());
|
||||
auto video_sender = std::make_unique<MockVideoSender>();
|
||||
EXPECT_CALL(*video_sender, InsertRawVideoFrame(_, _)).Times(1);
|
||||
VideoRtpStream video_stream(std::move(video_sender), client_.GetWeakPtr(),
|
||||
base::Milliseconds(1));
|
||||
client_.SetVideoRtpStream(&video_stream);
|
||||
ExpectVideoFrames(video_stream, 1);
|
||||
video_stream.InsertVideoFrame(client_.CreateVideoFrame());
|
||||
ExpectTimerRunning(video_stream);
|
||||
client_.SetVideoRtpStream(nullptr);
|
||||
}
|
||||
|
||||
TEST_F(RtpStreamTest, VideoStreamEmitsFramesWhenNoUpdates) {
|
||||
auto video_sender = std::make_unique<media::cast::VideoSender>(
|
||||
cast_environment_, media::cast::GetDefaultVideoSenderConfig(),
|
||||
base::DoNothing(), base::DoNothing(), &transport_,
|
||||
std::make_unique<media::MockVideoEncoderMetricsProvider>(),
|
||||
base::DoNothing(), base::DoNothing());
|
||||
auto video_sender = std::make_unique<MockVideoSender>();
|
||||
EXPECT_CALL(*video_sender, InsertRawVideoFrame(_, _)).Times(6);
|
||||
VideoRtpStream video_stream(std::move(video_sender), client_.GetWeakPtr(),
|
||||
base::Milliseconds(1));
|
||||
client_.SetVideoRtpStream(&video_stream);
|
||||
ExpectVideoFrames(video_stream, 5);
|
||||
video_stream.InsertVideoFrame(client_.CreateVideoFrame());
|
||||
task_environment_.FastForwardBy(base::Milliseconds(5));
|
||||
ExpectTimerRunning(video_stream);
|
||||
client_.SetVideoRtpStream(nullptr);
|
||||
}
|
||||
|
||||
TEST_F(RtpStreamTest, VideoStreamDoesNotRefreshWithZeroInterval) {
|
||||
auto video_sender = std::make_unique<media::cast::VideoSender>(
|
||||
cast_environment_, media::cast::GetDefaultVideoSenderConfig(),
|
||||
base::DoNothing(), base::DoNothing(), &transport_,
|
||||
std::make_unique<media::MockVideoEncoderMetricsProvider>(),
|
||||
base::DoNothing(), base::DoNothing());
|
||||
auto video_sender = std::make_unique<MockVideoSender>();
|
||||
EXPECT_CALL(*video_sender, InsertRawVideoFrame(_, _)).Times(1);
|
||||
VideoRtpStream video_stream(std::move(video_sender), client_.GetWeakPtr(),
|
||||
base::TimeDelta());
|
||||
client_.SetVideoRtpStream(&video_stream);
|
||||
ExpectVideoFrames(video_stream, 1);
|
||||
video_stream.InsertVideoFrame(client_.CreateVideoFrame());
|
||||
task_environment_.FastForwardBy(base::Milliseconds(5));
|
||||
ExpectTimerNotRunning(video_stream);
|
||||
client_.SetVideoRtpStream(nullptr);
|
||||
}
|
||||
|
||||
TEST_F(RtpStreamTest, VideoStreamTimerNotRunningWhenNoFramesDelivered) {
|
||||
auto video_sender = std::make_unique<media::cast::VideoSender>(
|
||||
cast_environment_, media::cast::GetDefaultVideoSenderConfig(),
|
||||
base::DoNothing(), base::DoNothing(), &transport_,
|
||||
std::make_unique<media::MockVideoEncoderMetricsProvider>(),
|
||||
base::DoNothing(), base::DoNothing());
|
||||
auto video_sender = std::make_unique<MockVideoSender>();
|
||||
EXPECT_CALL(*video_sender, InsertRawVideoFrame(_, _)).Times(1);
|
||||
VideoRtpStream video_stream(std::move(video_sender), client_.GetWeakPtr(),
|
||||
base::Milliseconds(1));
|
||||
client_.SetVideoRtpStream(&video_stream);
|
||||
@ -202,17 +189,13 @@ TEST_F(RtpStreamTest, VideoStreamTimerNotRunningWhenNoFramesDelivered) {
|
||||
video_stream.InsertVideoFrame(client_.CreateVideoFrame());
|
||||
// Fast forward by enough time for the refresh_timer_ to fire 2 times.
|
||||
task_environment_.FastForwardBy(base::Milliseconds(5));
|
||||
|
||||
ExpectTimerNotRunning(video_stream);
|
||||
client_.SetVideoRtpStream(nullptr);
|
||||
}
|
||||
|
||||
TEST_F(RtpStreamTest, VideoStreamTimerRestartsWhenFramesDeliveredAgain) {
|
||||
auto video_sender = std::make_unique<media::cast::VideoSender>(
|
||||
cast_environment_, media::cast::GetDefaultVideoSenderConfig(),
|
||||
base::DoNothing(), base::DoNothing(), &transport_,
|
||||
std::make_unique<media::MockVideoEncoderMetricsProvider>(),
|
||||
base::DoNothing(), base::DoNothing());
|
||||
auto video_sender = std::make_unique<MockVideoSender>();
|
||||
EXPECT_CALL(*video_sender, InsertRawVideoFrame(_, _)).Times(7);
|
||||
VideoRtpStream video_stream(std::move(video_sender), client_.GetWeakPtr(),
|
||||
base::Milliseconds(1));
|
||||
client_.SetVideoRtpStream(&video_stream);
|
||||
@ -242,20 +225,11 @@ TEST_F(RtpStreamTest, AudioStreaming) {
|
||||
TestAudioBusFactory(audio_config.channels, audio_config.rtp_timebase,
|
||||
TestAudioBusFactory::kMiddleANoteFreq, 0.5f)
|
||||
.NextAudioBus(kDuration);
|
||||
auto audio_sender = std::make_unique<media::cast::AudioSender>(
|
||||
cast_environment_, audio_config, base::DoNothing(), &transport_);
|
||||
AudioRtpStream audio_stream(std::move(audio_sender), client_.GetWeakPtr());
|
||||
{
|
||||
base::RunLoop run_loop;
|
||||
// Expect the audio data is sent to audio sender for encoding, and the
|
||||
// encoded frame is sent to the transport.
|
||||
EXPECT_CALL(transport_, InsertFrame(_, _))
|
||||
.WillOnce(InvokeWithoutArgs(&run_loop, &base::RunLoop::Quit));
|
||||
audio_stream.InsertAudio(std::move(audio_bus), testing_clock_.NowTicks());
|
||||
run_loop.Run();
|
||||
}
|
||||
|
||||
task_environment_.RunUntilIdle();
|
||||
auto audio_sender = std::make_unique<MockAudioSender>();
|
||||
EXPECT_CALL(*audio_sender, InsertAudio(_, _)).Times(1);
|
||||
AudioRtpStream audio_stream(std::move(audio_sender), client_.GetWeakPtr());
|
||||
audio_stream.InsertAudio(std::move(audio_bus), testing_clock_.NowTicks());
|
||||
}
|
||||
|
||||
} // namespace mirroring
|
||||
|
@ -188,21 +188,14 @@ source_set("encoding") {
|
||||
}
|
||||
}
|
||||
|
||||
# TODO(https://crbug.com/1327074): should be split into multiple source sets
|
||||
# once the new Open Screen frame sender implementation is added.
|
||||
source_set("sender") {
|
||||
sources = [
|
||||
"cast_sender.h",
|
||||
"cast_sender_impl.cc",
|
||||
"cast_sender_impl.h",
|
||||
"sender/audio_sender.cc",
|
||||
"sender/audio_sender.h",
|
||||
"sender/congestion_control.cc",
|
||||
"sender/congestion_control.h",
|
||||
"sender/frame_sender.cc",
|
||||
"sender/frame_sender.h",
|
||||
"sender/frame_sender_impl.cc",
|
||||
"sender/frame_sender_impl.h",
|
||||
"sender/openscreen_frame_sender.cc",
|
||||
"sender/openscreen_frame_sender.h",
|
||||
"sender/performance_metrics_overlay.cc",
|
||||
@ -267,6 +260,8 @@ source_set("test_receiver") {
|
||||
static_library("test_support") {
|
||||
testonly = true
|
||||
sources = [
|
||||
"test/fake_openscreen_clock.cc",
|
||||
"test/fake_openscreen_clock.h",
|
||||
"test/fake_receiver_time_offset_estimator.cc",
|
||||
"test/fake_receiver_time_offset_estimator.h",
|
||||
"test/fake_video_encode_accelerator_factory.cc",
|
||||
@ -275,6 +270,8 @@ static_library("test_support") {
|
||||
"test/loopback_transport.h",
|
||||
"test/mock_cast_transport.cc",
|
||||
"test/mock_cast_transport.h",
|
||||
"test/mock_openscreen_environment.cc",
|
||||
"test/mock_openscreen_environment.h",
|
||||
"test/mock_paced_packet_sender.cc",
|
||||
"test/mock_paced_packet_sender.h",
|
||||
"test/mock_rtp_payload_feedback.cc",
|
||||
@ -323,16 +320,6 @@ static_library("test_support") {
|
||||
"//third_party/libyuv",
|
||||
"//ui/gfx:test_support",
|
||||
]
|
||||
|
||||
# FFMPEG software video decoders are not available on Android and/or
|
||||
# Chromecast content_shell builds.
|
||||
if (!is_android && !is_castos) {
|
||||
sources += [
|
||||
"test/fake_media_source.cc",
|
||||
"test/fake_media_source.h",
|
||||
]
|
||||
deps += [ "//third_party/ffmpeg" ]
|
||||
}
|
||||
}
|
||||
|
||||
test("cast_unittests") {
|
||||
@ -366,7 +353,6 @@ test("cast_unittests") {
|
||||
"sender/openscreen_frame_sender_unittest.cc",
|
||||
"sender/video_bitrate_suggester_unittest.cc",
|
||||
"sender/video_sender_unittest.cc",
|
||||
"test/end2end_unittest.cc",
|
||||
"test/receiver/audio_decoder_unittest.cc",
|
||||
"test/receiver/cast_message_builder_unittest.cc",
|
||||
"test/receiver/frame_buffer_unittest.cc",
|
||||
@ -416,71 +402,12 @@ if (is_win || is_mac || is_linux || is_chromeos_lacros || is_fuchsia) {
|
||||
group("testing_tools") {
|
||||
testonly = true
|
||||
deps = [
|
||||
":cast_benchmarks",
|
||||
":cast_sender_app",
|
||||
":cast_simulator",
|
||||
":generate_barcode_video",
|
||||
":generate_timecode_audio",
|
||||
":udp_proxy",
|
||||
]
|
||||
}
|
||||
|
||||
test("cast_benchmarks") {
|
||||
testonly = true
|
||||
sources = [ "test/cast_benchmarks.cc" ]
|
||||
deps = [
|
||||
":common",
|
||||
":net",
|
||||
":sender",
|
||||
":test_receiver",
|
||||
":test_support",
|
||||
"//base",
|
||||
"//base/test:test_support",
|
||||
"//media:test_support",
|
||||
"//net",
|
||||
"//testing/gtest",
|
||||
"//ui/gfx/geometry",
|
||||
]
|
||||
}
|
||||
|
||||
executable("cast_sender_app") {
|
||||
testonly = true
|
||||
sources = [ "test/sender.cc" ]
|
||||
deps = [
|
||||
":common",
|
||||
":net",
|
||||
":sender",
|
||||
":test_support",
|
||||
"//base",
|
||||
"//build/win:default_exe_manifest",
|
||||
"//media",
|
||||
"//media:test_support",
|
||||
"//testing/gmock",
|
||||
]
|
||||
}
|
||||
|
||||
proto_library("network_simulation_model_proto") {
|
||||
visibility = [ ":cast_simulator" ]
|
||||
sources = [ "test/proto/network_simulation_model.proto" ]
|
||||
}
|
||||
|
||||
executable("cast_simulator") {
|
||||
testonly = true
|
||||
sources = [ "test/simulator.cc" ]
|
||||
deps = [
|
||||
":common",
|
||||
":net",
|
||||
":network_simulation_model_proto",
|
||||
":sender",
|
||||
":test_receiver",
|
||||
":test_support",
|
||||
"//base",
|
||||
"//base/test:test_support",
|
||||
"//build/win:default_exe_manifest",
|
||||
"//media:test_support",
|
||||
]
|
||||
}
|
||||
|
||||
executable("generate_barcode_video") {
|
||||
testonly = true
|
||||
sources = [ "test/utility/generate_barcode_video.cc" ]
|
||||
|
@ -1,120 +0,0 @@
|
||||
// Copyright 2013 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
//
|
||||
// This is the main interface for the cast sender.
|
||||
//
|
||||
// The AudioFrameInput, VideoFrameInput and PacketReciever interfaces should
|
||||
// be accessed from the main thread.
|
||||
|
||||
#ifndef MEDIA_CAST_CAST_SENDER_H_
|
||||
#define MEDIA_CAST_CAST_SENDER_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "base/functional/callback.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/time/tick_clock.h"
|
||||
#include "base/time/time.h"
|
||||
#include "media/base/audio_bus.h"
|
||||
#include "media/base/video_frame.h"
|
||||
#include "media/cast/cast_callbacks.h"
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_environment.h"
|
||||
#include "media/cast/constants.h"
|
||||
#include "media/cast/net/cast_transport.h"
|
||||
|
||||
namespace gfx {
|
||||
class Size;
|
||||
}
|
||||
|
||||
namespace media {
|
||||
|
||||
class VideoEncoderMetricsProvider;
|
||||
|
||||
namespace cast {
|
||||
|
||||
class VideoFrameInput : public base::RefCountedThreadSafe<VideoFrameInput> {
|
||||
public:
|
||||
// Insert video frames into Cast sender. Frames will be encoded, packetized
|
||||
// and sent to the network.
|
||||
virtual void InsertRawVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
|
||||
base::TimeTicks capture_time) = 0;
|
||||
|
||||
// Creates a |VideoFrame| optimized for the encoder. When available, these
|
||||
// frames offer performance benefits, such as memory copy elimination. The
|
||||
// format is guaranteed to be I420 or NV12.
|
||||
//
|
||||
// Not every encoder supports this method. Use |CanCreateOptimizedFrames| to
|
||||
// determine if you can and should use this method.
|
||||
//
|
||||
// Even if |CanCreateOptimizedFrames| indicates support, there are transient
|
||||
// conditions during a session where optimized frames cannot be provided. In
|
||||
// this case, the caller must be able to account for a nullptr return value
|
||||
// and instantiate its own media::VideoFrames.
|
||||
virtual scoped_refptr<VideoFrame> MaybeCreateOptimizedFrame(
|
||||
const gfx::Size& frame_size, base::TimeDelta timestamp) = 0;
|
||||
|
||||
// Returns true if the encoder supports creating optimized frames.
|
||||
virtual bool CanCreateOptimizedFrames() const = 0;
|
||||
|
||||
protected:
|
||||
virtual ~VideoFrameInput() {}
|
||||
|
||||
private:
|
||||
friend class base::RefCountedThreadSafe<VideoFrameInput>;
|
||||
};
|
||||
|
||||
class AudioFrameInput : public base::RefCountedThreadSafe<AudioFrameInput> {
|
||||
public:
|
||||
// Insert audio frames into Cast sender. Frames will be encoded, packetized
|
||||
// and sent to the network.
|
||||
virtual void InsertAudio(std::unique_ptr<AudioBus> audio_bus,
|
||||
const base::TimeTicks& recorded_time) = 0;
|
||||
|
||||
protected:
|
||||
virtual ~AudioFrameInput() {}
|
||||
|
||||
private:
|
||||
friend class base::RefCountedThreadSafe<AudioFrameInput>;
|
||||
};
|
||||
|
||||
// All methods of CastSender must be called on the main thread.
|
||||
// Provided CastTransport will also be called on the main thread.
|
||||
class CastSender {
|
||||
public:
|
||||
static std::unique_ptr<CastSender> Create(
|
||||
scoped_refptr<CastEnvironment> cast_environment,
|
||||
CastTransport* const transport_sender);
|
||||
|
||||
virtual ~CastSender() {}
|
||||
|
||||
// All video frames for the session should be inserted to this object.
|
||||
virtual scoped_refptr<VideoFrameInput> video_frame_input() = 0;
|
||||
|
||||
// All audio frames for the session should be inserted to this object.
|
||||
virtual scoped_refptr<AudioFrameInput> audio_frame_input() = 0;
|
||||
|
||||
// Initialize the audio stack. Must be called in order to send audio frames.
|
||||
// |status_change_cb| will be run as operational status changes.
|
||||
virtual void InitializeAudio(const FrameSenderConfig& audio_config,
|
||||
StatusChangeOnceCallback status_change_cb) = 0;
|
||||
|
||||
// Initialize the video stack. Must be called in order to send video frames.
|
||||
// |status_change_cb| will be run as operational status changes.
|
||||
virtual void InitializeVideo(
|
||||
const FrameSenderConfig& video_config,
|
||||
std::unique_ptr<VideoEncoderMetricsProvider> metrics_provider,
|
||||
const StatusChangeCallback& status_change_cb,
|
||||
const CreateVideoEncodeAcceleratorCallback& create_vea_cb) = 0;
|
||||
|
||||
// Change the target delay. This is only valid if the receiver
|
||||
// supports the "adaptive_target_delay" rtp extension.
|
||||
virtual void SetTargetPlayoutDelay(
|
||||
base::TimeDelta new_target_playout_delay) = 0;
|
||||
};
|
||||
|
||||
} // namespace cast
|
||||
} // namespace media
|
||||
|
||||
#endif // MEDIA_CAST_CAST_SENDER_H_
|
@ -1,199 +0,0 @@
|
||||
// Copyright 2013 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/cast/cast_sender_impl.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "base/functional/bind.h"
|
||||
#include "base/functional/callback.h"
|
||||
#include "base/functional/callback_helpers.h"
|
||||
#include "base/logging.h"
|
||||
#include "media/base/video_encoder_metrics_provider.h"
|
||||
#include "media/base/video_frame.h"
|
||||
#include "media/cast/common/video_frame_factory.h"
|
||||
|
||||
namespace media {
|
||||
namespace cast {
|
||||
|
||||
// The LocalVideoFrameInput class posts all incoming video frames to the main
|
||||
// cast thread for processing.
|
||||
class LocalVideoFrameInput final : public VideoFrameInput {
|
||||
public:
|
||||
LocalVideoFrameInput(scoped_refptr<CastEnvironment> cast_environment,
|
||||
base::WeakPtr<VideoSender> video_sender)
|
||||
: cast_environment_(cast_environment),
|
||||
video_sender_(video_sender),
|
||||
video_frame_factory_(
|
||||
video_sender.get() ?
|
||||
video_sender->CreateVideoFrameFactory().release() : nullptr) {}
|
||||
|
||||
LocalVideoFrameInput(const LocalVideoFrameInput&) = delete;
|
||||
LocalVideoFrameInput& operator=(const LocalVideoFrameInput&) = delete;
|
||||
|
||||
void InsertRawVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
|
||||
base::TimeTicks capture_time) final {
|
||||
cast_environment_->PostTask(
|
||||
CastEnvironment::MAIN, FROM_HERE,
|
||||
base::BindOnce(&VideoSender::InsertRawVideoFrame, video_sender_,
|
||||
std::move(video_frame), capture_time));
|
||||
}
|
||||
|
||||
scoped_refptr<VideoFrame> MaybeCreateOptimizedFrame(
|
||||
const gfx::Size& frame_size,
|
||||
base::TimeDelta timestamp) final {
|
||||
return video_frame_factory_ ?
|
||||
video_frame_factory_->MaybeCreateFrame(frame_size, timestamp) : nullptr;
|
||||
}
|
||||
|
||||
bool CanCreateOptimizedFrames() const final {
|
||||
return video_frame_factory_.get() != nullptr;
|
||||
}
|
||||
|
||||
protected:
|
||||
~LocalVideoFrameInput() final = default;
|
||||
|
||||
private:
|
||||
friend class base::RefCountedThreadSafe<LocalVideoFrameInput>;
|
||||
|
||||
const scoped_refptr<CastEnvironment> cast_environment_;
|
||||
const base::WeakPtr<VideoSender> video_sender_;
|
||||
const std::unique_ptr<VideoFrameFactory> video_frame_factory_;
|
||||
};
|
||||
|
||||
// The LocalAudioFrameInput class posts all incoming audio frames to the main
|
||||
// cast thread for processing. Therefore frames can be inserted from any thread.
|
||||
class LocalAudioFrameInput final : public AudioFrameInput {
|
||||
public:
|
||||
LocalAudioFrameInput(scoped_refptr<CastEnvironment> cast_environment,
|
||||
base::WeakPtr<AudioSender> audio_sender)
|
||||
: cast_environment_(cast_environment), audio_sender_(audio_sender) {}
|
||||
|
||||
LocalAudioFrameInput(const LocalAudioFrameInput&) = delete;
|
||||
LocalAudioFrameInput& operator=(const LocalAudioFrameInput&) = delete;
|
||||
|
||||
void InsertAudio(std::unique_ptr<AudioBus> audio_bus,
|
||||
const base::TimeTicks& recorded_time) final {
|
||||
cast_environment_->PostTask(
|
||||
CastEnvironment::MAIN, FROM_HERE,
|
||||
base::BindOnce(&AudioSender::InsertAudio, audio_sender_,
|
||||
std::move(audio_bus), recorded_time));
|
||||
}
|
||||
|
||||
protected:
|
||||
~LocalAudioFrameInput() final = default;
|
||||
|
||||
private:
|
||||
friend class base::RefCountedThreadSafe<LocalAudioFrameInput>;
|
||||
|
||||
scoped_refptr<CastEnvironment> cast_environment_;
|
||||
base::WeakPtr<AudioSender> audio_sender_;
|
||||
};
|
||||
|
||||
std::unique_ptr<CastSender> CastSender::Create(
|
||||
scoped_refptr<CastEnvironment> cast_environment,
|
||||
CastTransport* const transport_sender) {
|
||||
CHECK(cast_environment.get());
|
||||
return std::unique_ptr<CastSender>(
|
||||
new CastSenderImpl(cast_environment, transport_sender));
|
||||
}
|
||||
|
||||
CastSenderImpl::CastSenderImpl(scoped_refptr<CastEnvironment> cast_environment,
|
||||
CastTransport* const transport_sender)
|
||||
: cast_environment_(cast_environment), transport_sender_(transport_sender) {
|
||||
CHECK(cast_environment.get());
|
||||
}
|
||||
|
||||
void CastSenderImpl::InitializeAudio(
|
||||
const FrameSenderConfig& audio_config,
|
||||
StatusChangeOnceCallback status_change_cb) {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
CHECK(audio_config.use_hardware_encoder ||
|
||||
cast_environment_->HasAudioThread());
|
||||
|
||||
VLOG(1) << "CastSenderImpl@" << this << "::InitializeAudio()";
|
||||
|
||||
audio_sender_ = std::make_unique<AudioSender>(
|
||||
cast_environment_, audio_config,
|
||||
base::BindOnce(&CastSenderImpl::OnAudioStatusChange,
|
||||
weak_factory_.GetWeakPtr(), std::move(status_change_cb)),
|
||||
transport_sender_);
|
||||
if (video_sender_) {
|
||||
DCHECK(audio_sender_->GetTargetPlayoutDelay() ==
|
||||
video_sender_->GetTargetPlayoutDelay());
|
||||
}
|
||||
}
|
||||
|
||||
void CastSenderImpl::InitializeVideo(
|
||||
const FrameSenderConfig& video_config,
|
||||
std::unique_ptr<VideoEncoderMetricsProvider> metrics_provider,
|
||||
const StatusChangeCallback& status_change_cb,
|
||||
const CreateVideoEncodeAcceleratorCallback& create_vea_cb) {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
|
||||
VLOG(1) << "CastSenderImpl@" << this << "::InitializeVideo()";
|
||||
|
||||
// No feedback callback, since it's ignored for CastSender.
|
||||
video_sender_ = std::make_unique<VideoSender>(
|
||||
cast_environment_, video_config,
|
||||
base::BindRepeating(&CastSenderImpl::OnVideoStatusChange,
|
||||
weak_factory_.GetWeakPtr(), status_change_cb),
|
||||
create_vea_cb, transport_sender_, std::move(metrics_provider),
|
||||
base::BindRepeating(&CastSenderImpl::SetTargetPlayoutDelay,
|
||||
weak_factory_.GetWeakPtr()),
|
||||
media::VideoCaptureFeedbackCB());
|
||||
if (audio_sender_) {
|
||||
DCHECK(audio_sender_->GetTargetPlayoutDelay() ==
|
||||
video_sender_->GetTargetPlayoutDelay());
|
||||
}
|
||||
}
|
||||
|
||||
CastSenderImpl::~CastSenderImpl() {
|
||||
VLOG(1) << "CastSenderImpl@" << this << "::~CastSenderImpl()";
|
||||
}
|
||||
|
||||
scoped_refptr<AudioFrameInput> CastSenderImpl::audio_frame_input() {
|
||||
return audio_frame_input_;
|
||||
}
|
||||
|
||||
scoped_refptr<VideoFrameInput> CastSenderImpl::video_frame_input() {
|
||||
return video_frame_input_;
|
||||
}
|
||||
|
||||
void CastSenderImpl::SetTargetPlayoutDelay(
|
||||
base::TimeDelta new_target_playout_delay) {
|
||||
VLOG(1) << "CastSenderImpl@" << this << "::SetTargetPlayoutDelay("
|
||||
<< new_target_playout_delay.InMilliseconds() << " ms)";
|
||||
if (audio_sender_) {
|
||||
audio_sender_->SetTargetPlayoutDelay(new_target_playout_delay);
|
||||
}
|
||||
if (video_sender_) {
|
||||
video_sender_->SetTargetPlayoutDelay(new_target_playout_delay);
|
||||
}
|
||||
}
|
||||
|
||||
void CastSenderImpl::OnAudioStatusChange(
|
||||
StatusChangeOnceCallback status_change_cb,
|
||||
OperationalStatus status) {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
if (status == STATUS_INITIALIZED && !audio_frame_input_) {
|
||||
audio_frame_input_ =
|
||||
new LocalAudioFrameInput(cast_environment_, audio_sender_->AsWeakPtr());
|
||||
}
|
||||
std::move(status_change_cb).Run(status);
|
||||
}
|
||||
|
||||
void CastSenderImpl::OnVideoStatusChange(
|
||||
const StatusChangeCallback& status_change_cb,
|
||||
OperationalStatus status) {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
if (status == STATUS_INITIALIZED && !video_frame_input_) {
|
||||
video_frame_input_ =
|
||||
new LocalVideoFrameInput(cast_environment_, video_sender_->AsWeakPtr());
|
||||
}
|
||||
status_change_cb.Run(status);
|
||||
}
|
||||
|
||||
} // namespace cast
|
||||
} // namespace media
|
@ -1,70 +0,0 @@
|
||||
// Copyright 2013 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
#ifndef MEDIA_CAST_CAST_SENDER_IMPL_H_
|
||||
#define MEDIA_CAST_CAST_SENDER_IMPL_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/memory/scoped_refptr.h"
|
||||
#include "media/cast/cast_environment.h"
|
||||
#include "media/cast/cast_sender.h"
|
||||
#include "media/cast/sender/audio_sender.h"
|
||||
#include "media/cast/sender/video_sender.h"
|
||||
|
||||
namespace media {
|
||||
|
||||
namespace cast {
|
||||
class AudioSender;
|
||||
class VideoSender;
|
||||
|
||||
// This class combines all required sending objects such as the audio and video
|
||||
// senders, pacer, packet receiver and frame input.
|
||||
class CastSenderImpl final : public CastSender {
|
||||
public:
|
||||
CastSenderImpl(scoped_refptr<CastEnvironment> cast_environment,
|
||||
CastTransport* const transport_sender);
|
||||
|
||||
void InitializeAudio(const FrameSenderConfig& audio_config,
|
||||
StatusChangeOnceCallback status_change_cb) final;
|
||||
void InitializeVideo(
|
||||
const FrameSenderConfig& video_config,
|
||||
std::unique_ptr<VideoEncoderMetricsProvider> metrics_provider,
|
||||
const StatusChangeCallback& status_change_cb,
|
||||
const CreateVideoEncodeAcceleratorCallback& create_vea_cb) final;
|
||||
|
||||
void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay) final;
|
||||
|
||||
CastSenderImpl(const CastSenderImpl&) = delete;
|
||||
CastSenderImpl& operator=(const CastSenderImpl&) = delete;
|
||||
|
||||
~CastSenderImpl() final;
|
||||
|
||||
scoped_refptr<AudioFrameInput> audio_frame_input() final;
|
||||
scoped_refptr<VideoFrameInput> video_frame_input() final;
|
||||
|
||||
private:
|
||||
void ReceivedPacket(std::unique_ptr<Packet> packet);
|
||||
void OnAudioStatusChange(StatusChangeOnceCallback status_change_cb,
|
||||
OperationalStatus status);
|
||||
void OnVideoStatusChange(const StatusChangeCallback& status_change_cb,
|
||||
OperationalStatus status);
|
||||
|
||||
std::unique_ptr<AudioSender> audio_sender_;
|
||||
std::unique_ptr<VideoSender> video_sender_;
|
||||
scoped_refptr<AudioFrameInput> audio_frame_input_;
|
||||
scoped_refptr<VideoFrameInput> video_frame_input_;
|
||||
scoped_refptr<CastEnvironment> cast_environment_;
|
||||
// The transport sender is owned by the owner of the CastSender, and should be
|
||||
// valid throughout the lifetime of the CastSender.
|
||||
const raw_ptr<CastTransport> transport_sender_;
|
||||
|
||||
// NOTE: Weak pointers must be invalidated before all other member variables.
|
||||
base::WeakPtrFactory<CastSenderImpl> weak_factory_{this};
|
||||
};
|
||||
|
||||
} // namespace cast
|
||||
} // namespace media
|
||||
|
||||
#endif // MEDIA_CAST_CAST_SENDER_IMPL_H_
|
@ -5,7 +5,6 @@
|
||||
#include "media/cast/common/openscreen_conversion_helpers.h"
|
||||
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_sender.h"
|
||||
#include "media/cast/common/sender_encoded_frame.h"
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/encoded_frame.h"
|
||||
|
@ -9,14 +9,12 @@
|
||||
#include "base/check_op.h"
|
||||
#include "base/functional/bind.h"
|
||||
#include "base/metrics/histogram_functions.h"
|
||||
#include "base/notreached.h"
|
||||
#include "base/trace_event/trace_event.h"
|
||||
#include "media/cast/common/openscreen_conversion_helpers.h"
|
||||
#include "media/cast/common/rtp_time.h"
|
||||
#include "media/cast/common/sender_encoded_frame.h"
|
||||
#include "media/cast/encoding/audio_encoder.h"
|
||||
#include "media/cast/net/cast_transport_config.h"
|
||||
#include "media/cast/sender/openscreen_frame_sender.h"
|
||||
#include "media/cast/sender/frame_sender.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/sender.h"
|
||||
|
||||
namespace media::cast {
|
||||
@ -32,37 +30,16 @@ constexpr char kHistogramFrameDropped[] =
|
||||
|
||||
} // namespace
|
||||
|
||||
AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& audio_config,
|
||||
StatusChangeOnceCallback status_change_cb,
|
||||
CastTransport* const transport_sender)
|
||||
: AudioSender(cast_environment,
|
||||
audio_config,
|
||||
std::move(status_change_cb),
|
||||
FrameSender::Create(cast_environment,
|
||||
audio_config,
|
||||
transport_sender,
|
||||
*this)) {}
|
||||
|
||||
AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& audio_config,
|
||||
StatusChangeOnceCallback status_change_cb,
|
||||
std::unique_ptr<openscreen::cast::Sender> sender)
|
||||
: AudioSender(cast_environment,
|
||||
audio_config,
|
||||
std::move(status_change_cb),
|
||||
FrameSender::Create(cast_environment,
|
||||
audio_config,
|
||||
std::move(sender),
|
||||
*this)) {}
|
||||
|
||||
AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& audio_config,
|
||||
StatusChangeOnceCallback status_change_cb,
|
||||
std::unique_ptr<FrameSender> sender)
|
||||
: cast_environment_(cast_environment),
|
||||
rtp_timebase_(audio_config.rtp_timebase),
|
||||
frame_sender_(std::move(sender)) {
|
||||
frame_sender_(FrameSender::Create(cast_environment,
|
||||
audio_config,
|
||||
std::move(sender),
|
||||
*this)) {
|
||||
if (!audio_config.use_hardware_encoder) {
|
||||
audio_encoder_ = std::make_unique<AudioEncoder>(
|
||||
std::move(cast_environment), audio_config.channels, rtp_timebase_,
|
||||
@ -94,7 +71,7 @@ AudioSender::~AudioSender() {
|
||||
}
|
||||
|
||||
void AudioSender::InsertAudio(std::unique_ptr<AudioBus> audio_bus,
|
||||
const base::TimeTicks& recorded_time) {
|
||||
base::TimeTicks recorded_time) {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
CHECK(audio_encoder_);
|
||||
|
||||
@ -134,6 +111,8 @@ base::WeakPtr<AudioSender> AudioSender::AsWeakPtr() {
|
||||
return weak_factory_.GetWeakPtr();
|
||||
}
|
||||
|
||||
AudioSender::AudioSender() = default;
|
||||
|
||||
int AudioSender::GetNumberOfFramesInEncoder() const {
|
||||
// Note: It's possible for a partial frame to be in the encoder, but returning
|
||||
// the floor() is good enough for the "design limit" check in FrameSenderImpl.
|
||||
|
@ -13,8 +13,8 @@
|
||||
#include "base/time/tick_clock.h"
|
||||
#include "base/time/time.h"
|
||||
#include "media/base/audio_bus.h"
|
||||
#include "media/cast/cast_callbacks.h"
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_sender.h"
|
||||
#include "media/cast/sender/frame_sender.h"
|
||||
|
||||
namespace openscreen::cast {
|
||||
@ -31,15 +31,8 @@ class AudioEncoder;
|
||||
// RTCP packets.
|
||||
// Additionally it posts a bunch of delayed tasks to the main thread for various
|
||||
// timeouts.
|
||||
class AudioSender final : public FrameSender::Client {
|
||||
class AudioSender : public FrameSender::Client {
|
||||
public:
|
||||
// Old way to instantiate, using a cast transport.
|
||||
// TODO(https://crbug.com/1316434): should be removed once libcast sender is
|
||||
// successfully launched.
|
||||
AudioSender(scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& audio_config,
|
||||
StatusChangeOnceCallback status_change_cb,
|
||||
CastTransport* const transport_sender);
|
||||
|
||||
// New way of instantiating using an openscreen::cast::Sender. Since the
|
||||
// |Sender| instance is destroyed when renegotiation is complete, |this|
|
||||
@ -52,13 +45,13 @@ class AudioSender final : public FrameSender::Client {
|
||||
AudioSender(const AudioSender&) = delete;
|
||||
AudioSender& operator=(const AudioSender&) = delete;
|
||||
|
||||
~AudioSender() final;
|
||||
~AudioSender() override;
|
||||
|
||||
// Note: It is not guaranteed that |audio_frame| will actually be encoded and
|
||||
// sent, if AudioSender detects too many frames in flight. Therefore, clients
|
||||
// should be careful about the rate at which this method is called.
|
||||
void InsertAudio(std::unique_ptr<AudioBus> audio_bus,
|
||||
const base::TimeTicks& recorded_time);
|
||||
virtual void InsertAudio(std::unique_ptr<AudioBus> audio_bus,
|
||||
base::TimeTicks recorded_time);
|
||||
|
||||
void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay);
|
||||
base::TimeDelta GetTargetPlayoutDelay() const;
|
||||
@ -67,6 +60,9 @@ class AudioSender final : public FrameSender::Client {
|
||||
base::WeakPtr<AudioSender> AsWeakPtr();
|
||||
|
||||
protected:
|
||||
// For mocking in unit tests.
|
||||
AudioSender();
|
||||
|
||||
// FrameSender::Client overrides.
|
||||
int GetNumberOfFramesInEncoder() const final;
|
||||
base::TimeDelta GetEncoderBacklogDuration() const final;
|
||||
@ -84,7 +80,7 @@ class AudioSender final : public FrameSender::Client {
|
||||
scoped_refptr<CastEnvironment> cast_environment_;
|
||||
|
||||
// The number of RTP units advanced per second;
|
||||
const int rtp_timebase_;
|
||||
const int rtp_timebase_ = 0;
|
||||
|
||||
// The backing frame sender implementation.
|
||||
std::unique_ptr<FrameSender> frame_sender_;
|
||||
|
@ -16,15 +16,25 @@
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/test/simple_test_tick_clock.h"
|
||||
#include "base/values.h"
|
||||
#include "components/openscreen_platform/task_runner.h"
|
||||
#include "media/base/fake_single_thread_task_runner.h"
|
||||
#include "media/base/media.h"
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_environment.h"
|
||||
#include "media/cast/common/openscreen_conversion_helpers.h"
|
||||
#include "media/cast/constants.h"
|
||||
#include "media/cast/net/cast_transport_config.h"
|
||||
#include "media/cast/net/cast_transport_impl.h"
|
||||
#include "media/cast/test/fake_openscreen_clock.h"
|
||||
#include "media/cast/test/mock_openscreen_environment.h"
|
||||
#include "media/cast/test/utility/audio_utility.h"
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/environment.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/sender.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/sender_packet_router.h"
|
||||
#include "third_party/openscreen/src/platform/api/time.h"
|
||||
#include "third_party/openscreen/src/platform/base/ip_address.h"
|
||||
#include "third_party/openscreen/src/platform/base/trivial_clock_traits.h"
|
||||
|
||||
using testing::_;
|
||||
|
||||
namespace media::cast {
|
||||
|
||||
@ -37,61 +47,8 @@ void SaveOperationalStatus(OperationalStatus* out_status,
|
||||
*out_status = in_status;
|
||||
}
|
||||
|
||||
class TransportClient : public CastTransport::Client {
|
||||
public:
|
||||
TransportClient() = default;
|
||||
|
||||
TransportClient(const TransportClient&) = delete;
|
||||
TransportClient& operator=(const TransportClient&) = delete;
|
||||
|
||||
void OnStatusChanged(CastTransportStatus status) final {
|
||||
EXPECT_EQ(TRANSPORT_STREAM_INITIALIZED, status);
|
||||
}
|
||||
void OnLoggingEventsReceived(
|
||||
std::unique_ptr<std::vector<FrameEvent>> frame_events,
|
||||
std::unique_ptr<std::vector<PacketEvent>> packet_events) final {}
|
||||
void ProcessRtpPacket(std::unique_ptr<Packet> packet) final {}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
class TestPacketSender : public PacketTransport {
|
||||
public:
|
||||
TestPacketSender() : number_of_rtp_packets_(0), number_of_rtcp_packets_(0) {}
|
||||
|
||||
TestPacketSender(const TestPacketSender&) = delete;
|
||||
TestPacketSender& operator=(const TestPacketSender&) = delete;
|
||||
|
||||
bool SendPacket(PacketRef packet, base::OnceClosure cb) final {
|
||||
if (IsRtcpPacket(packet->data)) {
|
||||
++number_of_rtcp_packets_;
|
||||
} else {
|
||||
// Check that at least one RTCP packet was sent before the first RTP
|
||||
// packet. This confirms that the receiver will have the necessary lip
|
||||
// sync info before it has to calculate the playout time of the first
|
||||
// frame.
|
||||
if (number_of_rtp_packets_ == 0)
|
||||
EXPECT_LE(1, number_of_rtcp_packets_);
|
||||
++number_of_rtp_packets_;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
int64_t GetBytesSent() final { return 0; }
|
||||
|
||||
void StartReceiving(PacketReceiverCallbackWithStatus packet_receiver) final {}
|
||||
|
||||
void StopReceiving() final {}
|
||||
|
||||
int number_of_rtp_packets() const { return number_of_rtp_packets_; }
|
||||
|
||||
int number_of_rtcp_packets() const { return number_of_rtcp_packets_; }
|
||||
|
||||
private:
|
||||
int number_of_rtp_packets_;
|
||||
int number_of_rtcp_packets_;
|
||||
};
|
||||
|
||||
class AudioSenderTest : public ::testing::Test {
|
||||
protected:
|
||||
AudioSenderTest()
|
||||
@ -100,10 +57,20 @@ class AudioSenderTest : public ::testing::Test {
|
||||
cast_environment_(base::MakeRefCounted<CastEnvironment>(&testing_clock_,
|
||||
task_runner_,
|
||||
task_runner_,
|
||||
task_runner_)) {
|
||||
task_runner_)),
|
||||
openscreen_task_runner_(task_runner_) {
|
||||
FakeOpenscreenClock::SetTickClock(&testing_clock_);
|
||||
InitializeMediaLibrary();
|
||||
testing_clock_.Advance(base::TimeTicks::Now() - base::TimeTicks());
|
||||
|
||||
mock_openscreen_environment_ = std::make_unique<MockOpenscreenEnvironment>(
|
||||
&FakeOpenscreenClock::now, openscreen_task_runner_);
|
||||
openscreen_packet_router_ =
|
||||
std::make_unique<openscreen::cast::SenderPacketRouter>(
|
||||
mock_openscreen_environment_.get());
|
||||
|
||||
audio_config_.sender_ssrc = 35535;
|
||||
audio_config_.receiver_ssrc = 35536;
|
||||
audio_config_.codec = Codec::kAudioOpus;
|
||||
audio_config_.use_hardware_encoder = false;
|
||||
audio_config_.rtp_timebase = kDefaultAudioSamplingRate;
|
||||
@ -111,30 +78,41 @@ class AudioSenderTest : public ::testing::Test {
|
||||
audio_config_.max_bitrate = kDefaultAudioEncoderBitrate;
|
||||
audio_config_.rtp_payload_type = RtpPayloadType::AUDIO_OPUS;
|
||||
|
||||
auto sender = std::make_unique<TestPacketSender>();
|
||||
transport_ = sender.get();
|
||||
transport_sender_ = std::make_unique<CastTransportImpl>(
|
||||
&testing_clock_, base::TimeDelta(), std::make_unique<TransportClient>(),
|
||||
std::move(sender), task_runner_);
|
||||
openscreen::cast::SessionConfig openscreen_audio_config =
|
||||
ToOpenscreenSessionConfig(audio_config_, /* is_pli_enabled= */ true);
|
||||
|
||||
auto openscreen_audio_sender = std::make_unique<openscreen::cast::Sender>(
|
||||
mock_openscreen_environment_.get(), openscreen_packet_router_.get(),
|
||||
openscreen_audio_config, openscreen::cast::RtpPayloadType::kAudioOpus);
|
||||
openscreen_audio_sender_ = openscreen_audio_sender.get();
|
||||
|
||||
OperationalStatus operational_status = STATUS_UNINITIALIZED;
|
||||
audio_sender_ = std::make_unique<AudioSender>(
|
||||
cast_environment_, audio_config_,
|
||||
base::BindOnce(&SaveOperationalStatus, &operational_status),
|
||||
transport_sender_.get());
|
||||
std::move(openscreen_audio_sender));
|
||||
task_runner_->RunTasks();
|
||||
CHECK_EQ(STATUS_INITIALIZED, operational_status);
|
||||
}
|
||||
|
||||
~AudioSenderTest() override = default;
|
||||
~AudioSenderTest() override {
|
||||
FakeOpenscreenClock::ClearTickClock();
|
||||
openscreen_audio_sender_ = nullptr;
|
||||
}
|
||||
|
||||
base::SimpleTestTickClock testing_clock_;
|
||||
const scoped_refptr<FakeSingleThreadTaskRunner> task_runner_;
|
||||
const scoped_refptr<CastEnvironment> cast_environment_;
|
||||
std::unique_ptr<CastTransportImpl> transport_sender_;
|
||||
raw_ptr<TestPacketSender> transport_; // Owned by CastTransport.
|
||||
std::unique_ptr<AudioSender> audio_sender_;
|
||||
// openscreen::Sender related classes.
|
||||
openscreen_platform::TaskRunner openscreen_task_runner_;
|
||||
std::unique_ptr<media::cast::MockOpenscreenEnvironment>
|
||||
mock_openscreen_environment_;
|
||||
std::unique_ptr<openscreen::cast::SenderPacketRouter>
|
||||
openscreen_packet_router_;
|
||||
FrameSenderConfig audio_config_;
|
||||
std::unique_ptr<AudioSender> audio_sender_;
|
||||
// Unowned pointer to the openscreen::cast::Sender.
|
||||
raw_ptr<openscreen::cast::Sender> openscreen_audio_sender_;
|
||||
};
|
||||
|
||||
TEST_F(AudioSenderTest, Encode20ms) {
|
||||
@ -144,29 +122,11 @@ TEST_F(AudioSenderTest, Encode20ms) {
|
||||
TestAudioBusFactory::kMiddleANoteFreq, 0.5f)
|
||||
.NextAudioBus(kDuration));
|
||||
|
||||
audio_sender_->InsertAudio(std::move(bus), testing_clock_.NowTicks());
|
||||
task_runner_->RunTasks();
|
||||
EXPECT_LE(1, transport_->number_of_rtp_packets());
|
||||
EXPECT_LE(1, transport_->number_of_rtcp_packets());
|
||||
}
|
||||
|
||||
TEST_F(AudioSenderTest, RtcpTimer) {
|
||||
const base::TimeDelta kDuration = base::Milliseconds(20);
|
||||
std::unique_ptr<AudioBus> bus(
|
||||
TestAudioBusFactory(audio_config_.channels, audio_config_.rtp_timebase,
|
||||
TestAudioBusFactory::kMiddleANoteFreq, 0.5f)
|
||||
.NextAudioBus(kDuration));
|
||||
EXPECT_CALL(*mock_openscreen_environment_, SendPacket(_, _)).Times(3);
|
||||
|
||||
audio_sender_->InsertAudio(std::move(bus), testing_clock_.NowTicks());
|
||||
task_runner_->RunTasks();
|
||||
|
||||
// Make sure that we send at least one RTCP packet.
|
||||
base::TimeDelta max_rtcp_timeout =
|
||||
base::Milliseconds(1) + kRtcpReportInterval * 3 / 2;
|
||||
testing_clock_.Advance(max_rtcp_timeout);
|
||||
task_runner_->RunTasks();
|
||||
EXPECT_LE(1, transport_->number_of_rtp_packets());
|
||||
EXPECT_LE(1, transport_->number_of_rtcp_packets());
|
||||
EXPECT_EQ(2, openscreen_audio_sender_->GetInFlightFrameCount());
|
||||
}
|
||||
|
||||
} // namespace media::cast
|
||||
|
@ -3,11 +3,11 @@
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/cast/sender/frame_sender.h"
|
||||
#include "base/feature_list.h"
|
||||
#include "media/base/media_switches.h"
|
||||
|
||||
namespace media::cast {
|
||||
|
||||
FrameSender::Client::~Client() = default;
|
||||
|
||||
FrameSender::FrameSender() = default;
|
||||
FrameSender::~FrameSender() = default;
|
||||
|
||||
|
@ -14,9 +14,7 @@
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_environment.h"
|
||||
#include "media/cast/constants.h"
|
||||
#include "media/cast/net/cast_transport.h"
|
||||
#include "media/cast/net/rtcp/rtcp_defines.h"
|
||||
#include "media/cast/sender/congestion_control.h"
|
||||
|
||||
namespace openscreen::cast {
|
||||
class Sender;
|
||||
@ -26,7 +24,6 @@ namespace media::cast {
|
||||
|
||||
struct SenderEncodedFrame;
|
||||
class CastEnvironment;
|
||||
class CastTransport;
|
||||
|
||||
// This is the pure virtual interface for an object that sends encoded frames
|
||||
// to a receiver.
|
||||
@ -51,15 +48,6 @@ class FrameSender {
|
||||
virtual void OnFrameCanceled(FrameId frame_id) {}
|
||||
};
|
||||
|
||||
// Method of creating a frame sender using a cast transport.
|
||||
// TODO(https://crbug.com/1316434): should be removed once libcast sender is
|
||||
// successfully launched.
|
||||
static std::unique_ptr<FrameSender> Create(
|
||||
scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& config,
|
||||
CastTransport* const transport_sender,
|
||||
Client& client);
|
||||
|
||||
// NOTE: currently only used by the VideoSender.
|
||||
// TODO(https://crbug.com/1316434): cleanup bitrate calculations when libcast
|
||||
// has successfully launched.
|
||||
@ -138,15 +126,6 @@ class FrameSender {
|
||||
|
||||
// The last acknowledged frame ID.
|
||||
virtual FrameId LastAckedFrameId() const = 0;
|
||||
|
||||
// RTCP client-specific methods.
|
||||
// TODO(https://crbug.com/1318499): these assume we are using an RTCP client,
|
||||
// which is not true when this implementation is backed by an
|
||||
// OpenscreenFrameSender. These methods should be removed and tests updated to
|
||||
// use a different mechanism.
|
||||
virtual void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) = 0;
|
||||
virtual void OnReceivedPli() = 0;
|
||||
virtual void OnMeasuredRoundTripTime(base::TimeDelta rtt) = 0;
|
||||
};
|
||||
|
||||
} // namespace media::cast
|
||||
|
@ -1,553 +0,0 @@
|
||||
// Copyright 2014 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/cast/sender/frame_sender_impl.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "base/feature_list.h"
|
||||
#include "base/functional/bind.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/numerics/safe_conversions.h"
|
||||
#include "base/trace_event/trace_event.h"
|
||||
#include "media/cast/common/openscreen_conversion_helpers.h"
|
||||
#include "media/cast/common/sender_encoded_frame.h"
|
||||
#include "media/cast/constants.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/encoded_frame.h"
|
||||
|
||||
namespace media::cast {
|
||||
namespace {
|
||||
|
||||
constexpr int kNumAggressiveReportsSentAtStart = 100;
|
||||
constexpr base::TimeDelta kMinSchedulingDelay = base::Milliseconds(1);
|
||||
constexpr base::TimeDelta kReceiverProcessTime = base::Milliseconds(250);
|
||||
|
||||
// The additional number of frames that can be in-flight when input exceeds the
|
||||
// maximum frame rate.
|
||||
constexpr int kMaxFrameBurst = 5;
|
||||
|
||||
} // namespace
|
||||
|
||||
// static
|
||||
std::unique_ptr<FrameSender> FrameSender::Create(
|
||||
scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& config,
|
||||
CastTransport* const transport_sender,
|
||||
Client& client) {
|
||||
return std::make_unique<FrameSenderImpl>(cast_environment, config,
|
||||
transport_sender, client);
|
||||
}
|
||||
|
||||
// Convenience macro used in logging statements throughout this file.
|
||||
#define SENDER_SSRC \
|
||||
(is_audio_ ? "AUDIO[" : "VIDEO[") << config_.sender_ssrc << "] "
|
||||
|
||||
FrameSenderImpl::Client::~Client() = default;
|
||||
|
||||
FrameSenderImpl::RtcpClient::RtcpClient(
|
||||
base::WeakPtr<FrameSenderImpl> frame_sender)
|
||||
: frame_sender_(frame_sender) {}
|
||||
|
||||
FrameSenderImpl::RtcpClient::~RtcpClient() = default;
|
||||
|
||||
void FrameSenderImpl::RtcpClient::OnReceivedCastMessage(
|
||||
const RtcpCastMessage& cast_message) {
|
||||
if (frame_sender_)
|
||||
frame_sender_->OnReceivedCastFeedback(cast_message);
|
||||
}
|
||||
|
||||
void FrameSenderImpl::RtcpClient::OnReceivedRtt(
|
||||
base::TimeDelta round_trip_time) {
|
||||
if (frame_sender_)
|
||||
frame_sender_->OnMeasuredRoundTripTime(round_trip_time);
|
||||
}
|
||||
|
||||
void FrameSenderImpl::RtcpClient::OnReceivedPli() {
|
||||
if (frame_sender_)
|
||||
frame_sender_->OnReceivedPli();
|
||||
}
|
||||
|
||||
FrameSenderImpl::FrameSenderImpl(
|
||||
scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& config,
|
||||
CastTransport* const transport_sender,
|
||||
Client& client)
|
||||
: cast_environment_(cast_environment),
|
||||
config_(config),
|
||||
target_playout_delay_(config.max_playout_delay),
|
||||
max_frame_rate_(config.max_frame_rate),
|
||||
transport_sender_(transport_sender),
|
||||
client_(client),
|
||||
is_audio_(config.rtp_payload_type <= RtpPayloadType::AUDIO_LAST),
|
||||
// We only use the adaptive control for software video encoding.
|
||||
congestion_control_(
|
||||
(!config.use_hardware_encoder && !is_audio_)
|
||||
? NewAdaptiveCongestionControl(cast_environment->Clock(),
|
||||
config.max_bitrate,
|
||||
config.min_bitrate,
|
||||
max_frame_rate_)
|
||||
: NewFixedCongestionControl(
|
||||
(config.min_bitrate + config.max_bitrate) / 2)
|
||||
|
||||
),
|
||||
max_ack_delay_(config_.max_playout_delay) {
|
||||
DCHECK(transport_sender_);
|
||||
DCHECK_GT(config_.rtp_timebase, 0);
|
||||
DCHECK(congestion_control_);
|
||||
|
||||
// We start at the minimum playout delay and extend if necessary later.
|
||||
VLOG(1) << SENDER_SSRC << "min latency "
|
||||
<< config_.min_playout_delay.InMilliseconds() << ", max latency "
|
||||
<< config_.max_playout_delay.InMilliseconds();
|
||||
SetTargetPlayoutDelay(config_.min_playout_delay);
|
||||
|
||||
CastTransportRtpConfig transport_config;
|
||||
transport_config.ssrc = config.sender_ssrc;
|
||||
transport_config.feedback_ssrc = config.receiver_ssrc;
|
||||
transport_config.rtp_payload_type = config.rtp_payload_type;
|
||||
transport_config.aes_key = config.aes_key;
|
||||
transport_config.aes_iv_mask = config.aes_iv_mask;
|
||||
transport_sender_->InitializeStream(
|
||||
transport_config, std::make_unique<FrameSenderImpl::RtcpClient>(
|
||||
weak_factory_.GetWeakPtr()));
|
||||
}
|
||||
|
||||
FrameSenderImpl::~FrameSenderImpl() = default;
|
||||
|
||||
bool FrameSenderImpl::NeedsKeyFrame() const {
|
||||
return picture_lost_at_receiver_;
|
||||
}
|
||||
|
||||
base::TimeTicks FrameSenderImpl::GetRecordedReferenceTime(
|
||||
FrameId frame_id) const {
|
||||
return frame_reference_times_[frame_id.lower_8_bits()];
|
||||
}
|
||||
|
||||
void FrameSenderImpl::ScheduleNextRtcpReport() {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
|
||||
cast_environment_->PostDelayedTask(
|
||||
CastEnvironment::MAIN, FROM_HERE,
|
||||
base::BindOnce(&FrameSenderImpl::SendRtcpReport,
|
||||
weak_factory_.GetWeakPtr(), true),
|
||||
kRtcpReportInterval);
|
||||
}
|
||||
|
||||
void FrameSenderImpl::SendRtcpReport(bool schedule_future_reports) {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
|
||||
// Sanity-check: We should have sent at least the first frame by this point.
|
||||
DCHECK(!last_send_time_.is_null());
|
||||
|
||||
// Create lip-sync info for the sender report. The last sent frame's
|
||||
// reference time and RTP timestamp are used to estimate an RTP timestamp in
|
||||
// terms of "now." Note that |now| is never likely to be precise to an exact
|
||||
// frame boundary; and so the computation here will result in a
|
||||
// |now_as_rtp_timestamp| value that is rarely equal to any one emitted by the
|
||||
// encoder.
|
||||
const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
|
||||
const base::TimeDelta time_delta =
|
||||
now - GetRecordedReferenceTime(last_sent_frame_id_);
|
||||
const RtpTimeDelta rtp_delta =
|
||||
ToRtpTimeDelta(time_delta, config_.rtp_timebase);
|
||||
const RtpTimeTicks now_as_rtp_timestamp =
|
||||
GetRecordedRtpTimestamp(last_sent_frame_id_) + rtp_delta;
|
||||
transport_sender_->SendSenderReport(config_.sender_ssrc, now,
|
||||
now_as_rtp_timestamp);
|
||||
|
||||
if (schedule_future_reports)
|
||||
ScheduleNextRtcpReport();
|
||||
}
|
||||
|
||||
void FrameSenderImpl::OnMeasuredRoundTripTime(base::TimeDelta round_trip_time) {
|
||||
DCHECK_GT(round_trip_time, base::TimeDelta());
|
||||
current_round_trip_time_ = round_trip_time;
|
||||
max_ack_delay_ = 2 * std::max(current_round_trip_time_, base::TimeDelta()) +
|
||||
kReceiverProcessTime;
|
||||
max_ack_delay_ = std::min(max_ack_delay_, target_playout_delay_);
|
||||
}
|
||||
|
||||
void FrameSenderImpl::SetTargetPlayoutDelay(
|
||||
base::TimeDelta new_target_playout_delay) {
|
||||
if (send_target_playout_delay_ &&
|
||||
target_playout_delay_ == new_target_playout_delay) {
|
||||
return;
|
||||
}
|
||||
new_target_playout_delay =
|
||||
std::max(new_target_playout_delay, config_.min_playout_delay);
|
||||
new_target_playout_delay =
|
||||
std::min(new_target_playout_delay, config_.max_playout_delay);
|
||||
VLOG(2) << SENDER_SSRC << "Target playout delay changing from "
|
||||
<< target_playout_delay_.InMilliseconds() << " ms to "
|
||||
<< new_target_playout_delay.InMilliseconds() << " ms.";
|
||||
target_playout_delay_ = new_target_playout_delay;
|
||||
max_ack_delay_ = std::min(max_ack_delay_, target_playout_delay_);
|
||||
send_target_playout_delay_ = true;
|
||||
congestion_control_->UpdateTargetPlayoutDelay(target_playout_delay_);
|
||||
}
|
||||
|
||||
base::TimeDelta FrameSenderImpl::GetTargetPlayoutDelay() const {
|
||||
return target_playout_delay_;
|
||||
}
|
||||
|
||||
void FrameSenderImpl::ResendCheck() {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
DCHECK(!last_send_time_.is_null());
|
||||
const base::TimeDelta time_since_last_send =
|
||||
cast_environment_->Clock()->NowTicks() - last_send_time_;
|
||||
if (time_since_last_send > max_ack_delay_) {
|
||||
if (latest_acked_frame_id_ == last_sent_frame_id_) {
|
||||
// Last frame acked, no point in doing anything
|
||||
} else {
|
||||
VLOG(1) << SENDER_SSRC
|
||||
<< "ACK timeout; last acked frame: " << latest_acked_frame_id_;
|
||||
ResendForKickstart();
|
||||
}
|
||||
}
|
||||
ScheduleNextResendCheck();
|
||||
}
|
||||
|
||||
void FrameSenderImpl::ScheduleNextResendCheck() {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
DCHECK(!last_send_time_.is_null());
|
||||
base::TimeDelta time_to_next =
|
||||
last_send_time_ - cast_environment_->Clock()->NowTicks() + max_ack_delay_;
|
||||
time_to_next = std::max(time_to_next, kMinSchedulingDelay);
|
||||
cast_environment_->PostDelayedTask(
|
||||
CastEnvironment::MAIN, FROM_HERE,
|
||||
base::BindOnce(&FrameSenderImpl::ResendCheck, weak_factory_.GetWeakPtr()),
|
||||
time_to_next);
|
||||
}
|
||||
|
||||
void FrameSenderImpl::ResendForKickstart() {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
DCHECK(!last_send_time_.is_null());
|
||||
VLOG(1) << SENDER_SSRC << "Resending last packet of frame "
|
||||
<< last_sent_frame_id_ << " to kick-start.";
|
||||
last_send_time_ = cast_environment_->Clock()->NowTicks();
|
||||
transport_sender_->ResendFrameForKickstart(config_.sender_ssrc,
|
||||
last_sent_frame_id_);
|
||||
}
|
||||
|
||||
void FrameSenderImpl::RecordLatestFrameTimestamps(
|
||||
FrameId frame_id,
|
||||
base::TimeTicks reference_time,
|
||||
RtpTimeTicks rtp_timestamp) {
|
||||
DCHECK(!reference_time.is_null());
|
||||
frame_reference_times_[frame_id.lower_8_bits()] = reference_time;
|
||||
frame_rtp_timestamps_[frame_id.lower_8_bits()] = rtp_timestamp;
|
||||
}
|
||||
|
||||
base::TimeDelta FrameSenderImpl::GetInFlightMediaDuration() const {
|
||||
const base::TimeDelta encoder_duration = client_->GetEncoderBacklogDuration();
|
||||
// No frames are in flight, so only look at the encoder duration.
|
||||
if (last_sent_frame_id_ == latest_acked_frame_id_) {
|
||||
return encoder_duration;
|
||||
}
|
||||
|
||||
const RtpTimeTicks oldest_acked_timestamp =
|
||||
GetRecordedRtpTimestamp(latest_acked_frame_id_);
|
||||
const RtpTimeTicks newest_acked_timestamp =
|
||||
GetRecordedRtpTimestamp(last_sent_frame_id_);
|
||||
return ToTimeDelta(newest_acked_timestamp - oldest_acked_timestamp,
|
||||
config_.rtp_timebase) +
|
||||
encoder_duration;
|
||||
}
|
||||
|
||||
RtpTimeTicks FrameSenderImpl::GetRecordedRtpTimestamp(FrameId frame_id) const {
|
||||
return frame_rtp_timestamps_[frame_id.lower_8_bits()];
|
||||
}
|
||||
|
||||
int FrameSenderImpl::GetUnacknowledgedFrameCount() const {
|
||||
if (last_send_time_.is_null())
|
||||
return 0;
|
||||
const int count = last_sent_frame_id_ - latest_acked_frame_id_;
|
||||
DCHECK_GE(count, 0);
|
||||
return count;
|
||||
}
|
||||
|
||||
int FrameSenderImpl::GetSuggestedBitrate(base::TimeTicks playout_time,
|
||||
base::TimeDelta playout_delay) {
|
||||
return congestion_control_->GetBitrate(playout_time, playout_delay);
|
||||
}
|
||||
|
||||
double FrameSenderImpl::MaxFrameRate() const {
|
||||
return max_frame_rate_;
|
||||
}
|
||||
|
||||
void FrameSenderImpl::SetMaxFrameRate(double max_frame_rate) {
|
||||
max_frame_rate_ = max_frame_rate;
|
||||
}
|
||||
|
||||
base::TimeDelta FrameSenderImpl::TargetPlayoutDelay() const {
|
||||
return target_playout_delay_;
|
||||
}
|
||||
base::TimeDelta FrameSenderImpl::CurrentRoundTripTime() const {
|
||||
return current_round_trip_time_;
|
||||
}
|
||||
base::TimeTicks FrameSenderImpl::LastSendTime() const {
|
||||
return last_send_time_;
|
||||
}
|
||||
FrameId FrameSenderImpl::LastAckedFrameId() const {
|
||||
return latest_acked_frame_id_;
|
||||
}
|
||||
|
||||
base::TimeDelta FrameSenderImpl::GetAllowedInFlightMediaDuration() const {
|
||||
// The total amount allowed in-flight media should equal the amount that fits
|
||||
// within the entire playout delay window, plus the amount of time it takes to
|
||||
// receive an ACK from the receiver.
|
||||
return target_playout_delay_ + (current_round_trip_time_ / 2);
|
||||
}
|
||||
|
||||
CastStreamingFrameDropReason FrameSenderImpl::EnqueueFrame(
|
||||
std::unique_ptr<SenderEncodedFrame> encoded_frame) {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
|
||||
VLOG(2) << SENDER_SSRC
|
||||
<< "About to send another frame: last_sent=" << last_sent_frame_id_
|
||||
<< ", latest_acked=" << latest_acked_frame_id_;
|
||||
|
||||
const FrameId frame_id = encoded_frame->frame_id;
|
||||
const bool is_first_frame_to_be_sent = last_send_time_.is_null();
|
||||
|
||||
if (picture_lost_at_receiver_ &&
|
||||
(encoded_frame->dependency ==
|
||||
openscreen::cast::EncodedFrame::Dependency::kKeyFrame)) {
|
||||
picture_lost_at_receiver_ = false;
|
||||
DCHECK(frame_id > latest_acked_frame_id_);
|
||||
// Cancel sending remaining frames.
|
||||
std::vector<FrameId> cancel_sending_frames;
|
||||
for (FrameId id = latest_acked_frame_id_ + 1; id < frame_id; ++id) {
|
||||
cancel_sending_frames.push_back(id);
|
||||
client_->OnFrameCanceled(id);
|
||||
}
|
||||
transport_sender_->CancelSendingFrames(config_.sender_ssrc,
|
||||
cancel_sending_frames);
|
||||
}
|
||||
|
||||
last_send_time_ = cast_environment_->Clock()->NowTicks();
|
||||
|
||||
DCHECK(frame_id > last_sent_frame_id_) << "enqueued frames out of order.";
|
||||
last_sent_frame_id_ = frame_id;
|
||||
// If this is the first frame about to be sent, fake the value of
|
||||
// |latest_acked_frame_id_| to indicate the receiver starts out all
|
||||
// caught up. Also, schedule the periodic frame re-send checks.
|
||||
if (is_first_frame_to_be_sent) {
|
||||
latest_acked_frame_id_ = frame_id - 1;
|
||||
ScheduleNextResendCheck();
|
||||
}
|
||||
|
||||
VLOG_IF(1, !is_audio_ &&
|
||||
encoded_frame->dependency ==
|
||||
openscreen::cast::EncodedFrame::Dependency::kKeyFrame)
|
||||
<< SENDER_SSRC << "Sending encoded key frame, id=" << frame_id;
|
||||
|
||||
std::unique_ptr<FrameEvent> encode_event(new FrameEvent());
|
||||
encode_event->timestamp = encoded_frame->encode_completion_time;
|
||||
encode_event->type = FRAME_ENCODED;
|
||||
encode_event->media_type = is_audio_ ? AUDIO_EVENT : VIDEO_EVENT;
|
||||
encode_event->rtp_timestamp = encoded_frame->rtp_timestamp;
|
||||
encode_event->frame_id = frame_id;
|
||||
encode_event->size = base::checked_cast<uint32_t>(encoded_frame->data.size());
|
||||
encode_event->key_frame =
|
||||
encoded_frame->dependency ==
|
||||
openscreen::cast::EncodedFrame::Dependency::kKeyFrame;
|
||||
encode_event->target_bitrate = encoded_frame->encoder_bitrate;
|
||||
encode_event->encoder_cpu_utilization = encoded_frame->encoder_utilization;
|
||||
encode_event->idealized_bitrate_utilization = encoded_frame->lossiness;
|
||||
cast_environment_->logger()->DispatchFrameEvent(std::move(encode_event));
|
||||
|
||||
RecordLatestFrameTimestamps(frame_id, encoded_frame->reference_time,
|
||||
encoded_frame->rtp_timestamp);
|
||||
|
||||
if (!is_audio_) {
|
||||
// Used by chrome/browser/media/cast_mirroring_performance_browsertest.cc
|
||||
TRACE_EVENT_INSTANT1("cast_perf_test", "VideoFrameEncoded",
|
||||
TRACE_EVENT_SCOPE_THREAD, "rtp_timestamp",
|
||||
encoded_frame->rtp_timestamp.lower_32_bits());
|
||||
}
|
||||
|
||||
// At the start of the session, it's important to send reports before each
|
||||
// frame so that the receiver can properly compute playout times. The reason
|
||||
// more than one report is sent is because transmission is not guaranteed,
|
||||
// only best effort, so send enough that one should almost certainly get
|
||||
// through.
|
||||
if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
|
||||
// SendRtcpReport() will schedule future reports to be made if this is the
|
||||
// last "aggressive report."
|
||||
++num_aggressive_rtcp_reports_sent_;
|
||||
const bool is_last_aggressive_report =
|
||||
(num_aggressive_rtcp_reports_sent_ == kNumAggressiveReportsSentAtStart);
|
||||
VLOG_IF(1, is_last_aggressive_report)
|
||||
<< SENDER_SSRC << "Sending last aggressive report.";
|
||||
SendRtcpReport(is_last_aggressive_report);
|
||||
}
|
||||
|
||||
congestion_control_->WillSendFrameToTransport(
|
||||
frame_id, encoded_frame->data.size(), last_send_time_);
|
||||
|
||||
if (send_target_playout_delay_) {
|
||||
encoded_frame->new_playout_delay_ms =
|
||||
target_playout_delay_.InMilliseconds();
|
||||
}
|
||||
|
||||
const char* name = is_audio_ ? "Audio Transport" : "Video Transport";
|
||||
TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(
|
||||
"cast.stream", name, TRACE_ID_WITH_SCOPE(name, frame_id.lower_32_bits()),
|
||||
"rtp_timestamp", encoded_frame->rtp_timestamp.lower_32_bits());
|
||||
transport_sender_->InsertFrame(config_.sender_ssrc, *encoded_frame);
|
||||
return CastStreamingFrameDropReason::kNotDropped;
|
||||
}
|
||||
|
||||
void FrameSenderImpl::OnReceivedCastFeedback(
|
||||
const RtcpCastMessage& cast_feedback) {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
|
||||
const bool have_valid_rtt = current_round_trip_time_.is_positive();
|
||||
if (have_valid_rtt) {
|
||||
congestion_control_->UpdateRtt(current_round_trip_time_);
|
||||
|
||||
// Having the RTT value implies the receiver sent back a receiver report
|
||||
// based on it having received a report from here. Therefore, ensure this
|
||||
// sender stops aggressively sending reports.
|
||||
if (num_aggressive_rtcp_reports_sent_ < kNumAggressiveReportsSentAtStart) {
|
||||
VLOG(1) << SENDER_SSRC
|
||||
<< "No longer a need to send reports aggressively (sent "
|
||||
<< num_aggressive_rtcp_reports_sent_ << ").";
|
||||
num_aggressive_rtcp_reports_sent_ = kNumAggressiveReportsSentAtStart;
|
||||
ScheduleNextRtcpReport();
|
||||
}
|
||||
}
|
||||
|
||||
if (last_send_time_.is_null())
|
||||
return; // Cannot get an ACK without having first sent a frame.
|
||||
|
||||
if (cast_feedback.missing_frames_and_packets.empty() &&
|
||||
cast_feedback.received_later_frames.empty()) {
|
||||
if (latest_acked_frame_id_ == cast_feedback.ack_frame_id) {
|
||||
VLOG(1) << SENDER_SSRC << "Received duplicate ACK for frame "
|
||||
<< latest_acked_frame_id_;
|
||||
TRACE_EVENT_INSTANT2(
|
||||
"cast.stream", "Duplicate ACK", TRACE_EVENT_SCOPE_THREAD,
|
||||
"ack_frame_id", cast_feedback.ack_frame_id.lower_32_bits(),
|
||||
"last_sent_frame_id", last_sent_frame_id_.lower_32_bits());
|
||||
}
|
||||
// We only count duplicate ACKs when we have sent newer frames.
|
||||
if (latest_acked_frame_id_ == cast_feedback.ack_frame_id &&
|
||||
latest_acked_frame_id_ != last_sent_frame_id_) {
|
||||
duplicate_ack_counter_++;
|
||||
} else {
|
||||
duplicate_ack_counter_ = 0;
|
||||
}
|
||||
if (duplicate_ack_counter_ >= 2 && duplicate_ack_counter_ % 3 == 2) {
|
||||
ResendForKickstart();
|
||||
}
|
||||
} else {
|
||||
// Only count duplicated ACKs if there is no NACK request in between.
|
||||
// This is to avoid aggressive resend.
|
||||
duplicate_ack_counter_ = 0;
|
||||
}
|
||||
|
||||
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
|
||||
congestion_control_->AckFrame(cast_feedback.ack_frame_id, now);
|
||||
if (!cast_feedback.received_later_frames.empty()) {
|
||||
// Ack the received frames.
|
||||
congestion_control_->AckLaterFrames(cast_feedback.received_later_frames,
|
||||
now);
|
||||
}
|
||||
|
||||
std::unique_ptr<FrameEvent> ack_event(new FrameEvent());
|
||||
ack_event->timestamp = now;
|
||||
ack_event->type = FRAME_ACK_RECEIVED;
|
||||
ack_event->media_type = is_audio_ ? AUDIO_EVENT : VIDEO_EVENT;
|
||||
ack_event->rtp_timestamp =
|
||||
GetRecordedRtpTimestamp(cast_feedback.ack_frame_id);
|
||||
ack_event->frame_id = cast_feedback.ack_frame_id;
|
||||
cast_environment_->logger()->DispatchFrameEvent(std::move(ack_event));
|
||||
|
||||
const bool is_acked_out_of_order =
|
||||
cast_feedback.ack_frame_id < latest_acked_frame_id_;
|
||||
VLOG(2) << SENDER_SSRC << "Received ACK"
|
||||
<< (is_acked_out_of_order ? " out-of-order" : "") << " for frame "
|
||||
<< cast_feedback.ack_frame_id;
|
||||
if (is_acked_out_of_order) {
|
||||
TRACE_EVENT_INSTANT2(
|
||||
"cast.stream", "ACK out of order", TRACE_EVENT_SCOPE_THREAD,
|
||||
"ack_frame_id", cast_feedback.ack_frame_id.lower_32_bits(),
|
||||
"latest_acked_frame_id", latest_acked_frame_id_.lower_32_bits());
|
||||
} else if (latest_acked_frame_id_ < cast_feedback.ack_frame_id) {
|
||||
// Cancel resends of acked frames.
|
||||
std::vector<FrameId> frames_to_cancel;
|
||||
frames_to_cancel.reserve(cast_feedback.ack_frame_id -
|
||||
latest_acked_frame_id_);
|
||||
do {
|
||||
++latest_acked_frame_id_;
|
||||
frames_to_cancel.push_back(latest_acked_frame_id_);
|
||||
client_->OnFrameCanceled(latest_acked_frame_id_);
|
||||
// This is a good place to match the trace for frame ids
|
||||
// since this ensures we not only track frame ids that are
|
||||
// implicitly ACKed, but also handles duplicate ACKs
|
||||
const char* name = is_audio_ ? "Audio Transport" : "Video Transport";
|
||||
TRACE_EVENT_NESTABLE_ASYNC_END1(
|
||||
"cast.stream", name,
|
||||
TRACE_ID_WITH_SCOPE(name, latest_acked_frame_id_.lower_32_bits()),
|
||||
"RTT_usecs", current_round_trip_time_.InMicroseconds());
|
||||
} while (latest_acked_frame_id_ < cast_feedback.ack_frame_id);
|
||||
transport_sender_->CancelSendingFrames(config_.sender_ssrc,
|
||||
frames_to_cancel);
|
||||
}
|
||||
}
|
||||
|
||||
void FrameSenderImpl::OnReceivedPli() {
|
||||
picture_lost_at_receiver_ = true;
|
||||
}
|
||||
|
||||
CastStreamingFrameDropReason FrameSenderImpl::ShouldDropNextFrame(
|
||||
base::TimeDelta frame_duration) {
|
||||
// Check that accepting the next frame won't cause more frames to become
|
||||
// in-flight than the system's design limit.
|
||||
const int count_frames_in_flight =
|
||||
GetUnacknowledgedFrameCount() + client_->GetNumberOfFramesInEncoder();
|
||||
if (count_frames_in_flight >= kMaxUnackedFrames) {
|
||||
return CastStreamingFrameDropReason::kTooManyFramesInFlight;
|
||||
}
|
||||
|
||||
// Check that accepting the next frame won't exceed the configured maximum
|
||||
// frame rate, allowing for short-term bursts.
|
||||
const base::TimeDelta duration_in_flight = GetInFlightMediaDuration();
|
||||
const double max_frames_in_flight =
|
||||
max_frame_rate_ * duration_in_flight.InSecondsF();
|
||||
if (count_frames_in_flight >= max_frames_in_flight + kMaxFrameBurst) {
|
||||
return CastStreamingFrameDropReason::kBurstThresholdExceeded;
|
||||
}
|
||||
|
||||
// Check that accepting the next frame won't exceed the allowed in-flight
|
||||
// media duration.
|
||||
const base::TimeDelta duration_would_be_in_flight =
|
||||
duration_in_flight + frame_duration;
|
||||
const base::TimeDelta allowed_in_flight = GetAllowedInFlightMediaDuration();
|
||||
if (VLOG_IS_ON(1)) {
|
||||
const int64_t percent =
|
||||
allowed_in_flight.is_positive()
|
||||
? base::ClampRound<int64_t>(duration_would_be_in_flight /
|
||||
allowed_in_flight * 100)
|
||||
: std::numeric_limits<int64_t>::max();
|
||||
VLOG_IF(1, percent > 50)
|
||||
<< SENDER_SSRC << duration_in_flight.InMicroseconds()
|
||||
<< " usec in-flight + " << frame_duration.InMicroseconds()
|
||||
<< " usec for next frame --> " << percent << "% of allowed in-flight.";
|
||||
}
|
||||
if (duration_would_be_in_flight > allowed_in_flight) {
|
||||
return CastStreamingFrameDropReason::kInFlightDurationTooHigh;
|
||||
}
|
||||
|
||||
// Next frame is accepted.
|
||||
return CastStreamingFrameDropReason::kNotDropped;
|
||||
}
|
||||
|
||||
} // namespace media::cast
|
@ -1,185 +0,0 @@
|
||||
// Copyright 2014 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
//
|
||||
// This is the base class for an object that send frames to a receiver.
|
||||
|
||||
#ifndef MEDIA_CAST_SENDER_FRAME_SENDER_IMPL_H_
|
||||
#define MEDIA_CAST_SENDER_FRAME_SENDER_IMPL_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/memory/raw_ref.h"
|
||||
#include "base/memory/scoped_refptr.h"
|
||||
#include "base/memory/weak_ptr.h"
|
||||
#include "base/time/time.h"
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_environment.h"
|
||||
#include "media/cast/net/cast_transport.h"
|
||||
#include "media/cast/net/rtcp/rtcp_defines.h"
|
||||
#include "media/cast/sender/congestion_control.h"
|
||||
#include "media/cast/sender/frame_sender.h"
|
||||
|
||||
namespace media::cast {
|
||||
|
||||
struct SenderEncodedFrame;
|
||||
|
||||
class FrameSenderImpl : public FrameSender {
|
||||
public:
|
||||
FrameSenderImpl(scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& config,
|
||||
CastTransport* const transport_sender,
|
||||
Client& client);
|
||||
~FrameSenderImpl() override;
|
||||
|
||||
// FrameSender overrides.
|
||||
void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay) override;
|
||||
base::TimeDelta GetTargetPlayoutDelay() const override;
|
||||
bool NeedsKeyFrame() const override;
|
||||
CastStreamingFrameDropReason EnqueueFrame(
|
||||
std::unique_ptr<SenderEncodedFrame> encoded_frame) override;
|
||||
CastStreamingFrameDropReason ShouldDropNextFrame(
|
||||
base::TimeDelta frame_duration) override;
|
||||
RtpTimeTicks GetRecordedRtpTimestamp(FrameId frame_id) const override;
|
||||
int GetUnacknowledgedFrameCount() const override;
|
||||
int GetSuggestedBitrate(base::TimeTicks playout_time,
|
||||
base::TimeDelta playout_delay) override;
|
||||
double MaxFrameRate() const override;
|
||||
void SetMaxFrameRate(double max_frame_rate) override;
|
||||
base::TimeDelta TargetPlayoutDelay() const override;
|
||||
base::TimeDelta CurrentRoundTripTime() const override;
|
||||
base::TimeTicks LastSendTime() const override;
|
||||
FrameId LastAckedFrameId() const override;
|
||||
void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) override;
|
||||
void OnReceivedPli() override;
|
||||
void OnMeasuredRoundTripTime(base::TimeDelta rtt) override;
|
||||
|
||||
private:
|
||||
// Helper for getting the reference time recorded on the frame associated
|
||||
// with |frame_id|.
|
||||
base::TimeTicks GetRecordedReferenceTime(FrameId frame_id) const;
|
||||
|
||||
// Schedule and execute periodic checks for re-sending packets. If no
|
||||
// acknowledgements have been received for "too long," FrameSenderImpl will
|
||||
// speculatively re-send certain packets of an unacked frame to kick-start
|
||||
// re-transmission. This is a last resort tactic to prevent the session from
|
||||
// getting stuck after a long outage.
|
||||
void ScheduleNextResendCheck();
|
||||
void ResendCheck();
|
||||
void ResendForKickstart();
|
||||
|
||||
// Schedule and execute periodic sending of RTCP report.
|
||||
void ScheduleNextRtcpReport();
|
||||
void SendRtcpReport(bool schedule_future_reports);
|
||||
|
||||
// Record or retrieve a recent history of each frame's timestamps.
|
||||
// Warning: If a frame ID too far in the past is requested, the getters will
|
||||
// silently succeed but return incorrect values. Be sure to respect
|
||||
// media::cast::kMaxUnackedFrames.
|
||||
void RecordLatestFrameTimestamps(FrameId frame_id,
|
||||
base::TimeTicks reference_time,
|
||||
RtpTimeTicks rtp_timestamp);
|
||||
|
||||
base::TimeDelta GetInFlightMediaDuration() const;
|
||||
|
||||
private:
|
||||
class RtcpClient : public RtcpObserver {
|
||||
public:
|
||||
explicit RtcpClient(base::WeakPtr<FrameSenderImpl> frame_sender);
|
||||
~RtcpClient() override;
|
||||
|
||||
void OnReceivedCastMessage(const RtcpCastMessage& cast_message) override;
|
||||
void OnReceivedRtt(base::TimeDelta round_trip_time) override;
|
||||
void OnReceivedPli() override;
|
||||
|
||||
private:
|
||||
const base::WeakPtr<FrameSenderImpl> frame_sender_;
|
||||
};
|
||||
|
||||
// The cast environment.
|
||||
const scoped_refptr<CastEnvironment> cast_environment_;
|
||||
|
||||
// The configuration provided upon initialization.
|
||||
const FrameSenderConfig config_;
|
||||
|
||||
// The target playout delay, may fluctuate between the min and max delays
|
||||
// stored in |config_|.
|
||||
base::TimeDelta target_playout_delay_;
|
||||
|
||||
// Max encoded frames generated per second.
|
||||
double max_frame_rate_;
|
||||
|
||||
// Sends encoded frames over the configured transport (e.g., UDP). In
|
||||
// Chromium, this could be a proxy that first sends the frames from a renderer
|
||||
// process to the browser process over IPC, with the browser process being
|
||||
// responsible for "packetizing" the frames and pushing packets into the
|
||||
// network layer.
|
||||
const raw_ptr<CastTransport> transport_sender_;
|
||||
|
||||
// The frame sender client.
|
||||
const raw_ref<Client> client_;
|
||||
|
||||
// Whether this is an audio or video frame sender.
|
||||
const bool is_audio_;
|
||||
|
||||
// The congestion control manages frame statistics and helps make decisions
|
||||
// about what bitrate we encode the next frame at.
|
||||
std::unique_ptr<CongestionControl> congestion_control_;
|
||||
|
||||
// This is the maximum delay that the sender should get ack from receiver.
|
||||
// Otherwise, sender will call ResendForKickstart().
|
||||
base::TimeDelta max_ack_delay_;
|
||||
|
||||
// This is "null" until the first frame is sent. Thereafter, this tracks the
|
||||
// last time any frame was sent or re-sent.
|
||||
base::TimeTicks last_send_time_;
|
||||
|
||||
// The ID of the last frame sent. This member is invalid until
|
||||
// |!last_send_time_.is_null()|.
|
||||
FrameId last_sent_frame_id_;
|
||||
|
||||
// The ID of the latest (not necessarily the last) frame that has been
|
||||
// acknowledged. This member is invalid until |!last_send_time_.is_null()|.
|
||||
FrameId latest_acked_frame_id_;
|
||||
|
||||
// The most recently measured round trip time.
|
||||
base::TimeDelta current_round_trip_time_;
|
||||
|
||||
// This is the maximum delay that the sender should get ack from receiver.
|
||||
// Counts how many RTCP reports are being "aggressively" sent (i.e., one per
|
||||
// frame) at the start of the session. Once a threshold is reached, RTCP
|
||||
// reports are instead sent at the configured interval + random drift.
|
||||
int num_aggressive_rtcp_reports_sent_ = 0;
|
||||
|
||||
// Counts the number of duplicate ACK that are being received. When this
|
||||
// number reaches a threshold, the sender will take this as a sign that the
|
||||
// receiver hasn't yet received the first packet of the next frame. In this
|
||||
// case, FrameSenderImpl will trigger a re-send of the next frame.
|
||||
int duplicate_ack_counter_ = 0;
|
||||
|
||||
// This flag is set true when a Pli message is received. It is cleared once
|
||||
// the FrameSenderImpl scheduled an encoded key frame to be sent.
|
||||
bool picture_lost_at_receiver_ = false;
|
||||
|
||||
// Should send the target playout delay with the next frame.
|
||||
bool send_target_playout_delay_ = false;
|
||||
|
||||
// Returns the maximum media duration currently allowed in-flight. This
|
||||
// fluctuates in response to the currently-measured network latency.
|
||||
base::TimeDelta GetAllowedInFlightMediaDuration() const;
|
||||
|
||||
// Ring buffers to keep track of recent frame timestamps (both in terms of
|
||||
// local reference time and RTP media time). These should only be accessed
|
||||
// through the Record/GetXXX() methods. The index into this ring
|
||||
// buffer is the lower 8 bits of the FrameId.
|
||||
base::TimeTicks frame_reference_times_[256];
|
||||
RtpTimeTicks frame_rtp_timestamps_[256];
|
||||
|
||||
// NOTE: Weak pointers must be invalidated before all other member variables.
|
||||
base::WeakPtrFactory<FrameSenderImpl> weak_factory_{this};
|
||||
};
|
||||
|
||||
} // namespace media::cast
|
||||
|
||||
#endif // MEDIA_CAST_SENDER_FRAME_SENDER_IMPL_H_
|
@ -100,11 +100,6 @@ bool OpenscreenFrameSender::NeedsKeyFrame() const {
|
||||
return sender_->NeedsKeyFrame();
|
||||
}
|
||||
|
||||
void OpenscreenFrameSender::OnMeasuredRoundTripTime(
|
||||
base::TimeDelta round_trip_time) {
|
||||
NOTIMPLEMENTED();
|
||||
}
|
||||
|
||||
void OpenscreenFrameSender::SetTargetPlayoutDelay(
|
||||
base::TimeDelta new_target_playout_delay) {
|
||||
if (send_target_playout_delay_ &&
|
||||
@ -293,15 +288,6 @@ CastStreamingFrameDropReason OpenscreenFrameSender::EnqueueFrame(
|
||||
return ToFrameDropReason(result);
|
||||
}
|
||||
|
||||
void OpenscreenFrameSender::OnReceivedCastFeedback(
|
||||
const RtcpCastMessage& cast_feedback) {
|
||||
NOTIMPLEMENTED();
|
||||
}
|
||||
|
||||
void OpenscreenFrameSender::OnReceivedPli() {
|
||||
OnPictureLost();
|
||||
}
|
||||
|
||||
CastStreamingFrameDropReason OpenscreenFrameSender::ShouldDropNextFrame(
|
||||
base::TimeDelta frame_duration) {
|
||||
// Check that accepting the next frame won't cause more frames to become
|
||||
|
@ -34,6 +34,14 @@ struct SenderEncodedFrame;
|
||||
//
|
||||
// For more information, see the Cast Streaming README.md located at:
|
||||
// https://source.chromium.org/chromium/chromium/src/+/main:third_party/openscreen/src/cast/streaming/README.md
|
||||
//
|
||||
// NOTE: This class mostly exists to wrap an openscreen::cast::Sender, implement
|
||||
// frame dropping logic, and support type translation between Chrome and Open
|
||||
// Screen. See if it can be removed by migrating functionality into
|
||||
// openscreen::cast::Sender.
|
||||
//
|
||||
// TODO(issues.chromium.org/329781397): Remove unnecessary wrapper objects in
|
||||
// Chrome's implementation of the Cast sender.
|
||||
class OpenscreenFrameSender : public FrameSender,
|
||||
openscreen::cast::Sender::Observer {
|
||||
public:
|
||||
@ -68,12 +76,6 @@ class OpenscreenFrameSender : public FrameSender,
|
||||
FrameId LastAckedFrameId() const override;
|
||||
|
||||
private:
|
||||
// TODO(https://crbug.com/1318499): these should be removed from the
|
||||
// FrameSender API.
|
||||
void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) override;
|
||||
void OnReceivedPli() override;
|
||||
void OnMeasuredRoundTripTime(base::TimeDelta rtt) override;
|
||||
|
||||
// openscreen::cast::Sender::Observer overrides.
|
||||
void OnFrameCanceled(openscreen::cast::FrameId frame_id) override;
|
||||
// NOTE: this is a no-op since the encoder checks if it should generate a key
|
||||
|
@ -20,7 +20,6 @@
|
||||
#include "media/cast/common/rtp_time.h"
|
||||
#include "media/cast/common/sender_encoded_frame.h"
|
||||
#include "media/cast/encoding/video_encoder.h"
|
||||
#include "media/cast/net/cast_transport_config.h"
|
||||
#include "media/cast/sender/openscreen_frame_sender.h"
|
||||
#include "media/cast/sender/performance_metrics_overlay.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/encoded_frame.h"
|
||||
@ -103,28 +102,6 @@ void LogVideoCaptureTimestamps(CastEnvironment* cast_environment,
|
||||
|
||||
} // namespace
|
||||
|
||||
VideoSender::VideoSender(
|
||||
scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& video_config,
|
||||
StatusChangeCallback status_change_cb,
|
||||
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
|
||||
CastTransport* const transport_sender,
|
||||
std::unique_ptr<media::VideoEncoderMetricsProvider>
|
||||
encoder_metrics_provider,
|
||||
PlayoutDelayChangeCB playout_delay_change_cb,
|
||||
media::VideoCaptureFeedbackCB feedback_cb)
|
||||
: VideoSender(cast_environment,
|
||||
video_config,
|
||||
std::move(status_change_cb),
|
||||
std::move(create_vea_cb),
|
||||
FrameSender::Create(cast_environment,
|
||||
video_config,
|
||||
transport_sender,
|
||||
*this),
|
||||
std::move(encoder_metrics_provider),
|
||||
std::move(playout_delay_change_cb),
|
||||
std::move(feedback_cb)) {}
|
||||
|
||||
VideoSender::VideoSender(
|
||||
scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& video_config,
|
||||
@ -136,39 +113,16 @@ VideoSender::VideoSender(
|
||||
PlayoutDelayChangeCB playout_delay_change_cb,
|
||||
media::VideoCaptureFeedbackCB feedback_cb,
|
||||
FrameSender::GetSuggestedVideoBitrateCB get_bitrate_cb)
|
||||
: VideoSender(cast_environment,
|
||||
video_config,
|
||||
std::move(status_change_cb),
|
||||
std::move(create_vea_cb),
|
||||
FrameSender::Create(cast_environment,
|
||||
video_config,
|
||||
std::move(sender),
|
||||
*this,
|
||||
std::move(get_bitrate_cb)),
|
||||
std::move(encoder_metrics_provider),
|
||||
std::move(playout_delay_change_cb),
|
||||
std::move(feedback_cb)) {}
|
||||
|
||||
// Note, we use a fixed bitrate value when external video encoder is used.
|
||||
// Some hardware encoder shows bad behavior if we set the bitrate too
|
||||
// frequently, e.g. quality drop, not abiding by target bitrate, etc.
|
||||
// See details: crbug.com/392086.
|
||||
VideoSender::VideoSender(
|
||||
scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& video_config,
|
||||
StatusChangeCallback status_change_cb,
|
||||
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
|
||||
std::unique_ptr<FrameSender> sender,
|
||||
std::unique_ptr<media::VideoEncoderMetricsProvider>
|
||||
encoder_metrics_provider,
|
||||
PlayoutDelayChangeCB playout_delay_change_cb,
|
||||
media::VideoCaptureFeedbackCB feedback_callback)
|
||||
: frame_sender_(std::move(sender)),
|
||||
: frame_sender_(FrameSender::Create(cast_environment,
|
||||
video_config,
|
||||
std::move(sender),
|
||||
*this,
|
||||
std::move(get_bitrate_cb))),
|
||||
cast_environment_(cast_environment),
|
||||
min_playout_delay_(video_config.min_playout_delay),
|
||||
max_playout_delay_(video_config.max_playout_delay),
|
||||
playout_delay_change_cb_(std::move(playout_delay_change_cb)),
|
||||
feedback_cb_(feedback_callback) {
|
||||
feedback_cb_(feedback_cb) {
|
||||
video_encoder_ = VideoEncoder::Create(cast_environment_, video_config,
|
||||
std::move(encoder_metrics_provider),
|
||||
status_change_cb, create_vea_cb);
|
||||
@ -188,7 +142,7 @@ VideoSender::~VideoSender() {
|
||||
|
||||
void VideoSender::InsertRawVideoFrame(
|
||||
scoped_refptr<media::VideoFrame> video_frame,
|
||||
const base::TimeTicks& reference_time) {
|
||||
base::TimeTicks reference_time) {
|
||||
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
|
||||
CHECK(video_encoder_);
|
||||
|
||||
@ -360,6 +314,8 @@ base::WeakPtr<VideoSender> VideoSender::AsWeakPtr() {
|
||||
return weak_factory_.GetWeakPtr();
|
||||
}
|
||||
|
||||
VideoSender::VideoSender() = default;
|
||||
|
||||
int VideoSender::GetNumberOfFramesInEncoder() const {
|
||||
return frames_in_encoder_;
|
||||
}
|
||||
|
@ -13,8 +13,8 @@
|
||||
#include "base/time/tick_clock.h"
|
||||
#include "base/time/time.h"
|
||||
#include "media/capture/video/video_capture_feedback.h"
|
||||
#include "media/cast/cast_callbacks.h"
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_sender.h"
|
||||
#include "media/cast/common/rtp_time.h"
|
||||
#include "media/cast/sender/frame_sender.h"
|
||||
|
||||
@ -29,7 +29,6 @@ class VideoFrame;
|
||||
|
||||
namespace media::cast {
|
||||
|
||||
class CastTransport;
|
||||
class VideoEncoder;
|
||||
class VideoFrameFactory;
|
||||
|
||||
@ -43,19 +42,6 @@ using PlayoutDelayChangeCB = base::RepeatingCallback<void(base::TimeDelta)>;
|
||||
// timeouts.
|
||||
class VideoSender : public FrameSender::Client {
|
||||
public:
|
||||
// Old way to instantiate, using a cast transport.
|
||||
// TODO(https://crbug.com/1316434): should be removed once libcast sender is
|
||||
// successfully launched.
|
||||
VideoSender(scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& video_config,
|
||||
StatusChangeCallback status_change_cb,
|
||||
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
|
||||
CastTransport* const transport_sender,
|
||||
std::unique_ptr<media::VideoEncoderMetricsProvider>
|
||||
encoder_metrics_provider,
|
||||
PlayoutDelayChangeCB playout_delay_change_cb,
|
||||
media::VideoCaptureFeedbackCB feedback_callback);
|
||||
|
||||
// New way of instantiating using an openscreen::cast::Sender. Since the
|
||||
// |Sender| instance is destroyed when renegotiation is complete, |this|
|
||||
// is also invalid and should be immediately torn down.
|
||||
@ -78,8 +64,8 @@ class VideoSender : public FrameSender::Client {
|
||||
// Note: It is not guaranteed that |video_frame| will actually be encoded and
|
||||
// sent, if VideoSender detects too many frames in flight. Therefore, clients
|
||||
// should be careful about the rate at which this method is called.
|
||||
void InsertRawVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
|
||||
const base::TimeTicks& reference_time);
|
||||
virtual void InsertRawVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
|
||||
base::TimeTicks reference_time);
|
||||
|
||||
// Creates a |VideoFrameFactory| object to vend |VideoFrame| object with
|
||||
// encoder affinity (defined as offering some sort of performance benefit). If
|
||||
@ -92,24 +78,14 @@ class VideoSender : public FrameSender::Client {
|
||||
base::WeakPtr<VideoSender> AsWeakPtr();
|
||||
|
||||
protected:
|
||||
// For mocking in unit tests.
|
||||
VideoSender();
|
||||
|
||||
// FrameSender::Client overrides.
|
||||
int GetNumberOfFramesInEncoder() const final;
|
||||
base::TimeDelta GetEncoderBacklogDuration() const final;
|
||||
|
||||
// Exposed as protected for testing.
|
||||
FrameSender* frame_sender_for_testing() { return frame_sender_.get(); }
|
||||
|
||||
private:
|
||||
VideoSender(scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& video_config,
|
||||
StatusChangeCallback status_change_cb,
|
||||
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
|
||||
std::unique_ptr<FrameSender> sender,
|
||||
std::unique_ptr<media::VideoEncoderMetricsProvider>
|
||||
encoder_metrics_provider,
|
||||
PlayoutDelayChangeCB playout_delay_change_cb,
|
||||
media::VideoCaptureFeedbackCB feedback_callback);
|
||||
|
||||
// Called by the |video_encoder_| with the next EncodedFrame to send.
|
||||
void OnEncodedVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
|
||||
const base::TimeTicks reference_time,
|
||||
|
@ -18,22 +18,28 @@
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/test/simple_test_tick_clock.h"
|
||||
#include "base/time/time.h"
|
||||
#include "components/openscreen_platform/task_runner.h"
|
||||
#include "media/base/fake_single_thread_task_runner.h"
|
||||
#include "media/base/mock_filters.h"
|
||||
#include "media/base/video_frame.h"
|
||||
#include "media/cast/cast_environment.h"
|
||||
#include "media/cast/common/openscreen_conversion_helpers.h"
|
||||
#include "media/cast/common/video_frame_factory.h"
|
||||
#include "media/cast/constants.h"
|
||||
#include "media/cast/logging/simple_event_subscriber.h"
|
||||
#include "media/cast/net/cast_transport_config.h"
|
||||
#include "media/cast/net/cast_transport_impl.h"
|
||||
#include "media/cast/net/pacing/paced_sender.h"
|
||||
#include "media/cast/test/fake_openscreen_clock.h"
|
||||
#include "media/cast/test/fake_video_encode_accelerator_factory.h"
|
||||
#include "media/cast/test/mock_openscreen_environment.h"
|
||||
#include "media/cast/test/utility/default_config.h"
|
||||
#include "media/cast/test/utility/video_utility.h"
|
||||
#include "media/video/fake_video_encode_accelerator.h"
|
||||
#include "testing/gmock/include/gmock/gmock.h"
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/capture_recommendations.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/environment.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/sender.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/sender_packet_router.h"
|
||||
#include "third_party/openscreen/src/platform/api/time.h"
|
||||
|
||||
namespace media::cast {
|
||||
|
||||
@ -52,111 +58,14 @@ void SaveOperationalStatus(OperationalStatus* out_status,
|
||||
*out_status = in_status;
|
||||
}
|
||||
|
||||
class TestPacketSender : public PacketTransport {
|
||||
public:
|
||||
TestPacketSender()
|
||||
: number_of_rtp_packets_(0), number_of_rtcp_packets_(0), paused_(false) {}
|
||||
|
||||
TestPacketSender(const TestPacketSender&) = delete;
|
||||
TestPacketSender& operator=(const TestPacketSender&) = delete;
|
||||
|
||||
// A singular packet implies a RTCP packet.
|
||||
bool SendPacket(PacketRef packet, base::OnceClosure cb) final {
|
||||
if (paused_) {
|
||||
stored_packet_ = packet;
|
||||
callback_ = std::move(cb);
|
||||
return false;
|
||||
}
|
||||
if (IsRtcpPacket(packet->data)) {
|
||||
++number_of_rtcp_packets_;
|
||||
} else {
|
||||
// Check that at least one RTCP packet was sent before the first RTP
|
||||
// packet. This confirms that the receiver will have the necessary lip
|
||||
// sync info before it has to calculate the playout time of the first
|
||||
// frame.
|
||||
if (number_of_rtp_packets_ == 0)
|
||||
EXPECT_LE(1, number_of_rtcp_packets_);
|
||||
++number_of_rtp_packets_;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
int64_t GetBytesSent() final { return 0; }
|
||||
|
||||
void StartReceiving(PacketReceiverCallbackWithStatus packet_receiver) final {}
|
||||
|
||||
void StopReceiving() final {}
|
||||
|
||||
int number_of_rtp_packets() const { return number_of_rtp_packets_; }
|
||||
|
||||
int number_of_rtcp_packets() const { return number_of_rtcp_packets_; }
|
||||
|
||||
void SetPause(bool paused) {
|
||||
paused_ = paused;
|
||||
if (!paused && stored_packet_.get()) {
|
||||
SendPacket(stored_packet_, base::OnceClosure());
|
||||
std::move(callback_).Run();
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
int number_of_rtp_packets_;
|
||||
int number_of_rtcp_packets_;
|
||||
bool paused_;
|
||||
base::OnceClosure callback_;
|
||||
PacketRef stored_packet_;
|
||||
};
|
||||
|
||||
void IgnorePlayoutDelayChanges(base::TimeDelta unused_playout_delay) {}
|
||||
|
||||
class PeerVideoSender : public VideoSender {
|
||||
public:
|
||||
PeerVideoSender(scoped_refptr<CastEnvironment> cast_environment,
|
||||
const FrameSenderConfig& video_config,
|
||||
const StatusChangeCallback& status_change_cb,
|
||||
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
|
||||
CastTransport* const transport_sender)
|
||||
: VideoSender(cast_environment,
|
||||
video_config,
|
||||
status_change_cb,
|
||||
create_vea_cb,
|
||||
transport_sender,
|
||||
std::make_unique<media::MockVideoEncoderMetricsProvider>(),
|
||||
base::BindRepeating(&IgnorePlayoutDelayChanges),
|
||||
base::BindRepeating(&PeerVideoSender::ProcessFeedback,
|
||||
base::Unretained(this))) {}
|
||||
void IgnoreVideoCaptureFeedback(
|
||||
const media::VideoCaptureFeedback& unused_feedback) {}
|
||||
|
||||
void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
|
||||
frame_sender_for_testing()->OnReceivedCastFeedback(cast_feedback);
|
||||
}
|
||||
|
||||
void OnReceivedPli() { frame_sender_for_testing()->OnReceivedPli(); }
|
||||
|
||||
void ProcessFeedback(const media::VideoCaptureFeedback& feedback) {
|
||||
feedback_ = feedback;
|
||||
}
|
||||
|
||||
VideoCaptureFeedback GetFeedback() { return feedback_; }
|
||||
|
||||
private:
|
||||
VideoCaptureFeedback feedback_;
|
||||
};
|
||||
|
||||
class TransportClient : public CastTransport::Client {
|
||||
public:
|
||||
TransportClient() = default;
|
||||
|
||||
TransportClient(const TransportClient&) = delete;
|
||||
TransportClient& operator=(const TransportClient&) = delete;
|
||||
|
||||
void OnStatusChanged(CastTransportStatus status) final {
|
||||
EXPECT_EQ(TRANSPORT_STREAM_INITIALIZED, status);
|
||||
}
|
||||
void OnLoggingEventsReceived(
|
||||
std::unique_ptr<std::vector<FrameEvent>> frame_events,
|
||||
std::unique_ptr<std::vector<PacketEvent>> packet_events) final {}
|
||||
void ProcessRtpPacket(std::unique_ptr<Packet> packet) final {}
|
||||
};
|
||||
int GetSuggestedVideoBitrate() {
|
||||
return openscreen::cast::kDefaultVideoMinBitRate;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
@ -172,21 +81,25 @@ class VideoSenderTest : public ::testing::Test {
|
||||
task_runner_,
|
||||
task_runner_,
|
||||
task_runner_)),
|
||||
operational_status_(STATUS_UNINITIALIZED),
|
||||
openscreen_task_runner_(task_runner_),
|
||||
vea_factory_(task_runner_) {
|
||||
FakeOpenscreenClock::SetTickClock(&testing_clock_);
|
||||
testing_clock_.Advance(base::TimeTicks::Now() - base::TimeTicks());
|
||||
mock_openscreen_environment_ = std::make_unique<MockOpenscreenEnvironment>(
|
||||
&FakeOpenscreenClock::now, openscreen_task_runner_);
|
||||
openscreen_packet_router_ =
|
||||
std::make_unique<openscreen::cast::SenderPacketRouter>(
|
||||
mock_openscreen_environment_.get());
|
||||
vea_factory_.SetAutoRespond(true);
|
||||
last_pixel_value_ = kPixelValue;
|
||||
auto sender = std::make_unique<TestPacketSender>();
|
||||
transport_ = sender.get();
|
||||
transport_sender_ = std::make_unique<CastTransportImpl>(
|
||||
&testing_clock_, base::TimeDelta(), std::make_unique<TransportClient>(),
|
||||
std::move(sender), task_runner_);
|
||||
}
|
||||
|
||||
~VideoSenderTest() override = default;
|
||||
~VideoSenderTest() override { FakeOpenscreenClock::ClearTickClock(); }
|
||||
|
||||
void TearDown() final {
|
||||
// Video encoders owned by the VideoSender are deleted asynchronously.
|
||||
// Delete the VideoSender here and then run any posted deletion tasks.
|
||||
openscreen_video_sender_ = nullptr;
|
||||
video_sender_.reset();
|
||||
task_runner_->RunTasks();
|
||||
}
|
||||
@ -197,23 +110,32 @@ class VideoSenderTest : public ::testing::Test {
|
||||
FrameSenderConfig video_config = GetDefaultVideoSenderConfig();
|
||||
video_config.use_hardware_encoder = external;
|
||||
|
||||
openscreen::cast::SessionConfig openscreen_video_config =
|
||||
ToOpenscreenSessionConfig(video_config, /* is_pli_enabled= */ true);
|
||||
|
||||
ASSERT_EQ(operational_status_, STATUS_UNINITIALIZED);
|
||||
|
||||
if (external) {
|
||||
vea_factory_.SetInitializationWillSucceed(expect_init_success);
|
||||
video_sender_ = std::make_unique<PeerVideoSender>(
|
||||
cast_environment_, video_config,
|
||||
base::BindRepeating(&SaveOperationalStatus, &operational_status_),
|
||||
base::BindRepeating(
|
||||
&FakeVideoEncodeAcceleratorFactory::CreateVideoEncodeAccelerator,
|
||||
base::Unretained(&vea_factory_)),
|
||||
transport_sender_.get());
|
||||
} else {
|
||||
video_sender_ = std::make_unique<PeerVideoSender>(
|
||||
cast_environment_, video_config,
|
||||
base::BindRepeating(&SaveOperationalStatus, &operational_status_),
|
||||
base::DoNothing(), transport_sender_.get());
|
||||
}
|
||||
|
||||
auto openscreen_video_sender = std::make_unique<openscreen::cast::Sender>(
|
||||
mock_openscreen_environment_.get(), openscreen_packet_router_.get(),
|
||||
openscreen_video_config, openscreen::cast::RtpPayloadType::kVideoVp8);
|
||||
openscreen_video_sender_ = openscreen_video_sender.get();
|
||||
|
||||
video_sender_ = std::make_unique<VideoSender>(
|
||||
cast_environment_, video_config,
|
||||
base::BindRepeating(&SaveOperationalStatus, &operational_status_),
|
||||
base::BindRepeating(
|
||||
&FakeVideoEncodeAcceleratorFactory::CreateVideoEncodeAccelerator,
|
||||
base::Unretained(&vea_factory_)),
|
||||
std::move(openscreen_video_sender),
|
||||
std::make_unique<media::MockVideoEncoderMetricsProvider>(),
|
||||
base::BindRepeating(&IgnorePlayoutDelayChanges),
|
||||
base::BindRepeating(&IgnoreVideoCaptureFeedback),
|
||||
base::BindRepeating(&GetSuggestedVideoBitrate));
|
||||
|
||||
task_runner_->RunTasks();
|
||||
}
|
||||
|
||||
@ -229,18 +151,6 @@ class VideoSenderTest : public ::testing::Test {
|
||||
return video_frame;
|
||||
}
|
||||
|
||||
scoped_refptr<media::VideoFrame> GetLargeNewVideoFrame() {
|
||||
if (first_frame_timestamp_.is_null())
|
||||
first_frame_timestamp_ = testing_clock_.NowTicks();
|
||||
gfx::Size size(kWidth, kHeight);
|
||||
scoped_refptr<media::VideoFrame> video_frame =
|
||||
media::VideoFrame::CreateFrame(
|
||||
PIXEL_FORMAT_I420, size, gfx::Rect(size), size,
|
||||
testing_clock_.NowTicks() - first_frame_timestamp_);
|
||||
PopulateVideoFrameWithNoise(video_frame.get());
|
||||
return video_frame;
|
||||
}
|
||||
|
||||
void RunTasks(int during_ms) {
|
||||
task_runner_->Sleep(base::Milliseconds(during_ms));
|
||||
}
|
||||
@ -248,13 +158,19 @@ class VideoSenderTest : public ::testing::Test {
|
||||
base::SimpleTestTickClock testing_clock_;
|
||||
const scoped_refptr<FakeSingleThreadTaskRunner> task_runner_;
|
||||
const scoped_refptr<CastEnvironment> cast_environment_;
|
||||
OperationalStatus operational_status_;
|
||||
// openscreen::Sender related classes.
|
||||
openscreen_platform::TaskRunner openscreen_task_runner_;
|
||||
std::unique_ptr<media::cast::MockOpenscreenEnvironment>
|
||||
mock_openscreen_environment_;
|
||||
std::unique_ptr<openscreen::cast::SenderPacketRouter>
|
||||
openscreen_packet_router_;
|
||||
OperationalStatus operational_status_ = STATUS_UNINITIALIZED;
|
||||
FakeVideoEncodeAcceleratorFactory vea_factory_;
|
||||
std::unique_ptr<CastTransportImpl> transport_sender_;
|
||||
raw_ptr<TestPacketSender> transport_; // Owned by CastTransport.
|
||||
std::unique_ptr<PeerVideoSender> video_sender_;
|
||||
int last_pixel_value_;
|
||||
base::TimeTicks first_frame_timestamp_;
|
||||
std::unique_ptr<VideoSender> video_sender_;
|
||||
// Unowned pointer to the openscreen::cast::Sender.
|
||||
raw_ptr<openscreen::cast::Sender> openscreen_video_sender_;
|
||||
};
|
||||
|
||||
TEST_F(VideoSenderTest, BuiltInEncoder) {
|
||||
@ -267,8 +183,7 @@ TEST_F(VideoSenderTest, BuiltInEncoder) {
|
||||
video_sender_->InsertRawVideoFrame(video_frame, reference_time);
|
||||
|
||||
task_runner_->RunTasks();
|
||||
EXPECT_LE(1, transport_->number_of_rtp_packets());
|
||||
EXPECT_LE(1, transport_->number_of_rtcp_packets());
|
||||
EXPECT_EQ(1, openscreen_video_sender_->GetInFlightFrameCount());
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, ExternalEncoder) {
|
||||
@ -302,7 +217,10 @@ TEST_F(VideoSenderTest, ExternalEncoder) {
|
||||
EXPECT_EQ(1, vea_factory_.vea_response_count());
|
||||
}
|
||||
|
||||
video_sender_.reset(NULL);
|
||||
// NOTE: Must delete video_sender_ before test exits to avoid dangling pointer
|
||||
// issues; root cause is unclear
|
||||
openscreen_video_sender_ = nullptr;
|
||||
video_sender_.reset();
|
||||
task_runner_->RunTasks();
|
||||
EXPECT_EQ(1, vea_factory_.vea_response_count());
|
||||
}
|
||||
@ -321,327 +239,17 @@ TEST_F(VideoSenderTest, ExternalEncoderInitFails) {
|
||||
}
|
||||
EXPECT_EQ(STATUS_CODEC_INIT_FAILED, operational_status_);
|
||||
|
||||
video_sender_.reset(NULL);
|
||||
// NOTE: Must delete video_sender_ before test exits to avoid dangling pointer
|
||||
// issues; root cause is unclear
|
||||
openscreen_video_sender_ = nullptr;
|
||||
video_sender_.reset();
|
||||
task_runner_->RunTasks();
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, RtcpTimer) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
|
||||
|
||||
const base::TimeTicks reference_time = testing_clock_.NowTicks();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, reference_time);
|
||||
|
||||
// Make sure that we send at least one RTCP packet.
|
||||
base::TimeDelta max_rtcp_timeout =
|
||||
base::Milliseconds(1) + kRtcpReportInterval * 3 / 2;
|
||||
|
||||
RunTasks(max_rtcp_timeout.InMilliseconds());
|
||||
EXPECT_LE(1, transport_->number_of_rtp_packets());
|
||||
EXPECT_LE(1, transport_->number_of_rtcp_packets());
|
||||
// Build Cast msg and expect RTCP packet.
|
||||
RtcpCastMessage cast_feedback(1);
|
||||
cast_feedback.remote_ssrc = 2;
|
||||
cast_feedback.ack_frame_id = FrameId::first();
|
||||
video_sender_->OnReceivedCastFeedback(cast_feedback);
|
||||
RunTasks(max_rtcp_timeout.InMilliseconds());
|
||||
EXPECT_LE(1, transport_->number_of_rtcp_packets());
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, ResendTimer) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
|
||||
|
||||
const base::TimeTicks reference_time = testing_clock_.NowTicks();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, reference_time);
|
||||
|
||||
// ACK the key frame.
|
||||
RtcpCastMessage cast_feedback(1);
|
||||
cast_feedback.remote_ssrc = 2;
|
||||
cast_feedback.ack_frame_id = FrameId::first();
|
||||
video_sender_->OnReceivedCastFeedback(cast_feedback);
|
||||
|
||||
video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, reference_time);
|
||||
|
||||
base::TimeDelta max_resend_timeout =
|
||||
kDefaultTargetPlayoutDelay + base::Milliseconds(1);
|
||||
|
||||
// Make sure that we do a re-send.
|
||||
RunTasks(max_resend_timeout.InMilliseconds());
|
||||
// Should have sent at least 3 packets.
|
||||
EXPECT_LE(3, transport_->number_of_rtp_packets() +
|
||||
transport_->number_of_rtcp_packets());
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, LogAckReceivedEvent) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
SimpleEventSubscriber event_subscriber;
|
||||
cast_environment_->logger()->Subscribe(&event_subscriber);
|
||||
|
||||
int num_frames = 10;
|
||||
for (int i = 0; i < num_frames; i++) {
|
||||
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
|
||||
|
||||
const base::TimeTicks reference_time = testing_clock_.NowTicks();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, reference_time);
|
||||
RunTasks(33);
|
||||
}
|
||||
|
||||
task_runner_->RunTasks();
|
||||
|
||||
RtcpCastMessage cast_feedback(1);
|
||||
cast_feedback.ack_frame_id = FrameId::first() + num_frames - 1;
|
||||
|
||||
video_sender_->OnReceivedCastFeedback(cast_feedback);
|
||||
|
||||
std::vector<FrameEvent> frame_events;
|
||||
event_subscriber.GetFrameEventsAndReset(&frame_events);
|
||||
|
||||
ASSERT_TRUE(!frame_events.empty());
|
||||
EXPECT_EQ(FRAME_ACK_RECEIVED, frame_events.rbegin()->type);
|
||||
EXPECT_EQ(VIDEO_EVENT, frame_events.rbegin()->media_type);
|
||||
EXPECT_EQ(FrameId::first() + num_frames - 1, frame_events.rbegin()->frame_id);
|
||||
|
||||
cast_environment_->logger()->Unsubscribe(&event_subscriber);
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, StopSendingInTheAbsenceOfAck) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
// Send a stream of frames and don't ACK; by default we shouldn't have more
|
||||
// than 4 frames in flight.
|
||||
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
// Give time for the frame to be processed plus handling some of the playout
|
||||
// delay.
|
||||
RunTasks(300);
|
||||
|
||||
// Send 3 more frames and record the number of packets sent.
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
}
|
||||
const int number_of_packets_sent = transport_->number_of_rtp_packets();
|
||||
|
||||
// Send 3 more frames - they should not be encoded, as we have not received
|
||||
// any acks.
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
}
|
||||
|
||||
// We expect a frame to be retransmitted because of duplicated ACKs.
|
||||
// Only one packet of the frame is re-transmitted.
|
||||
EXPECT_EQ(number_of_packets_sent + 1, transport_->number_of_rtp_packets());
|
||||
|
||||
// Start acking and make sure we're back to steady-state.
|
||||
RtcpCastMessage cast_feedback(1);
|
||||
cast_feedback.remote_ssrc = 2;
|
||||
cast_feedback.ack_frame_id = FrameId::first();
|
||||
video_sender_->OnReceivedCastFeedback(cast_feedback);
|
||||
EXPECT_LE(4, transport_->number_of_rtp_packets() +
|
||||
transport_->number_of_rtcp_packets());
|
||||
|
||||
// Empty the pipeline.
|
||||
RunTasks(100);
|
||||
// Should have sent at least 7 packets.
|
||||
EXPECT_LE(7, transport_->number_of_rtp_packets() +
|
||||
transport_->number_of_rtcp_packets());
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, DuplicateAckRetransmit) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
RtcpCastMessage cast_feedback(1);
|
||||
cast_feedback.remote_ssrc = 2;
|
||||
cast_feedback.ack_frame_id = FrameId::first();
|
||||
|
||||
// Send 3 more frames but don't ACK.
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
}
|
||||
const int number_of_packets_sent = transport_->number_of_rtp_packets();
|
||||
|
||||
// Send duplicated ACKs and mix some invalid NACKs.
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
RtcpCastMessage ack_feedback(1);
|
||||
ack_feedback.remote_ssrc = 2;
|
||||
ack_feedback.ack_frame_id = FrameId::first();
|
||||
RtcpCastMessage nack_feedback(1);
|
||||
nack_feedback.remote_ssrc = 2;
|
||||
nack_feedback.missing_frames_and_packets[FrameId::first() + 255] =
|
||||
PacketIdSet();
|
||||
video_sender_->OnReceivedCastFeedback(ack_feedback);
|
||||
video_sender_->OnReceivedCastFeedback(nack_feedback);
|
||||
}
|
||||
EXPECT_EQ(number_of_packets_sent, transport_->number_of_rtp_packets());
|
||||
|
||||
// Re-transmit one packet because of duplicated ACKs.
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
RtcpCastMessage ack_feedback(1);
|
||||
ack_feedback.remote_ssrc = 2;
|
||||
ack_feedback.ack_frame_id = FrameId::first();
|
||||
video_sender_->OnReceivedCastFeedback(ack_feedback);
|
||||
}
|
||||
EXPECT_EQ(number_of_packets_sent + 1, transport_->number_of_rtp_packets());
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, DuplicateAckRetransmitDoesNotCancelRetransmits) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
RtcpCastMessage cast_feedback(1);
|
||||
cast_feedback.remote_ssrc = 2;
|
||||
cast_feedback.ack_frame_id = FrameId::first();
|
||||
|
||||
// Send 2 more frames but don't ACK.
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
}
|
||||
// Pause the transport
|
||||
transport_->SetPause(true);
|
||||
|
||||
// Insert one more video frame.
|
||||
video_frame = GetLargeNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
|
||||
const int number_of_packets_sent = transport_->number_of_rtp_packets();
|
||||
|
||||
// Send duplicated ACKs and mix some invalid NACKs.
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
RtcpCastMessage ack_feedback(1);
|
||||
ack_feedback.remote_ssrc = 2;
|
||||
ack_feedback.ack_frame_id = FrameId::first();
|
||||
RtcpCastMessage nack_feedback(1);
|
||||
nack_feedback.remote_ssrc = 2;
|
||||
nack_feedback.missing_frames_and_packets[FrameId::first() + 255] =
|
||||
PacketIdSet();
|
||||
video_sender_->OnReceivedCastFeedback(ack_feedback);
|
||||
video_sender_->OnReceivedCastFeedback(nack_feedback);
|
||||
}
|
||||
EXPECT_EQ(number_of_packets_sent, transport_->number_of_rtp_packets());
|
||||
|
||||
// Re-transmit one packet because of duplicated ACKs.
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
RtcpCastMessage ack_feedback(1);
|
||||
ack_feedback.remote_ssrc = 2;
|
||||
ack_feedback.ack_frame_id = FrameId::first();
|
||||
video_sender_->OnReceivedCastFeedback(ack_feedback);
|
||||
}
|
||||
|
||||
transport_->SetPause(false);
|
||||
RunTasks(100);
|
||||
EXPECT_LT(number_of_packets_sent + 1, transport_->number_of_rtp_packets());
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, AcksCancelRetransmits) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
transport_->SetPause(true);
|
||||
scoped_refptr<media::VideoFrame> video_frame = GetLargeNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
|
||||
// Frame should be in buffer, waiting. Now let's ack it.
|
||||
RtcpCastMessage cast_feedback(1);
|
||||
cast_feedback.remote_ssrc = 2;
|
||||
cast_feedback.ack_frame_id = FrameId::first();
|
||||
video_sender_->OnReceivedCastFeedback(cast_feedback);
|
||||
|
||||
transport_->SetPause(false);
|
||||
RunTasks(33);
|
||||
EXPECT_EQ(0, transport_->number_of_rtp_packets());
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, CheckVideoFrameFactoryIsNull) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
EXPECT_EQ(nullptr, video_sender_->CreateVideoFrameFactory().get());
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, ReportsResourceUtilizationInCallback) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
for (int i = 0; i < 3; ++i) {
|
||||
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
|
||||
|
||||
const base::TimeTicks reference_time = testing_clock_.NowTicks();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, reference_time);
|
||||
|
||||
// Run encode tasks. VideoSender::OnEncodedVideoFrame() will be called once
|
||||
// encoding of the frame is complete, and this is when the
|
||||
// resource_utilization metadata is populated.
|
||||
RunTasks(33);
|
||||
|
||||
// Check that the resource_utilization value is set and non-negative. Don't
|
||||
// check for specific values because they are dependent on real-world CPU
|
||||
// encode time, which can vary across test runs.
|
||||
double utilization = video_sender_->GetFeedback().resource_utilization;
|
||||
EXPECT_LE(0.0, utilization);
|
||||
if (i == 0)
|
||||
EXPECT_GE(1.0, utilization); // Key frames never exceed 1.0.
|
||||
DVLOG(1) << "Utilization computed by VideoSender is: " << utilization;
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(VideoSenderTest, CancelSendingOnReceivingPli) {
|
||||
InitEncoder(false, true);
|
||||
ASSERT_EQ(STATUS_INITIALIZED, operational_status_);
|
||||
|
||||
// Send a frame and ACK it.
|
||||
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
|
||||
RtcpCastMessage cast_feedback(1);
|
||||
cast_feedback.remote_ssrc = 2;
|
||||
cast_feedback.ack_frame_id = FrameId::first();
|
||||
video_sender_->OnReceivedCastFeedback(cast_feedback);
|
||||
|
||||
transport_->SetPause(true);
|
||||
// Send three more frames.
|
||||
for (int i = 0; i < 3; i++) {
|
||||
video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
|
||||
RunTasks(33);
|
||||
}
|
||||
EXPECT_EQ(1, transport_->number_of_rtp_packets());
|
||||
|
||||
// Frames should be in buffer, waiting.
|
||||
// Received PLI from receiver.
|
||||
video_sender_->OnReceivedPli();
|
||||
video_frame = GetNewVideoFrame();
|
||||
video_sender_->InsertRawVideoFrame(
|
||||
video_frame, testing_clock_.NowTicks() + base::Milliseconds(1000));
|
||||
RunTasks(33);
|
||||
transport_->SetPause(false);
|
||||
RunTasks(33);
|
||||
EXPECT_EQ(2, transport_->number_of_rtp_packets());
|
||||
}
|
||||
|
||||
} // namespace media::cast
|
||||
|
@ -1,734 +0,0 @@
|
||||
// Copyright 2014 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
//
|
||||
// This program benchmarks the theoretical throughput of the cast library.
|
||||
// It runs using a fake clock, simulated network and fake codecs. This allows
|
||||
// tests to run much faster than real time.
|
||||
// To run the program, run:
|
||||
// $ ./out/Release/cast_benchmarks | tee benchmarkoutput.asc
|
||||
// This may take a while, when it is done, you can view the data with
|
||||
// meshlab by running:
|
||||
// $ meshlab benchmarkoutput.asc
|
||||
// After starting meshlab, turn on Render->Show Axis. The red axis will
|
||||
// represent bandwidth (in megabits) the blue axis will be packet drop
|
||||
// (in percent) and the green axis will be latency (in milliseconds).
|
||||
//
|
||||
// This program can also be used for profiling. On linux it has
|
||||
// built-in support for this. Simply set the environment variable
|
||||
// PROFILE_FILE before running it, like so:
|
||||
// $ export PROFILE_FILE=cast_benchmark.profile
|
||||
// Then after running the program, you can view the profile with:
|
||||
// $ pprof ./out/Release/cast_benchmarks $PROFILE_FILE --gv
|
||||
|
||||
#include <math.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <numeric>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "base/at_exit.h"
|
||||
#include "base/command_line.h"
|
||||
#include "base/debug/profiler.h"
|
||||
#include "base/functional/bind.h"
|
||||
#include "base/functional/callback_helpers.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/memory/weak_ptr.h"
|
||||
#include "base/run_loop.h"
|
||||
#include "base/strings/string_number_conversions.h"
|
||||
#include "base/strings/stringprintf.h"
|
||||
#include "base/task/single_thread_task_runner.h"
|
||||
#include "base/test/simple_test_tick_clock.h"
|
||||
#include "base/threading/thread.h"
|
||||
#include "base/time/tick_clock.h"
|
||||
#include "base/time/time.h"
|
||||
#include "media/base/audio_bus.h"
|
||||
#include "media/base/fake_single_thread_task_runner.h"
|
||||
#include "media/base/mock_filters.h"
|
||||
#include "media/base/video_frame.h"
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_environment.h"
|
||||
#include "media/cast/cast_sender.h"
|
||||
#include "media/cast/common/encoded_frame.h"
|
||||
#include "media/cast/logging/simple_event_subscriber.h"
|
||||
#include "media/cast/net/cast_transport.h"
|
||||
#include "media/cast/net/cast_transport_config.h"
|
||||
#include "media/cast/net/cast_transport_defines.h"
|
||||
#include "media/cast/net/cast_transport_impl.h"
|
||||
#include "media/cast/test/loopback_transport.h"
|
||||
#include "media/cast/test/receiver/cast_receiver.h"
|
||||
#include "media/cast/test/skewed_single_thread_task_runner.h"
|
||||
#include "media/cast/test/skewed_tick_clock.h"
|
||||
#include "media/cast/test/utility/audio_utility.h"
|
||||
#include "media/cast/test/utility/default_config.h"
|
||||
#include "media/cast/test/utility/test_util.h"
|
||||
#include "media/cast/test/utility/udp_proxy.h"
|
||||
#include "media/cast/test/utility/video_utility.h"
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
|
||||
namespace media {
|
||||
namespace cast {
|
||||
|
||||
namespace {
|
||||
|
||||
static const int64_t kStartMillisecond = INT64_C(1245);
|
||||
static const int kTargetPlayoutDelayMs = 400;
|
||||
|
||||
void ExpectVideoSuccess(OperationalStatus status) {
|
||||
EXPECT_EQ(STATUS_INITIALIZED, status);
|
||||
}
|
||||
|
||||
void ExpectAudioSuccess(OperationalStatus status) {
|
||||
EXPECT_EQ(STATUS_INITIALIZED, status);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// Wraps a CastTransport and records some statistics about
|
||||
// the data that goes through it.
|
||||
class CastTransportWrapper : public CastTransport {
|
||||
public:
|
||||
// Takes ownership of |transport|.
|
||||
void Init(CastTransport* transport,
|
||||
uint64_t* encoded_video_bytes,
|
||||
uint64_t* encoded_audio_bytes) {
|
||||
transport_.reset(transport);
|
||||
encoded_video_bytes_ = encoded_video_bytes;
|
||||
encoded_audio_bytes_ = encoded_audio_bytes;
|
||||
}
|
||||
|
||||
void InitializeStream(const CastTransportRtpConfig& config,
|
||||
std::unique_ptr<RtcpObserver> rtcp_observer) final {
|
||||
if (config.rtp_payload_type <= RtpPayloadType::AUDIO_LAST)
|
||||
audio_ssrc_ = config.ssrc;
|
||||
else
|
||||
video_ssrc_ = config.ssrc;
|
||||
transport_->InitializeStream(config, std::move(rtcp_observer));
|
||||
}
|
||||
|
||||
void InsertFrame(uint32_t ssrc, const EncodedFrame& frame) final {
|
||||
if (ssrc == audio_ssrc_) {
|
||||
*encoded_audio_bytes_ += frame.data.size();
|
||||
} else if (ssrc == video_ssrc_) {
|
||||
*encoded_video_bytes_ += frame.data.size();
|
||||
}
|
||||
transport_->InsertFrame(ssrc, frame);
|
||||
}
|
||||
|
||||
void SendSenderReport(uint32_t ssrc,
|
||||
base::TimeTicks current_time,
|
||||
RtpTimeTicks current_time_as_rtp_timestamp) final {
|
||||
transport_->SendSenderReport(ssrc,
|
||||
current_time,
|
||||
current_time_as_rtp_timestamp);
|
||||
}
|
||||
|
||||
void CancelSendingFrames(uint32_t ssrc,
|
||||
const std::vector<FrameId>& frame_ids) final {
|
||||
transport_->CancelSendingFrames(ssrc, frame_ids);
|
||||
}
|
||||
|
||||
void ResendFrameForKickstart(uint32_t ssrc, FrameId frame_id) final {
|
||||
transport_->ResendFrameForKickstart(ssrc, frame_id);
|
||||
}
|
||||
|
||||
PacketReceiverCallback PacketReceiverForTesting() final {
|
||||
return transport_->PacketReceiverForTesting();
|
||||
}
|
||||
|
||||
void AddValidRtpReceiver(uint32_t rtp_sender_ssrc,
|
||||
uint32_t rtp_receiver_ssrc) final {
|
||||
return transport_->AddValidRtpReceiver(rtp_sender_ssrc, rtp_receiver_ssrc);
|
||||
}
|
||||
|
||||
void InitializeRtpReceiverRtcpBuilder(uint32_t rtp_receiver_ssrc,
|
||||
const RtcpTimeData& time_data) final {
|
||||
transport_->InitializeRtpReceiverRtcpBuilder(rtp_receiver_ssrc, time_data);
|
||||
}
|
||||
|
||||
void AddCastFeedback(const RtcpCastMessage& cast_message,
|
||||
base::TimeDelta target_delay) final {
|
||||
transport_->AddCastFeedback(cast_message, target_delay);
|
||||
}
|
||||
|
||||
void AddRtcpEvents(
|
||||
const ReceiverRtcpEventSubscriber::RtcpEvents& rtcp_events) final {
|
||||
transport_->AddRtcpEvents(rtcp_events);
|
||||
}
|
||||
|
||||
void AddRtpReceiverReport(const RtcpReportBlock& rtp_report_block) final {
|
||||
transport_->AddRtpReceiverReport(rtp_report_block);
|
||||
}
|
||||
|
||||
void AddPli(const RtcpPliMessage& pli_message) final {
|
||||
transport_->AddPli(pli_message);
|
||||
}
|
||||
|
||||
void SendRtcpFromRtpReceiver() final {
|
||||
transport_->SendRtcpFromRtpReceiver();
|
||||
}
|
||||
|
||||
void SetOptions(const base::Value::Dict& options) final {}
|
||||
|
||||
private:
|
||||
std::unique_ptr<CastTransport> transport_;
|
||||
uint32_t audio_ssrc_, video_ssrc_;
|
||||
raw_ptr<uint64_t> encoded_video_bytes_;
|
||||
raw_ptr<uint64_t> encoded_audio_bytes_;
|
||||
};
|
||||
|
||||
struct MeasuringPoint {
|
||||
MeasuringPoint(double bitrate_, double latency_, double percent_packet_drop_)
|
||||
: bitrate(bitrate_),
|
||||
latency(latency_),
|
||||
percent_packet_drop(percent_packet_drop_) {}
|
||||
bool operator<=(const MeasuringPoint& other) const {
|
||||
return bitrate >= other.bitrate && latency <= other.latency &&
|
||||
percent_packet_drop <= other.percent_packet_drop;
|
||||
}
|
||||
bool operator>=(const MeasuringPoint& other) const {
|
||||
return bitrate <= other.bitrate && latency >= other.latency &&
|
||||
percent_packet_drop >= other.percent_packet_drop;
|
||||
}
|
||||
|
||||
std::string AsString() const {
|
||||
return base::StringPrintf(
|
||||
"%f Mbit/s %f ms %f %% ", bitrate, latency, percent_packet_drop);
|
||||
}
|
||||
|
||||
double bitrate;
|
||||
double latency;
|
||||
double percent_packet_drop;
|
||||
};
|
||||
|
||||
class RunOneBenchmark {
|
||||
public:
|
||||
RunOneBenchmark()
|
||||
: start_time_(),
|
||||
task_runner_(new FakeSingleThreadTaskRunner(&testing_clock_)),
|
||||
testing_clock_sender_(&testing_clock_),
|
||||
task_runner_sender_(
|
||||
new test::SkewedSingleThreadTaskRunner(task_runner_)),
|
||||
testing_clock_receiver_(&testing_clock_),
|
||||
task_runner_receiver_(
|
||||
new test::SkewedSingleThreadTaskRunner(task_runner_)),
|
||||
cast_environment_sender_(new CastEnvironment(&testing_clock_sender_,
|
||||
task_runner_sender_,
|
||||
task_runner_sender_,
|
||||
task_runner_sender_)),
|
||||
cast_environment_receiver_(new CastEnvironment(&testing_clock_receiver_,
|
||||
task_runner_receiver_,
|
||||
task_runner_receiver_,
|
||||
task_runner_receiver_)),
|
||||
video_bytes_encoded_(0),
|
||||
audio_bytes_encoded_(0),
|
||||
frames_sent_(0) {
|
||||
testing_clock_.Advance(base::Milliseconds(kStartMillisecond));
|
||||
}
|
||||
|
||||
void Configure(Codec video_codec,
|
||||
Codec audio_codec) {
|
||||
audio_sender_config_ = GetDefaultAudioSenderConfig();
|
||||
audio_sender_config_.min_playout_delay =
|
||||
audio_sender_config_.max_playout_delay =
|
||||
base::Milliseconds(kTargetPlayoutDelayMs);
|
||||
audio_sender_config_.codec = audio_codec;
|
||||
|
||||
audio_receiver_config_ = GetDefaultAudioReceiverConfig();
|
||||
audio_receiver_config_.rtp_max_delay_ms =
|
||||
audio_sender_config_.max_playout_delay.InMicroseconds();
|
||||
audio_receiver_config_.codec = audio_codec;
|
||||
|
||||
video_sender_config_ = GetDefaultVideoSenderConfig();
|
||||
video_sender_config_.min_playout_delay =
|
||||
video_sender_config_.max_playout_delay =
|
||||
base::Milliseconds(kTargetPlayoutDelayMs);
|
||||
video_sender_config_.max_bitrate = 4000000;
|
||||
video_sender_config_.min_bitrate = 4000000;
|
||||
video_sender_config_.start_bitrate = 4000000;
|
||||
video_sender_config_.codec = video_codec;
|
||||
|
||||
video_receiver_config_ = GetDefaultVideoReceiverConfig();
|
||||
video_receiver_config_.rtp_max_delay_ms = kTargetPlayoutDelayMs;
|
||||
video_receiver_config_.codec = video_codec;
|
||||
|
||||
DCHECK_GT(video_sender_config_.max_frame_rate, 0);
|
||||
frame_duration_ = base::Seconds(1.0 / video_sender_config_.max_frame_rate);
|
||||
}
|
||||
|
||||
void SetSenderClockSkew(double skew, base::TimeDelta offset) {
|
||||
testing_clock_sender_.SetSkew(skew, offset);
|
||||
task_runner_sender_->SetSkew(1.0 / skew);
|
||||
}
|
||||
|
||||
void SetReceiverClockSkew(double skew, base::TimeDelta offset) {
|
||||
testing_clock_receiver_.SetSkew(skew, offset);
|
||||
task_runner_receiver_->SetSkew(1.0 / skew);
|
||||
}
|
||||
|
||||
void Create(const MeasuringPoint& p);
|
||||
|
||||
void ReceivePacket(std::unique_ptr<Packet> packet) {
|
||||
cast_receiver_->ReceivePacket(std::move(packet));
|
||||
}
|
||||
|
||||
virtual ~RunOneBenchmark() {
|
||||
cast_sender_.reset();
|
||||
cast_receiver_.reset();
|
||||
task_runner_->RunTasks();
|
||||
}
|
||||
|
||||
base::TimeDelta VideoTimestamp(int frame_number) {
|
||||
return frame_number *
|
||||
base::Seconds(1.0 / video_sender_config_.max_frame_rate);
|
||||
}
|
||||
|
||||
void SendFakeVideoFrame() {
|
||||
// NB: Blackframe with timestamp
|
||||
cast_sender_->video_frame_input()->InsertRawVideoFrame(
|
||||
media::VideoFrame::CreateColorFrame(gfx::Size(2, 2), 0x00, 0x80, 0x80,
|
||||
VideoTimestamp(frames_sent_)),
|
||||
testing_clock_sender_.NowTicks());
|
||||
frames_sent_++;
|
||||
}
|
||||
|
||||
void RunTasks(base::TimeDelta duration) {
|
||||
task_runner_->Sleep(duration);
|
||||
}
|
||||
|
||||
void BasicPlayerGotVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
|
||||
base::TimeTicks render_time,
|
||||
bool continuous) {
|
||||
video_ticks_.push_back(
|
||||
std::make_pair(testing_clock_receiver_.NowTicks(), render_time));
|
||||
cast_receiver_->RequestDecodedVideoFrame(base::BindRepeating(
|
||||
&RunOneBenchmark::BasicPlayerGotVideoFrame, base::Unretained(this)));
|
||||
}
|
||||
|
||||
void BasicPlayerGotAudioFrame(std::unique_ptr<AudioBus> audio_bus,
|
||||
base::TimeTicks playout_time,
|
||||
bool is_continuous) {
|
||||
audio_ticks_.push_back(
|
||||
std::make_pair(testing_clock_receiver_.NowTicks(), playout_time));
|
||||
cast_receiver_->RequestDecodedAudioFrame(base::BindRepeating(
|
||||
&RunOneBenchmark::BasicPlayerGotAudioFrame, base::Unretained(this)));
|
||||
}
|
||||
|
||||
void StartBasicPlayer() {
|
||||
cast_receiver_->RequestDecodedVideoFrame(base::BindRepeating(
|
||||
&RunOneBenchmark::BasicPlayerGotVideoFrame, base::Unretained(this)));
|
||||
cast_receiver_->RequestDecodedAudioFrame(base::BindRepeating(
|
||||
&RunOneBenchmark::BasicPlayerGotAudioFrame, base::Unretained(this)));
|
||||
}
|
||||
|
||||
std::unique_ptr<test::PacketPipe> CreateSimplePipe(const MeasuringPoint& p) {
|
||||
std::unique_ptr<test::PacketPipe> pipe = test::NewBuffer(65536, p.bitrate);
|
||||
pipe->AppendToPipe(test::NewRandomDrop(p.percent_packet_drop / 100.0));
|
||||
pipe->AppendToPipe(test::NewConstantDelay(p.latency / 1000.0));
|
||||
return pipe;
|
||||
}
|
||||
|
||||
void Run(const MeasuringPoint& p) {
|
||||
available_bitrate_ = p.bitrate;
|
||||
Configure(Codec::kVideoFake, Codec::kAudioPcm16);
|
||||
Create(p);
|
||||
StartBasicPlayer();
|
||||
|
||||
for (int frame = 0; frame < 1000; frame++) {
|
||||
SendFakeVideoFrame();
|
||||
RunTasks(frame_duration_);
|
||||
}
|
||||
RunTasks(100 * frame_duration_); // Empty the pipeline.
|
||||
VLOG(1) << "=============INPUTS============";
|
||||
VLOG(1) << "Bitrate: " << p.bitrate << " mbit/s";
|
||||
VLOG(1) << "Latency: " << p.latency << " ms";
|
||||
VLOG(1) << "Packet drop drop: " << p.percent_packet_drop << "%";
|
||||
VLOG(1) << "=============OUTPUTS============";
|
||||
VLOG(1) << "Frames lost: " << frames_lost();
|
||||
VLOG(1) << "Late frames: " << late_frames();
|
||||
VLOG(1) << "Playout margin: " << frame_playout_buffer().AsString();
|
||||
VLOG(1) << "Video bandwidth used: " << video_bandwidth() << " mbit/s ("
|
||||
<< (video_bandwidth() * 100 / desired_video_bitrate()) << "%)";
|
||||
VLOG(1) << "Good run: " << SimpleGood();
|
||||
}
|
||||
|
||||
// Metrics
|
||||
int frames_lost() const { return frames_sent_ - video_ticks_.size(); }
|
||||
|
||||
int late_frames() const {
|
||||
int frames = 0;
|
||||
// Ignore the first two seconds of video or so.
|
||||
for (size_t i = 60; i < video_ticks_.size(); i++) {
|
||||
if (video_ticks_[i].first > video_ticks_[i].second) {
|
||||
frames++;
|
||||
}
|
||||
}
|
||||
return frames;
|
||||
}
|
||||
|
||||
test::MeanAndError frame_playout_buffer() const {
|
||||
std::vector<double> values;
|
||||
for (size_t i = 0; i < video_ticks_.size(); i++) {
|
||||
values.push_back(
|
||||
(video_ticks_[i].second - video_ticks_[i].first).InMillisecondsF());
|
||||
}
|
||||
return test::MeanAndError(values);
|
||||
}
|
||||
|
||||
// Mbits per second
|
||||
double video_bandwidth() const {
|
||||
double seconds = (frame_duration_.InSecondsF() * frames_sent_);
|
||||
double megabits = video_bytes_encoded_ * 8 / 1000000.0;
|
||||
return megabits / seconds;
|
||||
}
|
||||
|
||||
// Mbits per second
|
||||
double audio_bandwidth() const {
|
||||
double seconds = (frame_duration_.InSecondsF() * frames_sent_);
|
||||
double megabits = audio_bytes_encoded_ * 8 / 1000000.0;
|
||||
return megabits / seconds;
|
||||
}
|
||||
|
||||
double desired_video_bitrate() {
|
||||
return std::min<double>(available_bitrate_,
|
||||
video_sender_config_.max_bitrate / 1000000.0);
|
||||
}
|
||||
|
||||
bool SimpleGood() {
|
||||
return frames_lost() <= 1 && late_frames() <= 1 &&
|
||||
video_bandwidth() > desired_video_bitrate() * 0.8 &&
|
||||
video_bandwidth() < desired_video_bitrate() * 1.2;
|
||||
}
|
||||
|
||||
private:
|
||||
FrameReceiverConfig audio_receiver_config_;
|
||||
FrameReceiverConfig video_receiver_config_;
|
||||
FrameSenderConfig audio_sender_config_;
|
||||
FrameSenderConfig video_sender_config_;
|
||||
|
||||
base::TimeTicks start_time_;
|
||||
|
||||
// These run in "test time"
|
||||
base::SimpleTestTickClock testing_clock_;
|
||||
scoped_refptr<FakeSingleThreadTaskRunner> task_runner_;
|
||||
|
||||
// These run on the sender timeline.
|
||||
test::SkewedTickClock testing_clock_sender_;
|
||||
scoped_refptr<test::SkewedSingleThreadTaskRunner> task_runner_sender_;
|
||||
|
||||
// These run on the receiver timeline.
|
||||
test::SkewedTickClock testing_clock_receiver_;
|
||||
scoped_refptr<test::SkewedSingleThreadTaskRunner> task_runner_receiver_;
|
||||
|
||||
scoped_refptr<CastEnvironment> cast_environment_sender_;
|
||||
scoped_refptr<CastEnvironment> cast_environment_receiver_;
|
||||
|
||||
raw_ptr<LoopBackTransport>
|
||||
receiver_to_sender_; // Owned by CastTransportImpl.
|
||||
raw_ptr<LoopBackTransport>
|
||||
sender_to_receiver_; // Owned by CastTransportImpl.
|
||||
CastTransportWrapper transport_sender_;
|
||||
std::unique_ptr<CastTransport> transport_receiver_;
|
||||
uint64_t video_bytes_encoded_;
|
||||
uint64_t audio_bytes_encoded_;
|
||||
|
||||
std::unique_ptr<CastReceiver> cast_receiver_;
|
||||
std::unique_ptr<CastSender> cast_sender_;
|
||||
|
||||
int frames_sent_;
|
||||
base::TimeDelta frame_duration_;
|
||||
double available_bitrate_;
|
||||
std::vector<std::pair<base::TimeTicks, base::TimeTicks> > audio_ticks_;
|
||||
std::vector<std::pair<base::TimeTicks, base::TimeTicks> > video_ticks_;
|
||||
};
|
||||
|
||||
namespace {
|
||||
|
||||
// Routes CastTransport callbacks for the benchmark: status changes must be
// stream initialization, logging events are dropped, and incoming RTP
// packets are handed to the benchmark's receiver. The sender-side instance
// is constructed with a null benchmark and simply discards packets.
class TransportClient : public CastTransport::Client {
 public:
  explicit TransportClient(RunOneBenchmark* run_one_benchmark)
      : run_one_benchmark_(run_one_benchmark) {}

  TransportClient(const TransportClient&) = delete;
  TransportClient& operator=(const TransportClient&) = delete;

  // Any status other than TRANSPORT_STREAM_INITIALIZED fails the run.
  void OnStatusChanged(CastTransportStatus status) final {
    EXPECT_EQ(TRANSPORT_STREAM_INITIALIZED, status);
  }
  // Raw logging events are not used by the benchmark.
  void OnLoggingEventsReceived(
      std::unique_ptr<std::vector<FrameEvent>> frame_events,
      std::unique_ptr<std::vector<PacketEvent>> packet_events) final {}
  // Delivers a received RTP packet to the benchmark's CastReceiver, if any.
  void ProcessRtpPacket(std::unique_ptr<Packet> packet) final {
    if (run_one_benchmark_)
      run_one_benchmark_->ReceivePacket(std::move(packet));
  }

 private:
  const raw_ptr<RunOneBenchmark> run_one_benchmark_;
};
|
||||
|
||||
} // namespace
|
||||
|
||||
// Wires up the simulated network for measuring point |p|: each side gets a
// CastTransportImpl whose outgoing packets flow through a lossy/laggy
// LoopBackTransport into the other side's packet receiver.
void RunOneBenchmark::Create(const MeasuringPoint& p) {
  // The LoopBackTransports are handed to their CastTransportImpl via
  // WrapUnique, which takes ownership; the raw_ptr members only observe.
  sender_to_receiver_ = new LoopBackTransport(cast_environment_sender_);
  transport_sender_.Init(
      new CastTransportImpl(&testing_clock_sender_, base::Seconds(1),
                            std::make_unique<TransportClient>(nullptr),
                            base::WrapUnique(sender_to_receiver_.get()),
                            task_runner_sender_),
      &video_bytes_encoded_, &audio_bytes_encoded_);

  receiver_to_sender_ = new LoopBackTransport(cast_environment_receiver_);
  transport_receiver_ = std::make_unique<CastTransportImpl>(
      &testing_clock_receiver_, base::Seconds(1),
      std::make_unique<TransportClient>(this),
      base::WrapUnique(receiver_to_sender_.get()), task_runner_receiver_);

  cast_receiver_ =
      CastReceiver::Create(cast_environment_receiver_, audio_receiver_config_,
                           video_receiver_config_, transport_receiver_.get());

  cast_sender_ =
      CastSender::Create(cast_environment_sender_, &transport_sender_);

  cast_sender_->InitializeAudio(audio_sender_config_,
                                base::BindOnce(&ExpectAudioSuccess));
  cast_sender_->InitializeVideo(
      video_sender_config_,
      std::make_unique<media::MockVideoEncoderMetricsProvider>(),
      base::BindRepeating(&ExpectVideoSuccess), base::DoNothing());

  // Both directions use independently-constructed pipes with the same
  // parameters from |p|.
  receiver_to_sender_->Initialize(CreateSimplePipe(p),
                                  transport_sender_.PacketReceiverForTesting(),
                                  task_runner_, &testing_clock_);
  sender_to_receiver_->Initialize(
      CreateSimplePipe(p), transport_receiver_->PacketReceiverForTesting(),
      task_runner_, &testing_clock_);

  // Drain initialization tasks before the caller starts sending frames.
  task_runner_->RunTasks();
}
|
||||
|
||||
// Tri-state result of a BenchmarkCache lookup: a cached success implies the
// point passes, a cached failure implies it fails, otherwise it must be run.
enum CacheResult { FOUND_TRUE, FOUND_FALSE, NOT_FOUND };
|
||||
|
||||
template <class T>
|
||||
class BenchmarkCache {
|
||||
public:
|
||||
CacheResult Lookup(const T& x) {
|
||||
base::AutoLock key(lock_);
|
||||
for (size_t i = 0; i < results_.size(); i++) {
|
||||
if (results_[i].second) {
|
||||
if (x <= results_[i].first) {
|
||||
VLOG(2) << "TRUE because: " << x.AsString()
|
||||
<< " <= " << results_[i].first.AsString();
|
||||
return FOUND_TRUE;
|
||||
}
|
||||
} else {
|
||||
if (x >= results_[i].first) {
|
||||
VLOG(2) << "FALSE because: " << x.AsString()
|
||||
<< " >= " << results_[i].first.AsString();
|
||||
return FOUND_FALSE;
|
||||
}
|
||||
}
|
||||
}
|
||||
return NOT_FOUND;
|
||||
}
|
||||
|
||||
void Add(const T& x, bool result) {
|
||||
base::AutoLock key(lock_);
|
||||
VLOG(2) << "Cache Insert: " << x.AsString() << " = " << result;
|
||||
results_.push_back(std::make_pair(x, result));
|
||||
}
|
||||
|
||||
private:
|
||||
base::Lock lock_;
|
||||
std::vector<std::pair<T, bool> > results_;
|
||||
};
|
||||
|
||||
struct SearchVariable {
|
||||
SearchVariable() : base(0.0), grade(0.0) {}
|
||||
SearchVariable(double b, double g) : base(b), grade(g) {}
|
||||
SearchVariable blend(const SearchVariable& other, double factor) {
|
||||
CHECK_GE(factor, 0);
|
||||
CHECK_LE(factor, 1.0);
|
||||
return SearchVariable(base * (1 - factor) + other.base * factor,
|
||||
grade * (1 - factor) + other.grade * factor);
|
||||
}
|
||||
double value(double x) const { return base + grade * x; }
|
||||
double base;
|
||||
double grade;
|
||||
};
|
||||
|
||||
struct SearchVector {
|
||||
SearchVector blend(const SearchVector& other, double factor) {
|
||||
SearchVector ret;
|
||||
ret.bitrate = bitrate.blend(other.bitrate, factor);
|
||||
ret.latency = latency.blend(other.latency, factor);
|
||||
ret.packet_drop = packet_drop.blend(other.packet_drop, factor);
|
||||
return ret;
|
||||
}
|
||||
|
||||
SearchVector average(const SearchVector& other) {
|
||||
return blend(other, 0.5);
|
||||
}
|
||||
|
||||
MeasuringPoint GetMeasuringPoint(double v) const {
|
||||
return MeasuringPoint(
|
||||
bitrate.value(-v), latency.value(v), packet_drop.value(v));
|
||||
}
|
||||
std::string AsString(double v) { return GetMeasuringPoint(v).AsString(); }
|
||||
|
||||
SearchVariable bitrate;
|
||||
SearchVariable latency;
|
||||
SearchVariable packet_drop;
|
||||
};
|
||||
|
||||
// Top-level driver: explores the (bitrate, latency, packet drop) space on a
// pool of worker threads, binary-searching along rays for the harshest
// conditions under which a simulated Cast session still runs "good".
class CastBenchmark {
 public:
  // Runs (up to three times, majority-of-all) the benchmark at position
  // |multiplier| along ray |v|. Results are memoized in |cache_| via the
  // dominance ordering so implied outcomes are not re-measured.
  bool RunOnePoint(const SearchVector& v, double multiplier) {
    MeasuringPoint p = v.GetMeasuringPoint(multiplier);
    VLOG(1) << "RUN: v = " << multiplier << " p = " << p.AsString();
    if (p.bitrate <= 0) {
      return false;
    }
    switch (cache_.Lookup(p)) {
      case FOUND_TRUE:
        return true;
      case FOUND_FALSE:
        return false;
      case NOT_FOUND:
        // Keep going
        break;
    }
    // Require three consecutive good runs; bail out on the first failure.
    bool result = true;
    for (int tries = 0; tries < 3 && result; tries++) {
      RunOneBenchmark benchmark;
      benchmark.Run(p);
      result &= benchmark.SimpleGood();
    }
    cache_.Add(p, result);
    return result;
  }

  // Finds, to within |accuracy|, the largest position along |v| that still
  // passes: first grows the bracket exponentially, then bisects it.
  void BinarySearch(SearchVector v, double accuracy) {
    double min = 0.0;
    double max = 1.0;
    while (RunOnePoint(v, max)) {
      min = max;
      max *= 2;
    }

    while (max - min > accuracy) {
      double avg = std::midpoint(min, max);
      if (RunOnePoint(v, avg)) {
        min = avg;
      } else {
        max = avg;
      }
    }

    // Print a data point to stdout. The lock serializes output from the
    // worker threads so lines do not interleave.
    base::AutoLock key(lock_);
    MeasuringPoint p = v.GetMeasuringPoint(min);
    fprintf(stdout, "%f %f %f\n", p.bitrate, p.latency, p.percent_packet_drop);
    fflush(stdout);
  }

  // Recursively covers the triangle spanned by rays |a|, |b|, |c| at
  // progressively finer grid resolutions (coarse points first), posting one
  // BinarySearch per grid point round-robin onto |threads|.
  // NOTE(review): |thread_num| is a function-local static mutated without
  // synchronization -- appears safe because SpanningSearch is only invoked
  // from the single thread running Run(); confirm before reusing elsewhere.
  void SpanningSearch(int max,
                      int x,
                      int y,
                      int skip,
                      SearchVector a,
                      SearchVector b,
                      SearchVector c,
                      double accuracy,
                      std::vector<std::unique_ptr<base::Thread>>* threads) {
    static int thread_num = 0;
    if (x > max) return;
    if (skip > max) {
      if (y > x) return;
      // Blend a->b and a->c by x/max, then blend those by y/x to land on
      // the (x, y) grid point inside the triangle.
      SearchVector ab = a.blend(b, static_cast<double>(x) / max);
      SearchVector ac = a.blend(c, static_cast<double>(x) / max);
      SearchVector v = ab.blend(ac, x == y ? 1.0 : static_cast<double>(y) / x);
      thread_num++;
      (*threads)[thread_num % threads->size()]->task_runner()->PostTask(
          FROM_HERE, base::BindOnce(&CastBenchmark::BinarySearch,
                                    base::Unretained(this), v, accuracy));
    } else {
      skip *= 2;
      SpanningSearch(max, x, y, skip, a, b, c, accuracy, threads);
      SpanningSearch(max, x + skip, y + skip, skip, a, b, c, accuracy, threads);
      SpanningSearch(max, x + skip, y, skip, a, b, c, accuracy, threads);
      SpanningSearch(max, x, y + skip, skip, a, b, c, accuracy, threads);
    }
  }

  // Creates the worker pool, then either runs a single point (with the
  // --single-run switch) or the full spanning search. Stop() joins each
  // thread, so all posted searches finish before Run() returns.
  void Run() {
    // Spanning search.

    std::vector<std::unique_ptr<base::Thread>> threads;
    for (int i = 0; i < 16; i++) {
      threads.push_back(std::make_unique<base::Thread>(
          base::StringPrintf("cast_bench_thread_%d", i)));
      threads[i]->Start();
    }

    if (base::CommandLine::ForCurrentProcess()->HasSwitch("single-run")) {
      SearchVector a;
      a.bitrate.base = 100.0;
      a.bitrate.grade = 1.0;
      a.latency.grade = 1.0;
      a.packet_drop.grade = 1.0;
      threads[0]->task_runner()->PostTask(
          FROM_HERE,
          base::BindOnce(base::IgnoreResult(&CastBenchmark::RunOnePoint),
                         base::Unretained(this), a, 1.0));
    } else {
      // Three rays from a common 100 Mbit/s origin, each degrading a single
      // dimension; the spanning search covers the triangle between them.
      SearchVector a, b, c;
      a.bitrate.base = b.bitrate.base = c.bitrate.base = 100.0;
      a.bitrate.grade = 1.0;
      b.latency.grade = 1.0;
      c.packet_drop.grade = 1.0;

      SpanningSearch(512, 0, 0, 1, a, b, c, 0.01, &threads);
    }

    for (size_t i = 0; i < threads.size(); i++) {
      threads[i]->Stop();
    }
  }

 private:
  BenchmarkCache<MeasuringPoint> cache_;
  base::Lock lock_;  // Guards stdout output in BinarySearch().
};
|
||||
|
||||
} // namespace cast
|
||||
} // namespace media
|
||||
|
||||
// Entry point: runs the benchmark, optionally wrapped in the profiler when
// the PROFILE_FILE environment variable names an output file.
int main(int argc, char** argv) {
  base::AtExitManager at_exit;
  base::CommandLine::Init(argc, argv);
  media::cast::CastBenchmark benchmark;
  // Read the environment once (previously getenv() was called twice) and
  // keep a single call site for benchmark.Run().
  const char* profile_file = getenv("PROFILE_FILE");
  if (profile_file) {
    base::debug::StartProfiling(std::string(profile_file));
  }
  benchmark.Run();
  if (profile_file) {
    base::debug::StopProfiling();
  }
}
|
File diff suppressed because it is too large
Load Diff
@ -1,616 +0,0 @@
|
||||
// Copyright 2014 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/cast/test/fake_media_source.h"
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
#include "base/files/scoped_file.h"
|
||||
#include "base/functional/bind.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/rand_util.h"
|
||||
#include "base/strings/string_number_conversions.h"
|
||||
#include "base/task/single_thread_task_runner.h"
|
||||
#include "build/build_config.h"
|
||||
#include "media/base/audio_buffer.h"
|
||||
#include "media/base/audio_bus.h"
|
||||
#include "media/base/audio_fifo.h"
|
||||
#include "media/base/audio_timestamp_helper.h"
|
||||
#include "media/base/media.h"
|
||||
#include "media/base/video_frame.h"
|
||||
#include "media/base/video_util.h"
|
||||
#include "media/cast/cast_sender.h"
|
||||
#include "media/cast/test/utility/audio_utility.h"
|
||||
#include "media/cast/test/utility/video_utility.h"
|
||||
#include "ui/gfx/geometry/size.h"
|
||||
|
||||
#if BUILDFLAG(IS_WIN)
|
||||
#include <direct.h>
|
||||
#endif // BUILDFLAG(IS_WIN)
|
||||
#include "media/ffmpeg/ffmpeg_common.h"
|
||||
#include "media/ffmpeg/ffmpeg_decoding_loop.h"
|
||||
#include "media/ffmpeg/ffmpeg_deleters.h"
|
||||
#include "media/filters/ffmpeg_glue.h"
|
||||
#include "media/filters/in_memory_url_protocol.h"
|
||||
|
||||
namespace {
|
||||
|
||||
// Parameters of the synthetic audio tone used when no source file is set.
static const int kSoundFrequency = 440;  // Frequency of sinusoid wave.
static const float kSoundVolume = 0.10f;
static const int kAudioFrameMs = 10;  // Each audio frame is exactly 10ms.
static const int kAudioPacketsPerSecond = 1000 / kAudioFrameMs;

// Bounds for variable frame size mode.
static const int kMinFakeFrameWidth = 60;
static const int kMinFakeFrameHeight = 34;
static const int kStartingFakeFrameWidth = 854;
static const int kStartingFakeFrameHeight = 480;
static const int kMaxFakeFrameWidth = 1280;
static const int kMaxFakeFrameHeight = 720;
// Longest interval (ms) a randomly-chosen frame size stays in effect.
static const int kMaxFrameSizeChangeMillis = 5000;
|
||||
|
||||
// Frees an AVFrame. av_frame_free() takes an AVFrame**, so this adapter
// provides the plain void(AVFrame*) shape.
void AVFreeFrame(AVFrame* frame) {
  av_frame_free(&frame);
}
|
||||
|
||||
// Converts a stream presentation timestamp (in |time_base| ticks) to a
// wall-clock duration.
base::TimeDelta PtsToTimeDelta(int64_t pts, const AVRational& time_base) {
  return pts * base::Seconds(1) * time_base.num / time_base.den;
}
|
||||
|
||||
// Inverse of PtsToTimeDelta: converts a duration to an integer pts in the
// given time base, rounded to the nearest tick.
// NOTE(review): the +0.5-then-truncate trick only rounds correctly for
// non-negative deltas; callers appear to pass non-negative media times --
// confirm before feeding negative values.
int64_t TimeDeltaToPts(base::TimeDelta delta, const AVRational& time_base) {
  return static_cast<int64_t>(
      delta.InSecondsF() * time_base.den / time_base.num + 0.5 /* rounding */);
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace media {
|
||||
namespace cast {
|
||||
|
||||
// Builds a media source that either synthesizes test patterns or, after
// SetSourceFile(), transcodes a real file via FFmpeg. |clock| paces frame
// delivery on |task_runner|; |keep_frames| retains inserted video frames
// for later inspection.
FakeMediaSource::FakeMediaSource(
    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    const base::TickClock* clock,
    const FrameSenderConfig& audio_config,
    const FrameSenderConfig& video_config,
    bool keep_frames)
    : task_runner_(task_runner),
      // Output format delivered to the sender: linear PCM at the config's
      // RTP timebase, in kAudioFrameMs-sized buffers.
      output_audio_params_(
          AudioParameters::AUDIO_PCM_LINEAR,
          media::ChannelLayoutConfig::Guess(audio_config.channels),
          audio_config.rtp_timebase,
          audio_config.rtp_timebase / kAudioPacketsPerSecond),
      video_config_(video_config),
      keep_frames_(keep_frames),
      variable_frame_size_mode_(false),
      synthetic_count_(0),
      clock_(clock),
      audio_frame_count_(0),
      video_frame_count_(0),
      av_format_context_(nullptr),
      audio_stream_index_(-1),  // -1 until a usable stream is found.
      playback_rate_(1.0),
      video_stream_index_(-1),  // -1 until a usable stream is found.
      video_frame_rate_numerator_(video_config.max_frame_rate),
      video_frame_rate_denominator_(1),
      audio_algo_(&media_log_),
      video_first_pts_(0),
      video_first_pts_set_(false) {
  CHECK(output_audio_params_.IsValid());
  // Synthetic audio source: a fixed sinusoid (see kSoundFrequency).
  audio_bus_factory_ = std::make_unique<TestAudioBusFactory>(
      audio_config.channels, audio_config.rtp_timebase, kSoundFrequency,
      kSoundVolume);
}
|
||||
|
||||
// Default destruction; owned members (FFmpeg contexts, queues) clean up
// through their own destructors.
FakeMediaSource::~FakeMediaSource() = default;
|
||||
|
||||
// Opens |video_file| with FFmpeg and prepares one audio and one video
// decoder (the first usable stream of each kind). When |final_fps| > 0 the
// video is retimed to that rate and the audio playback rate is scaled to
// match. On any fatal error this logs and returns, leaving the source in
// synthetic-pattern mode for the affected stream(s).
void FakeMediaSource::SetSourceFile(const base::FilePath& video_file,
                                    int final_fps) {
  DCHECK(!video_file.empty());

  LOG(INFO) << "Source: " << video_file.value();
  // The whole file is memory-mapped and demuxed through an in-memory
  // protocol rather than FFmpeg's own file I/O.
  if (!file_data_.Initialize(video_file)) {
    LOG(ERROR) << "Cannot load file.";
    return;
  }
  protocol_ = std::make_unique<InMemoryUrlProtocol>(file_data_.data(),
                                                    file_data_.length(), false);
  glue_ = std::make_unique<FFmpegGlue>(protocol_.get());

  if (!glue_->OpenContext()) {
    LOG(ERROR) << "Cannot open file.";
    return;
  }

  // AVFormatContext is owned by the glue.
  av_format_context_ = glue_->format_context();
  if (avformat_find_stream_info(av_format_context_, NULL) < 0) {
    LOG(ERROR) << "Cannot find stream information.";
    return;
  }

  // Prepare FFmpeg decoders.
  for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
    AVStream* av_stream = av_format_context_->streams[i];
    std::unique_ptr<AVCodecContext, ScopedPtrAVFreeContext> av_codec_context(
        AVStreamToAVCodecContext(av_stream));
    if (!av_codec_context) {
      LOG(ERROR) << "Cannot get a codec context for the codec: "
                 << av_stream->codecpar->codec_id;
      continue;
    }

    const AVCodec* av_codec = avcodec_find_decoder(av_codec_context->codec_id);

    if (!av_codec) {
      LOG(ERROR) << "Cannot find decoder for the codec: "
                 << av_codec_context->codec_id;
      continue;
    }

    // Number of threads for decoding.
    av_codec_context->thread_count = 2;
    av_codec_context->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
    av_codec_context->request_sample_fmt = AV_SAMPLE_FMT_S16;

    if (avcodec_open2(av_codec_context.get(), av_codec, nullptr) < 0) {
      LOG(ERROR) << "Cannot open AVCodecContext for the codec: "
                 << av_codec_context->codec_id;
      return;
    }

    if (av_codec->type == AVMEDIA_TYPE_AUDIO) {
      // Planar S16 is not handled by the downstream audio path.
      if (av_codec_context->sample_fmt == AV_SAMPLE_FMT_S16P) {
        LOG(ERROR) << "Audio format not supported.";
        continue;
      }
      ChannelLayout layout = ChannelLayoutToChromeChannelLayout(
          av_codec_context->ch_layout.u.mask,
          av_codec_context->ch_layout.nb_channels);
      if (layout == CHANNEL_LAYOUT_UNSUPPORTED) {
        LOG(ERROR) << "Unsupported audio channels layout.";
        continue;
      }
      // Later usable streams replace earlier ones; only warn.
      if (audio_stream_index_ != -1) {
        LOG(WARNING) << "Found multiple audio streams.";
      }
      audio_stream_index_ = static_cast<int>(i);
      av_audio_context_ = std::move(av_codec_context);
      source_audio_params_.Reset(
          AudioParameters::AUDIO_PCM_LINEAR,
          {layout, av_audio_context_->ch_layout.nb_channels},
          av_audio_context_->sample_rate,
          av_audio_context_->sample_rate / kAudioPacketsPerSecond);
      CHECK(source_audio_params_.IsValid());
      LOG(INFO) << "Source file has audio.";
      audio_decoding_loop_ =
          std::make_unique<FFmpegDecodingLoop>(av_audio_context_.get());
    } else if (av_codec->type == AVMEDIA_TYPE_VIDEO) {
      VideoPixelFormat format =
          AVPixelFormatToVideoPixelFormat(av_codec_context->pix_fmt);
      if (format != PIXEL_FORMAT_I420) {
        LOG(ERROR) << "Cannot handle non YV12 video format: " << format;
        continue;
      }
      if (video_stream_index_ != -1) {
        LOG(WARNING) << "Found multiple video streams.";
      }
      video_stream_index_ = static_cast<int>(i);
      av_video_context_ = std::move(av_codec_context);
      video_decoding_loop_ =
          std::make_unique<FFmpegDecodingLoop>(av_video_context_.get());
      if (final_fps > 0) {
        // If video is played at a manual speed audio needs to match.
        playback_rate_ = 1.0 * final_fps *
            av_stream->r_frame_rate.den / av_stream->r_frame_rate.num;
        video_frame_rate_numerator_ = final_fps;
        video_frame_rate_denominator_ = 1;
      } else {
        // Keep the file's native frame rate and real-time playback.
        playback_rate_ = 1.0;
        video_frame_rate_numerator_ = av_stream->r_frame_rate.num;
        video_frame_rate_denominator_ = av_stream->r_frame_rate.den;
      }
      LOG(INFO) << "Source file has video.";
    } else {
      LOG(ERROR) << "Unknown stream type; ignore.";
    }
  }

  Rewind();
}
|
||||
|
||||
// Enables/disables randomly varying synthetic frame sizes; see
// UpdateNextFrameSize() for how sizes are chosen.
void FakeMediaSource::SetVariableFrameSizeMode(bool enabled) {
  variable_frame_size_mode_ = enabled;
}
|
||||
|
||||
// Begins feeding frames into the given sender inputs. Without a source
// file this posts the synthetic-pattern loop; with one it sets up the
// audio resampling chain (algorithm -> FIFO -> converter) and posts the
// transcoding loop instead.
void FakeMediaSource::Start(scoped_refptr<AudioFrameInput> audio_frame_input,
                            scoped_refptr<VideoFrameInput> video_frame_input) {
  audio_frame_input_ = audio_frame_input;
  video_frame_input_ = video_frame_input;

  LOG(INFO) << "Max Frame rate: " << video_config_.max_frame_rate;
  LOG(INFO) << "Source Frame rate: "
            << video_frame_rate_numerator_ << "/"
            << video_frame_rate_denominator_ << " fps.";
  LOG(INFO) << "Audio playback rate: " << playback_rate_;

  // Keep the original epoch if Start() is called more than once.
  if (start_time_.is_null())
    start_time_ = clock_->NowTicks();

  if (!is_transcoding_audio() && !is_transcoding_video()) {
    // Send fake patterns.
    task_runner_->PostTask(FROM_HERE,
                           base::BindOnce(&FakeMediaSource::SendNextFakeFrame,
                                          weak_factory_.GetWeakPtr()));
    return;
  }

  // Send transcoding streams.
  bool is_encrypted = false;
  audio_algo_.Initialize(source_audio_params_, is_encrypted);
  audio_algo_.FlushBuffers();
  audio_fifo_input_bus_ = AudioBus::Create(
      source_audio_params_.channels(),
      source_audio_params_.frames_per_buffer());
  // Audio FIFO can carry all data from AudioRendererAlgorithm.
  audio_fifo_ = std::make_unique<AudioFifo>(source_audio_params_.channels(),
                                            audio_algo_.QueueCapacity());
  audio_converter_ = std::make_unique<media::AudioConverter>(
      source_audio_params_, output_audio_params_, true);
  audio_converter_->AddInput(this);
  task_runner_->PostTask(FROM_HERE,
                         base::BindOnce(&FakeMediaSource::SendNextFrame,
                                        weak_factory_.GetWeakPtr()));
}
|
||||
|
||||
// Synthesizes and inserts one video frame plus enough audio to cover it,
// then re-posts itself so delivery tracks real (clock_) time. If frame
// generation falls behind, intervening frames are skipped.
void FakeMediaSource::SendNextFakeFrame() {
  UpdateNextFrameSize();
  scoped_refptr<VideoFrame> video_frame =
      VideoFrame::CreateBlackFrame(current_frame_size_);
  PopulateVideoFrame(video_frame.get(), synthetic_count_);
  ++synthetic_count_;

  const base::TimeTicks now = clock_->NowTicks();

  base::TimeDelta video_time = VideoFrameTime(++video_frame_count_);
  video_frame->set_timestamp(video_time);
  if (keep_frames_)
    inserted_video_frame_queue_.push(video_frame);
  video_frame_input_->InsertRawVideoFrame(video_frame,
                                          start_time_ + video_time);

  // Send just enough audio data to match next video frame's time.
  base::TimeDelta audio_time = AudioFrameTime(audio_frame_count_);
  while (audio_time < video_time) {
    if (is_transcoding_audio()) {
      // Pull decoded audio from the file; the queue must never run dry
      // while video frames are still being produced.
      Decode(true);
      CHECK(!audio_bus_queue_.empty()) << "No audio decoded.";
      std::unique_ptr<AudioBus> bus(audio_bus_queue_.front());
      audio_bus_queue_.pop();
      audio_frame_input_->InsertAudio(std::move(bus), start_time_ + audio_time);
    } else {
      // Fully synthetic mode: generate one 10 ms sinusoid bus.
      audio_frame_input_->InsertAudio(
          audio_bus_factory_->NextAudioBus(base::Milliseconds(kAudioFrameMs)),
          start_time_ + audio_time);
    }
    audio_time = AudioFrameTime(++audio_frame_count_);
  }

  // This is the time since FakeMediaSource was started.
  const base::TimeDelta elapsed_time = now - start_time_;

  // Handle the case when frame generation cannot keep up.
  // Move the time ahead to match the next frame.
  while (video_time < elapsed_time) {
    LOG(WARNING) << "Skipping one frame.";
    video_time = VideoFrameTime(++video_frame_count_);
  }

  // Re-arm for the next frame's deadline.
  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::BindOnce(&FakeMediaSource::SendNextFakeFrame,
                     weak_factory_.GetWeakPtr()),
      video_time - elapsed_time);
}
|
||||
|
||||
void FakeMediaSource::UpdateNextFrameSize() {
|
||||
if (variable_frame_size_mode_) {
|
||||
bool update_size_change_time = false;
|
||||
if (current_frame_size_.IsEmpty()) {
|
||||
current_frame_size_ = gfx::Size(kStartingFakeFrameWidth,
|
||||
kStartingFakeFrameHeight);
|
||||
update_size_change_time = true;
|
||||
} else if (clock_->NowTicks() >= next_frame_size_change_time_) {
|
||||
current_frame_size_ = gfx::Size(
|
||||
base::RandInt(kMinFakeFrameWidth, kMaxFakeFrameWidth),
|
||||
base::RandInt(kMinFakeFrameHeight, kMaxFakeFrameHeight));
|
||||
update_size_change_time = true;
|
||||
}
|
||||
|
||||
if (update_size_change_time) {
|
||||
next_frame_size_change_time_ =
|
||||
clock_->NowTicks() +
|
||||
base::Milliseconds(base::RandDouble() * kMaxFrameSizeChangeMillis);
|
||||
}
|
||||
} else {
|
||||
current_frame_size_ = gfx::Size(kStartingFakeFrameWidth,
|
||||
kStartingFakeFrameHeight);
|
||||
next_frame_size_change_time_ = base::TimeTicks();
|
||||
}
|
||||
}
|
||||
|
||||
bool FakeMediaSource::SendNextTranscodedVideo(base::TimeDelta elapsed_time) {
|
||||
if (!is_transcoding_video())
|
||||
return false;
|
||||
|
||||
Decode(false);
|
||||
if (video_frame_queue_.empty())
|
||||
return false;
|
||||
|
||||
const scoped_refptr<VideoFrame> video_frame = video_frame_queue_.front();
|
||||
if (elapsed_time < video_frame->timestamp())
|
||||
return false;
|
||||
video_frame_queue_.pop();
|
||||
|
||||
// Use the timestamp from the file if we're transcoding.
|
||||
video_frame->set_timestamp(ScaleTimestamp(video_frame->timestamp()));
|
||||
if (keep_frames_)
|
||||
inserted_video_frame_queue_.push(video_frame);
|
||||
video_frame_input_->InsertRawVideoFrame(
|
||||
video_frame, start_time_ + video_frame->timestamp());
|
||||
|
||||
// Make sure queue is not empty.
|
||||
Decode(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool FakeMediaSource::SendNextTranscodedAudio(base::TimeDelta elapsed_time) {
|
||||
if (!is_transcoding_audio())
|
||||
return false;
|
||||
|
||||
Decode(true);
|
||||
if (audio_bus_queue_.empty())
|
||||
return false;
|
||||
|
||||
base::TimeDelta audio_time = audio_sent_ts_->GetTimestamp();
|
||||
if (elapsed_time < audio_time)
|
||||
return false;
|
||||
std::unique_ptr<AudioBus> bus(audio_bus_queue_.front());
|
||||
audio_bus_queue_.pop();
|
||||
audio_sent_ts_->AddFrames(bus->frames());
|
||||
audio_frame_input_->InsertAudio(std::move(bus), start_time_ + audio_time);
|
||||
|
||||
// Make sure queue is not empty.
|
||||
Decode(true);
|
||||
return true;
|
||||
}
|
||||
|
||||
void FakeMediaSource::SendNextFrame() {
|
||||
// Send as much as possible. Audio is sent according to
|
||||
// system time.
|
||||
while (SendNextTranscodedAudio(clock_->NowTicks() - start_time_)) {
|
||||
}
|
||||
|
||||
// Video is sync'ed to audio.
|
||||
while (SendNextTranscodedVideo(audio_sent_ts_->GetTimestamp())) {
|
||||
}
|
||||
|
||||
if (audio_bus_queue_.empty() && video_frame_queue_.empty()) {
|
||||
// Both queues are empty can only mean that we have reached
|
||||
// the end of the stream.
|
||||
LOG(INFO) << "Rewind.";
|
||||
Rewind();
|
||||
}
|
||||
|
||||
// Send next send.
|
||||
task_runner_->PostDelayedTask(FROM_HERE,
|
||||
base::BindOnce(&FakeMediaSource::SendNextFrame,
|
||||
weak_factory_.GetWeakPtr()),
|
||||
base::Milliseconds(kAudioFrameMs));
|
||||
}
|
||||
|
||||
base::TimeDelta FakeMediaSource::VideoFrameTime(int frame_number) {
|
||||
return frame_number * base::Seconds(1) * video_frame_rate_denominator_ /
|
||||
video_frame_rate_numerator_;
|
||||
}
|
||||
|
||||
base::TimeDelta FakeMediaSource::ScaleTimestamp(base::TimeDelta timestamp) {
|
||||
return timestamp / playback_rate_;
|
||||
}
|
||||
|
||||
base::TimeDelta FakeMediaSource::AudioFrameTime(int frame_number) {
|
||||
return frame_number * base::Milliseconds(kAudioFrameMs);
|
||||
}
|
||||
|
||||
void FakeMediaSource::Rewind() {
|
||||
CHECK(av_seek_frame(av_format_context_, -1, 0, AVSEEK_FLAG_BACKWARD) >= 0)
|
||||
<< "Failed to rewind to the beginning.";
|
||||
}
|
||||
|
||||
ScopedAVPacket FakeMediaSource::DemuxOnePacket(bool* audio) {
|
||||
auto packet = ScopedAVPacket::Allocate();
|
||||
if (av_read_frame(av_format_context_, packet.get()) < 0) {
|
||||
VLOG(1) << "Failed to read one AVPacket.";
|
||||
return {};
|
||||
}
|
||||
|
||||
int stream_index = static_cast<int>(packet->stream_index);
|
||||
if (stream_index == audio_stream_index_) {
|
||||
*audio = true;
|
||||
} else if (stream_index == video_stream_index_) {
|
||||
*audio = false;
|
||||
} else {
|
||||
// Ignore unknown packet.
|
||||
LOG(INFO) << "Unknown packet.";
|
||||
return {};
|
||||
}
|
||||
return packet;
|
||||
}
|
||||
|
||||
void FakeMediaSource::DecodeAudio(ScopedAVPacket packet) {
|
||||
auto result = audio_decoding_loop_->DecodePacket(
|
||||
packet.get(), base::BindRepeating(&FakeMediaSource::OnNewAudioFrame,
|
||||
base::Unretained(this)));
|
||||
CHECK_EQ(result, FFmpegDecodingLoop::DecodeStatus::kOkay)
|
||||
<< "Failed to decode audio.";
|
||||
|
||||
const int frames_needed_to_scale =
|
||||
playback_rate_ * av_audio_context_->sample_rate / kAudioPacketsPerSecond;
|
||||
while (frames_needed_to_scale <= audio_algo_.BufferedFrames()) {
|
||||
if (!audio_algo_.FillBuffer(audio_fifo_input_bus_.get(), 0,
|
||||
audio_fifo_input_bus_->frames(),
|
||||
playback_rate_)) {
|
||||
// Nothing can be scaled. Decode some more.
|
||||
return;
|
||||
}
|
||||
|
||||
// Prevent overflow of audio data in the FIFO.
|
||||
if (audio_fifo_input_bus_->frames() + audio_fifo_->frames() <=
|
||||
audio_fifo_->max_frames()) {
|
||||
audio_fifo_->Push(audio_fifo_input_bus_.get());
|
||||
} else {
|
||||
LOG(WARNING) << "Audio FIFO full; dropping samples.";
|
||||
}
|
||||
|
||||
// Make sure there's enough data to resample audio.
|
||||
if (audio_fifo_->frames() <
|
||||
2 * source_audio_params_.sample_rate() / kAudioPacketsPerSecond) {
|
||||
continue;
|
||||
}
|
||||
|
||||
std::unique_ptr<media::AudioBus> resampled_bus(media::AudioBus::Create(
|
||||
output_audio_params_.channels(),
|
||||
output_audio_params_.sample_rate() / kAudioPacketsPerSecond));
|
||||
audio_converter_->Convert(resampled_bus.get());
|
||||
audio_bus_queue_.push(resampled_bus.release());
|
||||
}
|
||||
}
|
||||
|
||||
bool FakeMediaSource::OnNewAudioFrame(AVFrame* frame) {
|
||||
int frames_read = frame->nb_samples;
|
||||
if (frames_read < 0)
|
||||
return false;
|
||||
|
||||
if (!audio_sent_ts_) {
|
||||
// Initialize the base time to the first packet in the file. This is set to
|
||||
// the frequency we send to the receiver. Not the frequency of the source
|
||||
// file. This is because we increment the frame count by samples we sent.
|
||||
audio_sent_ts_ = std::make_unique<AudioTimestampHelper>(
|
||||
output_audio_params_.sample_rate());
|
||||
// For some files this is an invalid value.
|
||||
base::TimeDelta base_ts;
|
||||
audio_sent_ts_->SetBaseTimestamp(base_ts);
|
||||
}
|
||||
|
||||
scoped_refptr<AudioBuffer> buffer = AudioBuffer::CopyFrom(
|
||||
AVSampleFormatToSampleFormat(av_audio_context_->sample_fmt,
|
||||
av_audio_context_->codec_id),
|
||||
ChannelLayoutToChromeChannelLayout(
|
||||
av_audio_context_->ch_layout.u.mask,
|
||||
av_audio_context_->ch_layout.nb_channels),
|
||||
av_audio_context_->ch_layout.nb_channels, av_audio_context_->sample_rate,
|
||||
frames_read, &frame->data[0],
|
||||
PtsToTimeDelta(frame->pts, av_audio_stream()->time_base));
|
||||
audio_algo_.EnqueueBuffer(buffer);
|
||||
return true;
|
||||
}
|
||||
|
||||
void FakeMediaSource::DecodeVideo(ScopedAVPacket packet) {
|
||||
auto result = video_decoding_loop_->DecodePacket(
|
||||
packet.get(), base::BindRepeating(&FakeMediaSource::OnNewVideoFrame,
|
||||
base::Unretained(this)));
|
||||
CHECK_EQ(result, FFmpegDecodingLoop::DecodeStatus::kOkay)
|
||||
<< "Failed to decode video.";
|
||||
}
|
||||
|
||||
bool FakeMediaSource::OnNewVideoFrame(AVFrame* frame) {
|
||||
gfx::Size size(av_video_context_->width, av_video_context_->height);
|
||||
|
||||
if (!video_first_pts_set_) {
|
||||
video_first_pts_ = frame->pts;
|
||||
video_first_pts_set_ = true;
|
||||
}
|
||||
const AVRational& time_base = av_video_stream()->time_base;
|
||||
base::TimeDelta timestamp =
|
||||
PtsToTimeDelta(frame->pts - video_first_pts_, time_base);
|
||||
if (timestamp < last_video_frame_timestamp_) {
|
||||
// Stream has rewound. Rebase |video_first_pts_|.
|
||||
const AVRational& frame_rate = av_video_stream()->r_frame_rate;
|
||||
timestamp = last_video_frame_timestamp_ +
|
||||
(base::Seconds(1) * frame_rate.den / frame_rate.num);
|
||||
const int64_t adjustment_pts = TimeDeltaToPts(timestamp, time_base);
|
||||
video_first_pts_ = frame->pts - adjustment_pts;
|
||||
}
|
||||
|
||||
AVFrame* shallow_copy = av_frame_clone(frame);
|
||||
scoped_refptr<media::VideoFrame> video_frame =
|
||||
VideoFrame::WrapExternalYuvData(
|
||||
media::PIXEL_FORMAT_I420, size, gfx::Rect(size), size,
|
||||
shallow_copy->linesize[0], shallow_copy->linesize[1],
|
||||
shallow_copy->linesize[2], shallow_copy->data[0],
|
||||
shallow_copy->data[1], shallow_copy->data[2], timestamp);
|
||||
if (!video_frame)
|
||||
return false;
|
||||
video_frame_queue_.push(video_frame);
|
||||
video_frame_queue_.back()->AddDestructionObserver(
|
||||
base::BindOnce(&AVFreeFrame, shallow_copy));
|
||||
last_video_frame_timestamp_ = timestamp;
|
||||
return true;
|
||||
}
|
||||
|
||||
void FakeMediaSource::Decode(bool decode_audio) {
|
||||
// Read the stream until one video frame can be decoded.
|
||||
while (true) {
|
||||
if (decode_audio && !audio_bus_queue_.empty())
|
||||
return;
|
||||
if (!decode_audio && !video_frame_queue_.empty())
|
||||
return;
|
||||
|
||||
bool audio_packet = false;
|
||||
ScopedAVPacket packet = DemuxOnePacket(&audio_packet);
|
||||
if (!packet) {
|
||||
VLOG(1) << "End of stream.";
|
||||
return;
|
||||
}
|
||||
|
||||
if (audio_packet)
|
||||
DecodeAudio(std::move(packet));
|
||||
else
|
||||
DecodeVideo(std::move(packet));
|
||||
}
|
||||
}
|
||||
|
||||
double FakeMediaSource::ProvideInput(
|
||||
media::AudioBus* output_bus,
|
||||
uint32_t frames_delayed,
|
||||
const media::AudioGlitchInfo& glitch_info) {
|
||||
if (audio_fifo_->frames() >= output_bus->frames()) {
|
||||
audio_fifo_->Consume(output_bus, 0, output_bus->frames());
|
||||
return 1.0;
|
||||
} else {
|
||||
LOG(WARNING) << "Not enough audio data for resampling.";
|
||||
output_bus->Zero();
|
||||
return 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
scoped_refptr<media::VideoFrame>
|
||||
FakeMediaSource::PopOldestInsertedVideoFrame() {
|
||||
CHECK(!inserted_video_frame_queue_.empty());
|
||||
scoped_refptr<media::VideoFrame> video_frame =
|
||||
inserted_video_frame_queue_.front();
|
||||
inserted_video_frame_queue_.pop();
|
||||
return video_frame;
|
||||
}
|
||||
|
||||
AVStream* FakeMediaSource::av_audio_stream() {
|
||||
return av_format_context_->streams[audio_stream_index_];
|
||||
}
|
||||
|
||||
AVStream* FakeMediaSource::av_video_stream() {
|
||||
return av_format_context_->streams[video_stream_index_];
|
||||
}
|
||||
|
||||
} // namespace cast
|
||||
} // namespace media
|
@ -1,192 +0,0 @@
|
||||
// Copyright 2014 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// A fake media source that generates video and audio frames to a cast
|
||||
// sender.
|
||||
// This class can transcode a WebM file using FFmpeg. It can also
|
||||
// generate an animation and audio of fixed frequency.
|
||||
|
||||
#ifndef MEDIA_CAST_TEST_FAKE_MEDIA_SOURCE_H_
|
||||
#define MEDIA_CAST_TEST_FAKE_MEDIA_SOURCE_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "base/containers/queue.h"
|
||||
#include "base/files/file_path.h"
|
||||
#include "base/files/memory_mapped_file.h"
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/memory/scoped_refptr.h"
|
||||
#include "base/memory/weak_ptr.h"
|
||||
#include "base/task/single_thread_task_runner.h"
|
||||
#include "base/time/tick_clock.h"
|
||||
#include "base/time/time.h"
|
||||
#include "media/base/audio_converter.h"
|
||||
#include "media/base/audio_parameters.h"
|
||||
#include "media/base/media_util.h"
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/ffmpeg/scoped_av_packet.h"
|
||||
#include "media/filters/audio_renderer_algorithm.h"
|
||||
|
||||
struct AVCodecContext;
|
||||
struct AVFormatContext;
|
||||
struct AVFrame;
|
||||
struct AVStream;
|
||||
|
||||
namespace media {
|
||||
|
||||
class AudioBus;
|
||||
class AudioConverter;
|
||||
class AudioFifo;
|
||||
class AudioTimestampHelper;
|
||||
class FFmpegGlue;
|
||||
class FFmpegDecodingLoop;
|
||||
class InMemoryUrlProtocol;
|
||||
class VideoFrame;
|
||||
|
||||
struct ScopedPtrAVFreeContext;
|
||||
|
||||
namespace cast {
|
||||
|
||||
class AudioFrameInput;
|
||||
class VideoFrameInput;
|
||||
class TestAudioBusFactory;
|
||||
|
||||
class FakeMediaSource final : public media::AudioConverter::InputCallback {
|
||||
public:
|
||||
// |task_runner| is to schedule decoding tasks.
|
||||
// |clock| is used by this source but is not owned.
|
||||
// |audio_config| is the desired audio config.
|
||||
// |video_config| is the desired video config.
|
||||
// |keep_frames| is true if all VideoFrames are saved in a queue.
|
||||
FakeMediaSource(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
|
||||
const base::TickClock* clock,
|
||||
const FrameSenderConfig& audio_config,
|
||||
const FrameSenderConfig& video_config,
|
||||
bool keep_frames);
|
||||
|
||||
FakeMediaSource(const FakeMediaSource&) = delete;
|
||||
FakeMediaSource& operator=(const FakeMediaSource&) = delete;
|
||||
|
||||
~FakeMediaSource() final;
|
||||
|
||||
// Transcode this file as the source of video and audio frames.
|
||||
// If |final_fps| is non zero then the file is played at the desired rate.
|
||||
void SetSourceFile(const base::FilePath& video_file, int final_fps);
|
||||
|
||||
// Set to true to randomly change the frame size at random points in time.
|
||||
// Only applies when SetSourceFile() is not used.
|
||||
void SetVariableFrameSizeMode(bool enabled);
|
||||
|
||||
void Start(scoped_refptr<AudioFrameInput> audio_frame_input,
|
||||
scoped_refptr<VideoFrameInput> video_frame_input);
|
||||
|
||||
const FrameSenderConfig& get_video_config() const { return video_config_; }
|
||||
|
||||
scoped_refptr<media::VideoFrame> PopOldestInsertedVideoFrame();
|
||||
|
||||
private:
|
||||
bool is_transcoding_audio() const { return audio_stream_index_ >= 0; }
|
||||
bool is_transcoding_video() const { return video_stream_index_ >= 0; }
|
||||
|
||||
void SendNextFrame();
|
||||
void SendNextFakeFrame();
|
||||
|
||||
void UpdateNextFrameSize();
|
||||
|
||||
// Return true if a frame was sent.
|
||||
bool SendNextTranscodedVideo(base::TimeDelta elapsed_time);
|
||||
|
||||
// Return true if a frame was sent.
|
||||
bool SendNextTranscodedAudio(base::TimeDelta elapsed_time);
|
||||
|
||||
// Helper methods to compute timestamps for the frame number specified.
|
||||
base::TimeDelta VideoFrameTime(int frame_number);
|
||||
|
||||
base::TimeDelta ScaleTimestamp(base::TimeDelta timestamp);
|
||||
|
||||
base::TimeDelta AudioFrameTime(int frame_number);
|
||||
|
||||
// Go to the beginning of the stream.
|
||||
void Rewind();
|
||||
|
||||
// Call FFmpeg to fetch one packet.
|
||||
ScopedAVPacket DemuxOnePacket(bool* audio);
|
||||
|
||||
void DecodeAudio(ScopedAVPacket packet);
|
||||
bool OnNewAudioFrame(AVFrame* frame);
|
||||
void DecodeVideo(ScopedAVPacket packet);
|
||||
bool OnNewVideoFrame(AVFrame* frame);
|
||||
void Decode(bool decode_audio);
|
||||
|
||||
// media::AudioConverter::InputCallback implementation.
|
||||
double ProvideInput(media::AudioBus* output_bus,
|
||||
uint32_t frames_delayed,
|
||||
const media::AudioGlitchInfo& glitch_info) final;
|
||||
|
||||
AVStream* av_audio_stream();
|
||||
AVStream* av_video_stream();
|
||||
|
||||
const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
|
||||
const media::AudioParameters output_audio_params_;
|
||||
const FrameSenderConfig video_config_;
|
||||
const bool keep_frames_;
|
||||
bool variable_frame_size_mode_;
|
||||
gfx::Size current_frame_size_;
|
||||
base::TimeTicks next_frame_size_change_time_;
|
||||
scoped_refptr<AudioFrameInput> audio_frame_input_;
|
||||
scoped_refptr<VideoFrameInput> video_frame_input_;
|
||||
uint8_t synthetic_count_;
|
||||
const raw_ptr<const base::TickClock> clock_; // Not owned by this class.
|
||||
|
||||
// Time when the stream starts.
|
||||
base::TimeTicks start_time_;
|
||||
|
||||
// The following three members are used only for fake frames.
|
||||
int audio_frame_count_; // Each audio frame is exactly 10ms.
|
||||
int video_frame_count_;
|
||||
std::unique_ptr<TestAudioBusFactory> audio_bus_factory_;
|
||||
|
||||
base::MemoryMappedFile file_data_;
|
||||
std::unique_ptr<InMemoryUrlProtocol> protocol_;
|
||||
std::unique_ptr<FFmpegGlue> glue_;
|
||||
raw_ptr<AVFormatContext> av_format_context_;
|
||||
|
||||
int audio_stream_index_;
|
||||
std::unique_ptr<AVCodecContext, ScopedPtrAVFreeContext> av_audio_context_;
|
||||
std::unique_ptr<FFmpegDecodingLoop> audio_decoding_loop_;
|
||||
AudioParameters source_audio_params_;
|
||||
double playback_rate_;
|
||||
|
||||
int video_stream_index_;
|
||||
std::unique_ptr<AVCodecContext, ScopedPtrAVFreeContext> av_video_context_;
|
||||
std::unique_ptr<FFmpegDecodingLoop> video_decoding_loop_;
|
||||
int video_frame_rate_numerator_;
|
||||
int video_frame_rate_denominator_;
|
||||
|
||||
// These are used for audio resampling.
|
||||
std::unique_ptr<media::AudioConverter> audio_converter_;
|
||||
std::unique_ptr<media::AudioFifo> audio_fifo_;
|
||||
std::unique_ptr<media::AudioBus> audio_fifo_input_bus_;
|
||||
media::AudioRendererAlgorithm audio_algo_;
|
||||
media::NullMediaLog media_log_;
|
||||
|
||||
// Track the timestamp of audio sent to the receiver.
|
||||
std::unique_ptr<media::AudioTimestampHelper> audio_sent_ts_;
|
||||
|
||||
base::queue<scoped_refptr<VideoFrame>> video_frame_queue_;
|
||||
base::queue<scoped_refptr<VideoFrame>> inserted_video_frame_queue_;
|
||||
int64_t video_first_pts_;
|
||||
bool video_first_pts_set_;
|
||||
base::TimeDelta last_video_frame_timestamp_;
|
||||
|
||||
base::queue<raw_ptr<AudioBus, CtnExperimental>> audio_bus_queue_;
|
||||
|
||||
// NOTE: Weak pointers must be invalidated before all other member variables.
|
||||
base::WeakPtrFactory<FakeMediaSource> weak_factory_{this};
|
||||
};
|
||||
|
||||
} // namespace cast
|
||||
} // namespace media
|
||||
|
||||
#endif // MEDIA_CAST_TEST_FAKE_MEDIA_SOURCE_H_
|
38
media/cast/test/fake_openscreen_clock.cc
Normal file
38
media/cast/test/fake_openscreen_clock.cc
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2024 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/cast/test/fake_openscreen_clock.h"
|
||||
|
||||
#include "base/test/simple_test_tick_clock.h"
|
||||
#include "base/time/time.h"
|
||||
|
||||
namespace media::cast {
|
||||
|
||||
base::SimpleTestTickClock* g_tick_clock = nullptr;
|
||||
base::TimeTicks* g_origin_ticks = nullptr;
|
||||
|
||||
// static
|
||||
void FakeOpenscreenClock::SetTickClock(base::SimpleTestTickClock* tick_clock) {
|
||||
CHECK(tick_clock);
|
||||
CHECK(!g_tick_clock);
|
||||
g_tick_clock = tick_clock;
|
||||
static base::TimeTicks origin_ticks(tick_clock->NowTicks());
|
||||
g_origin_ticks = &origin_ticks;
|
||||
}
|
||||
|
||||
// static
|
||||
void FakeOpenscreenClock::ClearTickClock() {
|
||||
CHECK(g_tick_clock);
|
||||
g_tick_clock = nullptr;
|
||||
*g_origin_ticks = base::TimeTicks();
|
||||
}
|
||||
|
||||
// static
|
||||
openscreen::Clock::time_point FakeOpenscreenClock::now() {
|
||||
CHECK(g_tick_clock);
|
||||
return openscreen::Clock::time_point(openscreen::Clock::duration(
|
||||
(g_tick_clock->NowTicks() - *g_origin_ticks).InMicroseconds()));
|
||||
}
|
||||
|
||||
} // namespace media::cast
|
31
media/cast/test/fake_openscreen_clock.h
Normal file
31
media/cast/test/fake_openscreen_clock.h
Normal file
@ -0,0 +1,31 @@
|
||||
// Copyright 2024 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef MEDIA_CAST_TEST_FAKE_OPENSCREEN_CLOCK_H_
|
||||
#define MEDIA_CAST_TEST_FAKE_OPENSCREEN_CLOCK_H_
|
||||
|
||||
#include "third_party/openscreen/src/platform/api/time.h"
|
||||
|
||||
namespace base {
|
||||
class SimpleTestTickClock;
|
||||
}
|
||||
|
||||
namespace media::cast {
|
||||
|
||||
// Provides an openscreen::ClockNowFunctionPtr backed by a
|
||||
// base::SimpleTickClock. Usage:
|
||||
// FakeOpenscreenClock::SetTickClock(&simple_tick_clock);
|
||||
// auto openscreen_object = OpenscreenObject(..., &FakeOpenscreenClock::now,
|
||||
// ...);
|
||||
// FakeOpenscreenClock::ClearTickClock();
|
||||
class FakeOpenscreenClock {
|
||||
public:
|
||||
static void SetTickClock(base::SimpleTestTickClock* clock);
|
||||
static void ClearTickClock();
|
||||
static openscreen::Clock::time_point now();
|
||||
};
|
||||
|
||||
} // namespace media::cast
|
||||
|
||||
#endif // MEDIA_CAST_TEST_FAKE_OPENSCREEN_CLOCK_H_
|
16
media/cast/test/mock_openscreen_environment.cc
Normal file
16
media/cast/test/mock_openscreen_environment.cc
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright 2024 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "media/cast/test/mock_openscreen_environment.h"
|
||||
|
||||
namespace media::cast {
|
||||
|
||||
MockOpenscreenEnvironment::MockOpenscreenEnvironment(
|
||||
openscreen::ClockNowFunctionPtr now_function,
|
||||
openscreen::TaskRunner& task_runner)
|
||||
: Environment(now_function, task_runner) {}
|
||||
|
||||
MockOpenscreenEnvironment::~MockOpenscreenEnvironment() = default;
|
||||
|
||||
} // namespace media::cast
|
37
media/cast/test/mock_openscreen_environment.h
Normal file
37
media/cast/test/mock_openscreen_environment.h
Normal file
@ -0,0 +1,37 @@
|
||||
// Copyright 2024 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef MEDIA_CAST_TEST_MOCK_OPENSCREEN_ENVIRONMENT_H_
|
||||
#define MEDIA_CAST_TEST_MOCK_OPENSCREEN_ENVIRONMENT_H_
|
||||
|
||||
#include "testing/gmock/include/gmock/gmock.h"
|
||||
#include "third_party/openscreen/src/cast/streaming/environment.h"
|
||||
|
||||
namespace media::cast {
|
||||
|
||||
// An openscreen::cast::Environment that can intercept all packet sends, for
|
||||
// unit testing.
|
||||
class MockOpenscreenEnvironment : public openscreen::cast::Environment {
|
||||
public:
|
||||
MockOpenscreenEnvironment(openscreen::ClockNowFunctionPtr now_function,
|
||||
openscreen::TaskRunner& task_runner);
|
||||
~MockOpenscreenEnvironment() override;
|
||||
|
||||
// Used to return fake values, to simulate a bound socket for testing.
|
||||
MOCK_METHOD(openscreen::IPEndpoint,
|
||||
GetBoundLocalEndpoint,
|
||||
(),
|
||||
(const, override));
|
||||
|
||||
// Used for intercepting packet sends from the implementation under test.
|
||||
MOCK_METHOD(void,
|
||||
SendPacket,
|
||||
(openscreen::ByteView packet,
|
||||
openscreen::cast::PacketMetadata metadata),
|
||||
(override));
|
||||
};
|
||||
|
||||
} // namespace media::cast
|
||||
|
||||
#endif // MEDIA_CAST_TEST_MOCK_OPENSCREEN_ENVIRONMENT_H_
|
@ -1,325 +0,0 @@
|
||||
// Copyright 2013 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Test application that simulates a cast sender - Data can be either generated
|
||||
// or read from a file.
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
#include "base/at_exit.h"
|
||||
#include "base/base_paths.h"
|
||||
#include "base/command_line.h"
|
||||
#include "base/files/file_path.h"
|
||||
#include "base/functional/bind.h"
|
||||
#include "base/functional/callback_helpers.h"
|
||||
#include "base/json/json_writer.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/message_loop/message_pump_type.h"
|
||||
#include "base/path_service.h"
|
||||
#include "base/run_loop.h"
|
||||
#include "base/strings/string_number_conversions.h"
|
||||
#include "base/task/single_thread_task_executor.h"
|
||||
#include "base/task/single_thread_task_runner.h"
|
||||
#include "base/threading/thread.h"
|
||||
#include "base/time/default_tick_clock.h"
|
||||
#include "base/values.h"
|
||||
#include "media/base/media.h"
|
||||
#include "media/base/mock_filters.h"
|
||||
#include "media/base/video_frame.h"
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_environment.h"
|
||||
#include "media/cast/cast_sender.h"
|
||||
#include "media/cast/logging/encoding_event_subscriber.h"
|
||||
#include "media/cast/logging/logging_defines.h"
|
||||
#include "media/cast/logging/proto/raw_events.pb.h"
|
||||
#include "media/cast/logging/receiver_time_offset_estimator_impl.h"
|
||||
#include "media/cast/logging/stats_event_subscriber.h"
|
||||
#include "media/cast/net/cast_transport.h"
|
||||
#include "media/cast/net/cast_transport_defines.h"
|
||||
#include "media/cast/net/udp_transport_impl.h"
|
||||
#include "media/cast/test/fake_media_source.h"
|
||||
#include "media/cast/test/utility/default_config.h"
|
||||
#include "media/cast/test/utility/input_builder.h"
|
||||
|
||||
namespace {
|
||||
|
||||
// Flags for this program:
|
||||
//
|
||||
// --address=xx.xx.xx.xx
|
||||
// IP address of receiver.
|
||||
//
|
||||
// --port=xxxx
|
||||
// Port number of receiver.
|
||||
//
|
||||
// --source-file=xxx.webm
|
||||
// WebM file as source of video frames.
|
||||
//
|
||||
// --fps=xx
|
||||
// Override framerate of the video stream.
|
||||
//
|
||||
// --vary-frame-sizes
|
||||
// Randomly vary the video frame sizes at random points in time. Has no
|
||||
// effect if --source-file is being used.
|
||||
const char kSwitchAddress[] = "address";
|
||||
const char kSwitchPort[] = "port";
|
||||
const char kSwitchSourceFile[] = "source-file";
|
||||
const char kSwitchFps[] = "fps";
|
||||
const char kSwitchVaryFrameSizes[] = "vary-frame-sizes";
|
||||
|
||||
void UpdateCastTransportStatus(
|
||||
media::cast::CastTransportStatus status) {
|
||||
VLOG(1) << "Transport status: " << status;
|
||||
}
|
||||
|
||||
void QuitLoopOnInitializationResult(base::OnceClosure quit_closure,
|
||||
media::cast::OperationalStatus result) {
|
||||
CHECK(result == media::cast::STATUS_INITIALIZED)
|
||||
<< "Cast sender uninitialized";
|
||||
std::move(quit_closure).Run();
|
||||
}
|
||||
|
||||
net::IPEndPoint CreateUDPAddress(const std::string& ip_str, uint16_t port) {
|
||||
net::IPAddress ip_address;
|
||||
CHECK(ip_address.AssignFromIPLiteral(ip_str));
|
||||
return net::IPEndPoint(ip_address, port);
|
||||
}
|
||||
|
||||
void WriteLogsToFileAndDestroySubscribers(
|
||||
const scoped_refptr<media::cast::CastEnvironment>& cast_environment,
|
||||
std::unique_ptr<media::cast::EncodingEventSubscriber>
|
||||
video_event_subscriber,
|
||||
std::unique_ptr<media::cast::EncodingEventSubscriber>
|
||||
audio_event_subscriber,
|
||||
base::ScopedFILE video_log_file,
|
||||
base::ScopedFILE audio_log_file) {
|
||||
cast_environment->logger()->Unsubscribe(video_event_subscriber.get());
|
||||
cast_environment->logger()->Unsubscribe(audio_event_subscriber.get());
|
||||
}
|
||||
|
||||
void WriteStatsAndDestroySubscribers(
|
||||
const scoped_refptr<media::cast::CastEnvironment>& cast_environment,
|
||||
std::unique_ptr<media::cast::StatsEventSubscriber> video_stats_subscriber,
|
||||
std::unique_ptr<media::cast::StatsEventSubscriber> audio_stats_subscriber,
|
||||
std::unique_ptr<media::cast::ReceiverTimeOffsetEstimatorImpl> estimator) {
|
||||
cast_environment->logger()->Unsubscribe(video_stats_subscriber.get());
|
||||
cast_environment->logger()->Unsubscribe(audio_stats_subscriber.get());
|
||||
cast_environment->logger()->Unsubscribe(estimator.get());
|
||||
|
||||
base::Value::Dict stats = video_stats_subscriber->GetStats();
|
||||
std::string json;
|
||||
base::JSONWriter::WriteWithOptions(
|
||||
stats, base::JSONWriter::OPTIONS_PRETTY_PRINT, &json);
|
||||
VLOG(0) << "Video stats: " << json;
|
||||
|
||||
stats = audio_stats_subscriber->GetStats();
|
||||
json.clear();
|
||||
base::JSONWriter::WriteWithOptions(
|
||||
stats, base::JSONWriter::OPTIONS_PRETTY_PRINT, &json);
|
||||
VLOG(0) << "Audio stats: " << json;
|
||||
}
|
||||
|
||||
class TransportClient : public media::cast::CastTransport::Client {
|
||||
public:
|
||||
explicit TransportClient(
|
||||
media::cast::LogEventDispatcher* log_event_dispatcher)
|
||||
: log_event_dispatcher_(log_event_dispatcher) {}
|
||||
|
||||
TransportClient(const TransportClient&) = delete;
|
||||
TransportClient& operator=(const TransportClient&) = delete;
|
||||
|
||||
void OnStatusChanged(media::cast::CastTransportStatus status) final {
|
||||
VLOG(1) << "Transport status: " << status;
|
||||
}
|
||||
void OnLoggingEventsReceived(
|
||||
std::unique_ptr<std::vector<media::cast::FrameEvent>> frame_events,
|
||||
std::unique_ptr<std::vector<media::cast::PacketEvent>> packet_events)
|
||||
final {
|
||||
DCHECK(log_event_dispatcher_);
|
||||
log_event_dispatcher_->DispatchBatchOfEvents(std::move(frame_events),
|
||||
std::move(packet_events));
|
||||
}
|
||||
void ProcessRtpPacket(std::unique_ptr<media::cast::Packet> packet) final {}
|
||||
|
||||
private:
|
||||
const raw_ptr<media::cast::LogEventDispatcher>
|
||||
log_event_dispatcher_; // Not owned by this class.
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
base::AtExitManager at_exit;
|
||||
base::CommandLine::Init(argc, argv);
|
||||
InitLogging(logging::LoggingSettings());
|
||||
|
||||
// Prepare media module for FFmpeg decoding.
|
||||
media::InitializeMediaLibrary();
|
||||
|
||||
base::Thread test_thread("Cast sender test app thread");
|
||||
base::Thread audio_thread("Cast audio encoder thread");
|
||||
base::Thread video_thread("Cast video encoder thread");
|
||||
test_thread.Start();
|
||||
audio_thread.Start();
|
||||
video_thread.Start();
|
||||
|
||||
base::SingleThreadTaskExecutor io_task_executor(base::MessagePumpType::IO);
|
||||
|
||||
// Default parameters.
|
||||
base::CommandLine* cmd = base::CommandLine::ForCurrentProcess();
|
||||
std::string remote_ip_address = cmd->GetSwitchValueASCII(kSwitchAddress);
|
||||
if (remote_ip_address.empty())
|
||||
remote_ip_address = "127.0.0.1";
|
||||
int remote_port = 0;
|
||||
if (!base::StringToInt(cmd->GetSwitchValueASCII(kSwitchPort), &remote_port) ||
|
||||
remote_port < 0 || remote_port > 65535) {
|
||||
remote_port = 2344;
|
||||
}
|
||||
LOG(INFO) << "Sending to " << remote_ip_address << ":" << remote_port
|
||||
<< ".";
|
||||
|
||||
media::cast::FrameSenderConfig audio_config =
|
||||
media::cast::GetDefaultAudioSenderConfig();
|
||||
media::cast::FrameSenderConfig video_config =
|
||||
media::cast::GetDefaultVideoSenderConfig();
|
||||
|
||||
// Running transport on the main thread.
|
||||
// Setting up transport config.
|
||||
net::IPEndPoint remote_endpoint =
|
||||
CreateUDPAddress(remote_ip_address, static_cast<uint16_t>(remote_port));
|
||||
|
||||
// Enable raw event and stats logging.
|
||||
// Running transport on the main thread.
|
||||
scoped_refptr<media::cast::CastEnvironment> cast_environment(
|
||||
new media::cast::CastEnvironment(
|
||||
base::DefaultTickClock::GetInstance(), io_task_executor.task_runner(),
|
||||
audio_thread.task_runner(), video_thread.task_runner()));
|
||||
|
||||
// SendProcess initialization.
|
||||
std::unique_ptr<media::cast::FakeMediaSource> fake_media_source(
|
||||
new media::cast::FakeMediaSource(test_thread.task_runner(),
|
||||
cast_environment->Clock(), audio_config,
|
||||
video_config, false));
|
||||
|
||||
int final_fps = 0;
|
||||
if (!base::StringToInt(cmd->GetSwitchValueASCII(kSwitchFps),
|
||||
&final_fps)){
|
||||
final_fps = 0;
|
||||
}
|
||||
base::FilePath source_path = cmd->GetSwitchValuePath(kSwitchSourceFile);
|
||||
if (!source_path.empty()) {
|
||||
LOG(INFO) << "Source: " << source_path.value();
|
||||
fake_media_source->SetSourceFile(source_path, final_fps);
|
||||
}
|
||||
if (cmd->HasSwitch(kSwitchVaryFrameSizes))
|
||||
fake_media_source->SetVariableFrameSizeMode(true);
|
||||
|
||||
// CastTransport initialization.
|
||||
std::unique_ptr<media::cast::CastTransport> transport_sender =
|
||||
media::cast::CastTransport::Create(
|
||||
cast_environment->Clock(), base::Seconds(1),
|
||||
std::make_unique<TransportClient>(cast_environment->logger()),
|
||||
std::make_unique<media::cast::UdpTransportImpl>(
|
||||
io_task_executor.task_runner(), net::IPEndPoint(),
|
||||
remote_endpoint, base::BindRepeating(&UpdateCastTransportStatus)),
|
||||
io_task_executor.task_runner());
|
||||
|
||||
// Set up event subscribers.
|
||||
std::unique_ptr<media::cast::EncodingEventSubscriber> video_event_subscriber;
|
||||
std::unique_ptr<media::cast::EncodingEventSubscriber> audio_event_subscriber;
|
||||
std::string video_log_file_name("/tmp/video_events.log.gz");
|
||||
std::string audio_log_file_name("/tmp/audio_events.log.gz");
|
||||
LOG(INFO) << "Logging audio events to: " << audio_log_file_name;
|
||||
LOG(INFO) << "Logging video events to: " << video_log_file_name;
|
||||
video_event_subscriber =
|
||||
std::make_unique<media::cast::EncodingEventSubscriber>(
|
||||
media::cast::VIDEO_EVENT, 10000);
|
||||
audio_event_subscriber =
|
||||
std::make_unique<media::cast::EncodingEventSubscriber>(
|
||||
media::cast::AUDIO_EVENT, 10000);
|
||||
cast_environment->logger()->Subscribe(video_event_subscriber.get());
|
||||
cast_environment->logger()->Subscribe(audio_event_subscriber.get());
|
||||
|
||||
// Subscribers for stats.
|
||||
std::unique_ptr<media::cast::ReceiverTimeOffsetEstimatorImpl>
|
||||
offset_estimator(new media::cast::ReceiverTimeOffsetEstimatorImpl());
|
||||
cast_environment->logger()->Subscribe(offset_estimator.get());
|
||||
std::unique_ptr<media::cast::StatsEventSubscriber> video_stats_subscriber(
|
||||
new media::cast::StatsEventSubscriber(media::cast::VIDEO_EVENT,
|
||||
cast_environment->Clock(),
|
||||
offset_estimator.get()));
|
||||
std::unique_ptr<media::cast::StatsEventSubscriber> audio_stats_subscriber(
|
||||
new media::cast::StatsEventSubscriber(media::cast::AUDIO_EVENT,
|
||||
cast_environment->Clock(),
|
||||
offset_estimator.get()));
|
||||
cast_environment->logger()->Subscribe(video_stats_subscriber.get());
|
||||
cast_environment->logger()->Subscribe(audio_stats_subscriber.get());
|
||||
|
||||
base::ScopedFILE video_log_file(fopen(video_log_file_name.c_str(), "w"));
|
||||
if (!video_log_file) {
|
||||
VLOG(1) << "Failed to open video log file for writing.";
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
base::ScopedFILE audio_log_file(fopen(audio_log_file_name.c_str(), "w"));
|
||||
if (!audio_log_file) {
|
||||
VLOG(1) << "Failed to open audio log file for writing.";
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
const int logging_duration_seconds = 10;
|
||||
io_task_executor.task_runner()->PostDelayedTask(
|
||||
FROM_HERE,
|
||||
base::BindOnce(&WriteLogsToFileAndDestroySubscribers, cast_environment,
|
||||
std::move(video_event_subscriber),
|
||||
std::move(audio_event_subscriber),
|
||||
std::move(video_log_file), std::move(audio_log_file)),
|
||||
base::Seconds(logging_duration_seconds));
|
||||
|
||||
io_task_executor.task_runner()->PostDelayedTask(
|
||||
FROM_HERE,
|
||||
base::BindOnce(&WriteStatsAndDestroySubscribers, cast_environment,
|
||||
std::move(video_stats_subscriber),
|
||||
std::move(audio_stats_subscriber),
|
||||
std::move(offset_estimator)),
|
||||
base::Seconds(logging_duration_seconds));
|
||||
|
||||
// CastSender initialization.
|
||||
std::unique_ptr<media::cast::CastSender> cast_sender =
|
||||
media::cast::CastSender::Create(cast_environment, transport_sender.get());
|
||||
{
|
||||
base::RunLoop loop;
|
||||
|
||||
io_task_executor.task_runner()->PostTask(
|
||||
FROM_HERE,
|
||||
base::BindOnce(
|
||||
&media::cast::CastSender::InitializeVideo,
|
||||
base::Unretained(cast_sender.get()),
|
||||
fake_media_source->get_video_config(),
|
||||
std::make_unique<media::MockVideoEncoderMetricsProvider>(),
|
||||
base::BindRepeating(&QuitLoopOnInitializationResult,
|
||||
loop.QuitWhenIdleClosure()),
|
||||
base::DoNothing()));
|
||||
loop.Run(); // Wait for video initialization.
|
||||
}
|
||||
{
|
||||
base::RunLoop loop;
|
||||
|
||||
io_task_executor.task_runner()->PostTask(
|
||||
FROM_HERE,
|
||||
base::BindOnce(&media::cast::CastSender::InitializeAudio,
|
||||
base::Unretained(cast_sender.get()), audio_config,
|
||||
base::BindRepeating(&QuitLoopOnInitializationResult,
|
||||
loop.QuitWhenIdleClosure())));
|
||||
loop.Run(); // Wait for audio initialization.
|
||||
}
|
||||
|
||||
fake_media_source->Start(cast_sender->audio_frame_input(),
|
||||
cast_sender->video_frame_input());
|
||||
base::RunLoop().Run();
|
||||
return 0;
|
||||
}
|
@ -1,708 +0,0 @@
|
||||
// Copyright 2014 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Simulate end to end streaming.
|
||||
//
|
||||
// Input:
|
||||
// --source=
|
||||
// WebM used as the source of video and audio frames.
|
||||
// --output=
|
||||
// File path for writing out the raw event log of the simulation session.
|
||||
// --sim-id=
|
||||
// Unique simulation ID.
|
||||
// --target-delay-ms=
|
||||
// Target playout delay to configure (integer number of milliseconds).
|
||||
// Optional; default is 400.
|
||||
// --max-frame-rate=
|
||||
// The maximum frame rate allowed at any time during the Cast session.
|
||||
// Optional; default is 30.
|
||||
// --source-frame-rate=
|
||||
// Overrides the playback rate; the source video will play faster/slower.
|
||||
// --run-time=
|
||||
// In seconds, how long the Cast session runs for.
|
||||
// Optional; default is 180.
|
||||
// --metrics-output=
|
||||
// File path to write PSNR and SSIM metrics between source frames and
|
||||
// decoded frames. Assumes all encoded frames are decoded.
|
||||
// --yuv-output=
|
||||
// File path to write YUV decoded frames in YUV4MPEG2 format.
|
||||
// --no-simulation
|
||||
// Do not run network simulation.
|
||||
//
|
||||
// Output:
|
||||
// - Raw event log of the simulation session tagged with the unique test ID,
|
||||
// written out to the specified file path.
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
#include "base/at_exit.h"
|
||||
#include "base/base_paths.h"
|
||||
#include "base/command_line.h"
|
||||
#include "base/containers/queue.h"
|
||||
#include "base/containers/span.h"
|
||||
#include "base/files/file_path.h"
|
||||
#include "base/files/file_util.h"
|
||||
#include "base/files/memory_mapped_file.h"
|
||||
#include "base/files/scoped_file.h"
|
||||
#include "base/functional/bind.h"
|
||||
#include "base/functional/callback_helpers.h"
|
||||
#include "base/json/json_writer.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/memory/raw_ptr.h"
|
||||
#include "base/numerics/checked_math.h"
|
||||
#include "base/path_service.h"
|
||||
#include "base/ranges/algorithm.h"
|
||||
#include "base/strings/string_number_conversions.h"
|
||||
#include "base/strings/stringprintf.h"
|
||||
#include "base/task/single_thread_task_runner.h"
|
||||
#include "base/test/simple_test_tick_clock.h"
|
||||
#include "base/time/tick_clock.h"
|
||||
#include "base/time/time.h"
|
||||
#include "base/values.h"
|
||||
#include "media/base/audio_bus.h"
|
||||
#include "media/base/fake_single_thread_task_runner.h"
|
||||
#include "media/base/media.h"
|
||||
#include "media/base/mock_filters.h"
|
||||
#include "media/base/video_frame.h"
|
||||
#include "media/cast/cast_config.h"
|
||||
#include "media/cast/cast_environment.h"
|
||||
#include "media/cast/cast_sender.h"
|
||||
#include "media/cast/logging/encoding_event_subscriber.h"
|
||||
#include "media/cast/logging/logging_defines.h"
|
||||
#include "media/cast/logging/proto/raw_events.pb.h"
|
||||
#include "media/cast/logging/raw_event_subscriber_bundle.h"
|
||||
#include "media/cast/logging/simple_event_subscriber.h"
|
||||
#include "media/cast/net/cast_transport.h"
|
||||
#include "media/cast/net/cast_transport_config.h"
|
||||
#include "media/cast/net/cast_transport_defines.h"
|
||||
#include "media/cast/net/cast_transport_impl.h"
|
||||
#include "media/cast/test/fake_media_source.h"
|
||||
#include "media/cast/test/loopback_transport.h"
|
||||
#include "media/cast/test/proto/network_simulation_model.pb.h"
|
||||
#include "media/cast/test/receiver/cast_receiver.h"
|
||||
#include "media/cast/test/skewed_tick_clock.h"
|
||||
#include "media/cast/test/utility/audio_utility.h"
|
||||
#include "media/cast/test/utility/default_config.h"
|
||||
#include "media/cast/test/utility/test_util.h"
|
||||
#include "media/cast/test/utility/udp_proxy.h"
|
||||
#include "media/cast/test/utility/video_utility.h"
|
||||
|
||||
using media::cast::proto::IPPModel;
|
||||
using media::cast::proto::NetworkSimulationModel;
|
||||
using media::cast::proto::NetworkSimulationModelType;
|
||||
|
||||
namespace media {
|
||||
namespace cast {
|
||||
namespace {
|
||||
// Command-line switch names; see the file-level comment for what each
// switch controls. `constexpr` makes these compile-time constants.
constexpr char kLibDir[] = "lib-dir";
constexpr char kModelPath[] = "model";
constexpr char kMetricsOutputPath[] = "metrics-output";
constexpr char kOutputPath[] = "output";
constexpr char kMaxFrameRate[] = "max-frame-rate";
constexpr char kNoSimulation[] = "no-simulation";
constexpr char kRunTime[] = "run-time";
constexpr char kSimulationId[] = "sim-id";
constexpr char kSourcePath[] = "source";
constexpr char kSourceFrameRate[] = "source-frame-rate";
constexpr char kTargetDelay[] = "target-delay-ms";
constexpr char kYuvOutputPath[] = "yuv-output";
|
||||
|
||||
// Reads the integer command-line switch `switch_name`, falling back to
// `default_value` when the switch is absent. CHECK-fails if the value is
// present but is not a positive integer.
int GetIntegerSwitchValue(const char* switch_name, int default_value) {
  const std::string value_text =
      base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(switch_name);
  if (value_text.empty()) {
    return default_value;
  }
  int parsed = 0;
  CHECK(base::StringToInt(value_text, &parsed));
  CHECK_GT(parsed, 0);
  return parsed;
}
|
||||
|
||||
// Logs the outcome of audio sender initialization; passed as the status
// callback to CastSender::InitializeAudio() below.
void LogAudioOperationalStatus(OperationalStatus status) {
  LOG(INFO) << "Audio status: " << status;
}
|
||||
|
||||
// Logs the outcome of video sender initialization; passed as the status
// callback to CastSender::InitializeVideo() below.
void LogVideoOperationalStatus(OperationalStatus status) {
  LOG(INFO) << "Video status: " << status;
}
|
||||
|
||||
struct PacketProxy {
|
||||
PacketProxy() : receiver(nullptr) {}
|
||||
void ReceivePacket(std::unique_ptr<Packet> packet) {
|
||||
if (receiver)
|
||||
receiver->ReceivePacket(std::move(packet));
|
||||
}
|
||||
raw_ptr<CastReceiver> receiver;
|
||||
};
|
||||
|
||||
class TransportClient : public CastTransport::Client {
|
||||
public:
|
||||
TransportClient(LogEventDispatcher* log_event_dispatcher,
|
||||
PacketProxy* packet_proxy)
|
||||
: log_event_dispatcher_(log_event_dispatcher),
|
||||
packet_proxy_(packet_proxy) {}
|
||||
|
||||
TransportClient(const TransportClient&) = delete;
|
||||
TransportClient& operator=(const TransportClient&) = delete;
|
||||
|
||||
void OnStatusChanged(CastTransportStatus status) final {
|
||||
LOG(INFO) << "Cast transport status: " << status;
|
||||
}
|
||||
void OnLoggingEventsReceived(
|
||||
std::unique_ptr<std::vector<FrameEvent>> frame_events,
|
||||
std::unique_ptr<std::vector<PacketEvent>> packet_events) final {
|
||||
DCHECK(log_event_dispatcher_);
|
||||
log_event_dispatcher_->DispatchBatchOfEvents(std::move(frame_events),
|
||||
std::move(packet_events));
|
||||
}
|
||||
void ProcessRtpPacket(std::unique_ptr<Packet> packet) final {
|
||||
if (packet_proxy_)
|
||||
packet_proxy_->ReceivePacket(std::move(packet));
|
||||
}
|
||||
|
||||
private:
|
||||
const raw_ptr<LogEventDispatcher>
|
||||
log_event_dispatcher_; // Not owned by this class.
|
||||
const raw_ptr<PacketProxy> packet_proxy_; // Not owned by this class.
|
||||
};
|
||||
|
||||
// Maintains a queue of encoded video frames.
|
||||
// This works by tracking FRAME_CAPTURE_END and FRAME_ENCODED events.
|
||||
// If a video frame is detected to be encoded it transfers a frame
|
||||
// from FakeMediaSource to its internal queue. Otherwise it drops a
|
||||
// frame from FakeMediaSource.
|
||||
class EncodedVideoFrameTracker final : public RawEventSubscriber {
 public:
  // `media_source` must outlive this tracker; it is not owned.
  EncodedVideoFrameTracker(FakeMediaSource* media_source)
      : media_source_(media_source), last_frame_event_type_(UNKNOWN) {}

  EncodedVideoFrameTracker(const EncodedVideoFrameTracker&) = delete;
  EncodedVideoFrameTracker& operator=(const EncodedVideoFrameTracker&) = delete;

  ~EncodedVideoFrameTracker() override = default;

  // RawEventSubscriber implementations.
  void OnReceiveFrameEvent(const FrameEvent& frame_event) override {
    // This method only cares about video FRAME_CAPTURE_END and
    // FRAME_ENCODED events.
    if (frame_event.media_type != VIDEO_EVENT) {
      return;
    }
    if (frame_event.type != FRAME_CAPTURE_END &&
        frame_event.type != FRAME_ENCODED) {
      return;
    }
    // If there are two consecutive FRAME_CAPTURE_END events that means
    // a frame is dropped.
    if (last_frame_event_type_ == FRAME_CAPTURE_END &&
        frame_event.type == FRAME_CAPTURE_END) {
      media_source_->PopOldestInsertedVideoFrame();
    }
    // A FRAME_ENCODED event transfers the oldest inserted source frame from
    // FakeMediaSource into the local queue so it can later be compared with
    // the corresponding decoded frame.
    if (frame_event.type == FRAME_ENCODED) {
      video_frames_.push(media_source_->PopOldestInsertedVideoFrame());
    }
    last_frame_event_type_ = frame_event.type;
  }

  void OnReceivePacketEvent(const PacketEvent& packet_event) override {
    // Don't care.
  }

  // Returns (and removes) the oldest queued source frame whose encode has
  // completed. CHECK-fails if the queue is empty.
  scoped_refptr<media::VideoFrame> PopOldestEncodedFrame() {
    CHECK(!video_frames_.empty());
    scoped_refptr<media::VideoFrame> video_frame = video_frames_.front();
    video_frames_.pop();
    return video_frame;
  }

 private:
  raw_ptr<FakeMediaSource> media_source_;  // Not owned.
  // Last video FRAME_CAPTURE_END/FRAME_ENCODED event type seen; used to
  // detect dropped frames (two consecutive FRAME_CAPTURE_ENDs).
  CastLoggingEvent last_frame_event_type_;
  // Source frames whose encode has completed, in encode order.
  base::queue<scoped_refptr<media::VideoFrame>> video_frames_;
};
|
||||
|
||||
// Safely creates a span for data backing a `frame` on specified `plane`.
|
||||
base::span<const uint8_t> MakeFrameSpan(const media::VideoFrame* frame,
|
||||
size_t plane) {
|
||||
base::CheckedNumeric<size_t> size = frame->stride(plane);
|
||||
size *= frame->rows(plane);
|
||||
return base::make_span(frame->data(plane), size.ValueOrDie());
|
||||
}
|
||||
|
||||
// Appends a YUV frame in I420 format to the file located at |path|.
|
||||
void AppendYuvToFile(const base::FilePath& path,
|
||||
scoped_refptr<media::VideoFrame> frame) {
|
||||
// Write YUV420 format to file.
|
||||
std::string header;
|
||||
base::StringAppendF(&header, "FRAME W%d H%d\n", frame->coded_size().width(),
|
||||
frame->coded_size().height());
|
||||
AppendToFile(path, header);
|
||||
AppendToFile(path, MakeFrameSpan(frame.get(), media::VideoFrame::kYPlane));
|
||||
AppendToFile(path, MakeFrameSpan(frame.get(), media::VideoFrame::kUPlane));
|
||||
AppendToFile(path, MakeFrameSpan(frame.get(), media::VideoFrame::kVPlane));
|
||||
}
|
||||
|
||||
// Accumulates per-frame results from GotVideoFrame() so that summary
// statistics can be computed once the simulation finishes.
struct GotVideoFrameOutput {
  // Number of video frames decoded by the receiver.
  int counter = 0;
  // Per-frame quality metrics; only populated when running a quality test.
  std::vector<double> psnr;
  std::vector<double> ssim;
};
|
||||
|
||||
// Receiver-side callback for each decoded video frame. Counts the frame,
// re-arms the receiver to deliver the next one, optionally records PSNR/SSIM
// against the matching source frame, and optionally appends the decoded frame
// to the YUV output file.
void GotVideoFrame(GotVideoFrameOutput* metrics_output,
                   const base::FilePath& yuv_output,
                   EncodedVideoFrameTracker* video_frame_tracker,
                   CastReceiver* cast_receiver,
                   scoped_refptr<media::VideoFrame> video_frame,
                   base::TimeTicks render_time,
                   bool continuous) {
  ++metrics_output->counter;
  // Immediately request the next decoded frame so delivery continues.
  cast_receiver->RequestDecodedVideoFrame(
      base::BindRepeating(&GotVideoFrame, metrics_output, yuv_output,
                          video_frame_tracker, cast_receiver));

  // If |video_frame_tracker| is available that means we're computing
  // quality metrics.
  if (video_frame_tracker) {
    scoped_refptr<media::VideoFrame> src_frame =
        video_frame_tracker->PopOldestEncodedFrame();
    metrics_output->psnr.push_back(I420PSNR(*src_frame, *video_frame));
    metrics_output->ssim.push_back(I420SSIM(*src_frame, *video_frame));
  }

  if (!yuv_output.empty()) {
    AppendYuvToFile(yuv_output, std::move(video_frame));
  }
}
|
||||
|
||||
void GotAudioFrame(int* counter,
|
||||
CastReceiver* cast_receiver,
|
||||
std::unique_ptr<AudioBus> audio_bus,
|
||||
base::TimeTicks playout_time,
|
||||
bool is_continuous) {
|
||||
++*counter;
|
||||
cast_receiver->RequestDecodedAudioFrame(
|
||||
base::BindRepeating(&GotAudioFrame, counter, cast_receiver));
|
||||
}
|
||||
|
||||
// Runs one end-to-end sender/receiver simulation session on a fake clock.
//
// |source_path| is the WebM file used as the media source (empty means the
// FakeMediaSource synthesizes content).
// |log_output_path| is the path to write serialized log.
// |metrics_output_path| is where PSNR/SSIM metrics are written; a non-empty
// path enables the quality test.
// |yuv_output_path| is where decoded frames are written in YUV4MPEG2 format.
// |extra_data| is extra tagging information to write to log.
// |model| describes the simulated network behavior.
void RunSimulation(const base::FilePath& source_path,
                   const base::FilePath& log_output_path,
                   const base::FilePath& metrics_output_path,
                   const base::FilePath& yuv_output_path,
                   const std::string& extra_data,
                   const NetworkSimulationModel& model) {
  // Fake clock. Make sure start time is non zero.
  base::SimpleTestTickClock testing_clock;
  testing_clock.Advance(base::Seconds(1));

  // Task runner driven by the fake clock; all components share it.
  scoped_refptr<FakeSingleThreadTaskRunner> task_runner =
      new FakeSingleThreadTaskRunner(&testing_clock);
  base::SingleThreadTaskRunner::CurrentDefaultHandle
      task_runner_current_default_handle(task_runner);

  // CastEnvironments. Sender and receiver share the fake clock but each gets
  // its own skewable view of it.
  test::SkewedTickClock sender_clock(&testing_clock);
  scoped_refptr<CastEnvironment> sender_env =
      new CastEnvironment(&sender_clock, task_runner, task_runner, task_runner);
  test::SkewedTickClock receiver_clock(&testing_clock);
  scoped_refptr<CastEnvironment> receiver_env = new CastEnvironment(
      &receiver_clock, task_runner, task_runner, task_runner);

  // Event subscriber. Store at most 1 hour of events.
  // (Capacities presumably assume at most 100 audio / 30 video events per
  // second for one hour -- TODO confirm.)
  EncodingEventSubscriber audio_event_subscriber(AUDIO_EVENT, 100 * 60 * 60);
  EncodingEventSubscriber video_event_subscriber(VIDEO_EVENT, 30 * 60 * 60);
  sender_env->logger()->Subscribe(&audio_event_subscriber);
  sender_env->logger()->Subscribe(&video_event_subscriber);

  // Audio sender config. Playout delay is pinned to --target-delay-ms.
  FrameSenderConfig audio_sender_config = GetDefaultAudioSenderConfig();
  audio_sender_config.min_playout_delay =
      audio_sender_config.max_playout_delay =
          base::Milliseconds(GetIntegerSwitchValue(kTargetDelay, 400));

  // Audio receiver config.
  FrameReceiverConfig audio_receiver_config = GetDefaultAudioReceiverConfig();
  audio_receiver_config.rtp_max_delay_ms =
      audio_sender_config.max_playout_delay.InMilliseconds();

  // Video sender config.
  FrameSenderConfig video_sender_config = GetDefaultVideoSenderConfig();
  video_sender_config.max_bitrate = 2500000;
  video_sender_config.min_bitrate = 2000000;
  video_sender_config.start_bitrate = 2000000;
  video_sender_config.min_playout_delay =
      video_sender_config.max_playout_delay =
          audio_sender_config.max_playout_delay;
  video_sender_config.max_frame_rate = GetIntegerSwitchValue(kMaxFrameRate, 30);

  // Video receiver config.
  FrameReceiverConfig video_receiver_config = GetDefaultVideoReceiverConfig();
  video_receiver_config.rtp_max_delay_ms =
      video_sender_config.max_playout_delay.InMilliseconds();

  // Loopback transport. Owned by CastTransport.
  LoopBackTransport* receiver_to_sender = new LoopBackTransport(receiver_env);
  LoopBackTransport* sender_to_receiver = new LoopBackTransport(sender_env);

  PacketProxy packet_proxy;

  // Cast receiver.
  std::unique_ptr<CastTransport> transport_receiver(new CastTransportImpl(
      &testing_clock, base::Seconds(1),
      std::make_unique<TransportClient>(receiver_env->logger(), &packet_proxy),
      base::WrapUnique(receiver_to_sender), task_runner));
  std::unique_ptr<CastReceiver> cast_receiver(
      CastReceiver::Create(receiver_env, audio_receiver_config,
                           video_receiver_config, transport_receiver.get()));

  // Attach the receiver only after it exists; until now the proxy dropped
  // any packets.
  packet_proxy.receiver = cast_receiver.get();

  // Cast sender and transport sender.
  std::unique_ptr<CastTransport> transport_sender(new CastTransportImpl(
      &testing_clock, base::Seconds(1),
      std::make_unique<TransportClient>(sender_env->logger(), nullptr),
      base::WrapUnique(sender_to_receiver), task_runner));
  std::unique_ptr<CastSender> cast_sender(
      CastSender::Create(sender_env, transport_sender.get()));

  // Initialize network simulation model.
  const bool use_network_simulation =
      model.type() == media::cast::proto::INTERRUPTED_POISSON_PROCESS;
  std::unique_ptr<test::InterruptedPoissonProcess> ipp;
  if (use_network_simulation) {
    LOG(INFO) << "Running Poisson based network simulation.";
    const IPPModel& ipp_model = model.ipp();
    std::vector<double> average_rates(ipp_model.average_rate_size());
    base::ranges::copy(ipp_model.average_rate(), average_rates.begin());
    ipp = std::make_unique<test::InterruptedPoissonProcess>(
        average_rates, ipp_model.coef_burstiness(), ipp_model.coef_variance(),
        0);
    receiver_to_sender->Initialize(ipp->NewBuffer(128 * 1024),
                                   transport_sender->PacketReceiverForTesting(),
                                   task_runner, &testing_clock);
    sender_to_receiver->Initialize(
        ipp->NewBuffer(128 * 1024),
        transport_receiver->PacketReceiverForTesting(), task_runner,
        &testing_clock);
  } else {
    LOG(INFO) << "No network simulation.";
    // A null PacketPipe means packets are forwarded without loss or delay.
    receiver_to_sender->Initialize(std::unique_ptr<test::PacketPipe>(),
                                   transport_sender->PacketReceiverForTesting(),
                                   task_runner, &testing_clock);
    sender_to_receiver->Initialize(
        std::unique_ptr<test::PacketPipe>(),
        transport_receiver->PacketReceiverForTesting(), task_runner,
        &testing_clock);
  }

  // Initialize a fake media source and a tracker to encoded video frames.
  const bool quality_test = !metrics_output_path.empty();
  FakeMediaSource media_source(task_runner, &testing_clock, audio_sender_config,
                               video_sender_config, quality_test);
  std::unique_ptr<EncodedVideoFrameTracker> video_frame_tracker;
  if (quality_test) {
    video_frame_tracker =
        std::make_unique<EncodedVideoFrameTracker>(&media_source);
    sender_env->logger()->Subscribe(video_frame_tracker.get());
  }

  // Quality metrics computed for each frame decoded.
  GotVideoFrameOutput metrics_output;

  // Start receiver.
  int audio_frame_count = 0;
  cast_receiver->RequestDecodedVideoFrame(
      base::BindRepeating(&GotVideoFrame, &metrics_output, yuv_output_path,
                          video_frame_tracker.get(), cast_receiver.get()));
  cast_receiver->RequestDecodedAudioFrame(base::BindRepeating(
      &GotAudioFrame, &audio_frame_count, cast_receiver.get()));

  // Initializing audio and video senders.
  cast_sender->InitializeAudio(audio_sender_config,
                               base::BindOnce(&LogAudioOperationalStatus));
  cast_sender->InitializeVideo(
      media_source.get_video_config(),
      std::make_unique<media::MockVideoEncoderMetricsProvider>(),
      base::BindRepeating(&LogVideoOperationalStatus), base::DoNothing());
  task_runner->RunTasks();

  // Truncate YUV files to prepare for writing.
  if (!yuv_output_path.empty()) {
    base::ScopedFILE file(base::OpenFile(yuv_output_path, "wb"));
    if (!file.get()) {
      LOG(ERROR) << "Cannot save YUV output to file.";
      return;
    }
    LOG(INFO) << "Writing YUV output to file: " << yuv_output_path.value();

    // Write YUV4MPEG2 header.
    const std::string header("YUV4MPEG2 W1280 H720 F30000:1001 Ip A1:1 C420\n");
    AppendToFile(yuv_output_path, header);
  }

  // Start sending.
  if (!source_path.empty()) {
    // 0 means using the FPS from the file.
    media_source.SetSourceFile(source_path,
                               GetIntegerSwitchValue(kSourceFrameRate, 0));
  }
  media_source.Start(cast_sender->audio_frame_input(),
                     cast_sender->video_frame_input());

  // By default runs simulation for 3 minutes or the desired duration
  // by using --run-time= flag.
  base::TimeDelta elapsed_time;
  const base::TimeDelta desired_run_time =
      base::Seconds(GetIntegerSwitchValue(kRunTime, 180));
  while (elapsed_time < desired_run_time) {
    // Each step is 100us.
    base::TimeDelta step = base::Microseconds(100);
    task_runner->Sleep(step);
    elapsed_time += step;
  }

  // Unsubscribe from logging events.
  sender_env->logger()->Unsubscribe(&audio_event_subscriber);
  sender_env->logger()->Unsubscribe(&video_event_subscriber);
  if (quality_test)
    sender_env->logger()->Unsubscribe(video_frame_tracker.get());

  // Get event logs for audio and video.
  media::cast::proto::LogMetadata audio_metadata, video_metadata;
  media::cast::FrameEventList audio_frame_events, video_frame_events;
  media::cast::PacketEventList audio_packet_events, video_packet_events;
  audio_metadata.set_extra_data(extra_data);
  video_metadata.set_extra_data(extra_data);
  audio_event_subscriber.GetEventsAndReset(&audio_metadata, &audio_frame_events,
                                           &audio_packet_events);
  video_event_subscriber.GetEventsAndReset(&video_metadata, &video_frame_events,
                                           &video_packet_events);

  // Print simulation results.

  // Compute and print statistics for video:
  //
  // * Total video frames captured.
  // * Total video frames encoded.
  // * Total video frames dropped.
  // * Total video frames received late.
  // * Average target bitrate.
  // * Average encoded bitrate.
  int total_video_frames = 0;
  int encoded_video_frames = 0;
  int dropped_video_frames = 0;
  int late_video_frames = 0;
  int64_t total_delay_of_late_frames_ms = 0;
  int64_t encoded_size = 0;
  int64_t target_bitrate = 0;
  for (size_t i = 0; i < video_frame_events.size(); ++i) {
    const media::cast::proto::AggregatedFrameEvent& event =
        *video_frame_events[i];
    ++total_video_frames;
    if (event.has_encoded_frame_size()) {
      ++encoded_video_frames;
      encoded_size += event.encoded_frame_size();
      target_bitrate += event.target_bitrate();
    } else {
      ++dropped_video_frames;
    }
    // Negative delay_millis is counted as a late frame (presumably the frame
    // was played out after its target time -- confirm against the proto).
    if (event.has_delay_millis() && event.delay_millis() < 0) {
      ++late_video_frames;
      total_delay_of_late_frames_ms += -event.delay_millis();
    }
  }

  // Subtract fraction of dropped frames from |elapsed_time| before estimating
  // the average encoded bitrate.
  const base::TimeDelta elapsed_time_undropped =
      total_video_frames <= 0
          ? base::TimeDelta()
          : (elapsed_time * (total_video_frames - dropped_video_frames) /
             total_video_frames);
  constexpr double kKilobitsPerByte = 8.0 / 1000;
  const double avg_encoded_bitrate =
      elapsed_time_undropped <= base::TimeDelta()
          ? 0
          : encoded_size * kKilobitsPerByte * elapsed_time_undropped.ToHz();
  double avg_target_bitrate =
      encoded_video_frames ? target_bitrate / encoded_video_frames / 1000 : 0;

  LOG(INFO) << "Configured target playout delay (ms): "
            << video_receiver_config.rtp_max_delay_ms;
  LOG(INFO) << "Audio frame count: " << audio_frame_count;
  LOG(INFO) << "Inserted video frames: " << total_video_frames;
  LOG(INFO) << "Decoded video frames: " << metrics_output.counter;
  LOG(INFO) << "Dropped video frames: " << dropped_video_frames;
  LOG(INFO) << "Late video frames: " << late_video_frames
            << " (average lateness: "
            << (late_video_frames > 0
                    ? static_cast<double>(total_delay_of_late_frames_ms) /
                          late_video_frames
                    : 0)
            << " ms)";
  LOG(INFO) << "Average encoded bitrate (kbps): " << avg_encoded_bitrate;
  LOG(INFO) << "Average target bitrate (kbps): " << avg_target_bitrate;
  LOG(INFO) << "Writing log: " << log_output_path.value();

  // Truncate file and then write serialized log.
  // NOTE(review): the file is only opened/truncated here -- no serialized
  // events are actually written. Confirm whether log serialization was
  // intentionally removed or is missing.
  {
    base::ScopedFILE file(base::OpenFile(log_output_path, "wb"));
    if (!file.get()) {
      LOG(INFO) << "Cannot write to log.";
      return;
    }
  }

  // Write quality metrics.
  if (quality_test) {
    LOG(INFO) << "Writing quality metrics: " << metrics_output_path.value();
    std::string line;
    for (size_t i = 0;
         i < metrics_output.psnr.size() && i < metrics_output.ssim.size();
         ++i) {
      base::StringAppendF(&line, "%f %f\n", metrics_output.psnr[i],
                          metrics_output.ssim[i]);
    }
    WriteFile(metrics_output_path, line.data(), line.length());
  }
}
|
||||
|
||||
// Builds the interrupted-Poisson-process network model that is used whenever
// no valid --model file is supplied.
NetworkSimulationModel DefaultModel() {
  NetworkSimulationModel model;
  model.set_type(cast::proto::INTERRUPTED_POISSON_PROCESS);
  IPPModel* ipp = model.mutable_ipp();
  ipp->set_coef_burstiness(0.609);
  ipp->set_coef_variance(4.1);

  static constexpr double kAverageRates[] = {
      0.609, 0.495, 0.561, 0.458, 0.538, 0.513, 0.585, 0.592,
      0.658, 0.556, 0.371, 0.595, 0.490, 0.980, 0.781, 0.463};
  for (const double rate : kAverageRates) {
    ipp->add_average_rate(rate);
  }

  return model;
}
|
||||
|
||||
bool IsModelValid(const NetworkSimulationModel& model) {
|
||||
if (!model.has_type())
|
||||
return false;
|
||||
NetworkSimulationModelType type = model.type();
|
||||
if (type == media::cast::proto::INTERRUPTED_POISSON_PROCESS) {
|
||||
if (!model.has_ipp())
|
||||
return false;
|
||||
const IPPModel& ipp = model.ipp();
|
||||
if (ipp.coef_burstiness() <= 0.0 || ipp.coef_variance() <= 0.0)
|
||||
return false;
|
||||
if (ipp.average_rate_size() == 0)
|
||||
return false;
|
||||
for (int i = 0; i < ipp.average_rate_size(); i++) {
|
||||
if (ipp.average_rate(i) <= 0.0)
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Loads the network simulation model from `model_path`. Returns a
// NO_SIMULATION model when --no-simulation is given, and falls back to
// DefaultModel() whenever the file is unset, unreadable, unparseable, or
// fails validation.
NetworkSimulationModel LoadModel(const base::FilePath& model_path) {
  if (base::CommandLine::ForCurrentProcess()->HasSwitch(kNoSimulation)) {
    NetworkSimulationModel no_simulation;
    no_simulation.set_type(media::cast::proto::NO_SIMULATION);
    return no_simulation;
  }

  if (model_path.empty()) {
    LOG(ERROR) << "Model path not set; Using default model.";
    return DefaultModel();
  }

  std::string serialized;
  if (!base::ReadFileToString(model_path, &serialized)) {
    LOG(ERROR) << "Failed to read model file.";
    return DefaultModel();
  }

  NetworkSimulationModel model;
  if (!model.ParseFromString(serialized)) {
    LOG(ERROR) << "Failed to parse model.";
    return DefaultModel();
  }
  if (!IsModelValid(model)) {
    LOG(ERROR) << "Invalid model.";
    return DefaultModel();
  }
  return model;
}
|
||||
|
||||
} // namespace
|
||||
} // namespace cast
|
||||
} // namespace media
|
||||
|
||||
// Tool entry point: parses command-line switches, loads the network model,
// and runs one simulation session via RunSimulation().
int main(int argc, char** argv) {
  base::AtExitManager at_exit;
  base::CommandLine::Init(argc, argv);
  InitLogging(logging::LoggingSettings());

  const base::CommandLine* cmd = base::CommandLine::ForCurrentProcess();
  // NOTE(review): `media_path` is resolved (from --lib-dir or the build
  // output dir) but never used afterwards; presumably
  // InitializeMediaLibrary() locates FFmpeg itself -- confirm.
  base::FilePath media_path = cmd->GetSwitchValuePath(media::cast::kLibDir);
  if (media_path.empty()) {
    if (!base::PathService::Get(base::DIR_OUT_TEST_DATA_ROOT, &media_path)) {
      LOG(ERROR) << "Failed to load FFmpeg.";
      return 1;
    }
  }

  media::InitializeMediaLibrary();

  base::FilePath source_path =
      cmd->GetSwitchValuePath(media::cast::kSourcePath);
  base::FilePath log_output_path =
      cmd->GetSwitchValuePath(media::cast::kOutputPath);
  if (log_output_path.empty()) {
    // Default the event log to <temp dir>/sim-events.gz.
    base::GetTempDir(&log_output_path);
    log_output_path = log_output_path.AppendASCII("sim-events.gz");
  }
  base::FilePath metrics_output_path =
      cmd->GetSwitchValuePath(media::cast::kMetricsOutputPath);
  base::FilePath yuv_output_path =
      cmd->GetSwitchValuePath(media::cast::kYuvOutputPath);
  std::string sim_id = cmd->GetSwitchValueASCII(media::cast::kSimulationId);

  NetworkSimulationModel model =
      media::cast::LoadModel(cmd->GetSwitchValuePath(media::cast::kModelPath));

  // Tag the event log with the simulation id, serialized as JSON extra data.
  base::Value::Dict values;
  values.Set("sim", true);
  values.Set("sim-id", sim_id);

  std::string extra_data;
  base::JSONWriter::Write(values, &extra_data);

  // Run.
  media::cast::RunSimulation(source_path, log_output_path, metrics_output_path,
                             yuv_output_path, extra_data, model);
  return 0;
}
|
Reference in New Issue
Block a user