0

Cleanup all TODO(miu) tasks

Now that miu@ is a Xoogler, this patch updates all remaining TODOs,
either removing them where appropriate (e.g., for Verified or WontFix
bugs), changing ownership to @jophba, or fixing them if very minor.

This patch will be the first in a cleanup series intended to eliminate
dangling tech debt that's not tied to any objectives.

Change-Id: I91beb1a445096916871bca0d9b8e8a3231adb04d
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3498526
Reviewed-by: John Rummell <jrummell@chromium.org>
Reviewed-by: Mike Wasserman <msw@chromium.org>
Reviewed-by: Derek Schuff <dschuff@chromium.org>
Reviewed-by: Brandon Jones <bajones@chromium.org>
Reviewed-by: Guido Urdaneta <guidou@chromium.org>
Reviewed-by: Jonathan Ross <jonross@chromium.org>
Reviewed-by: Nasko Oskov <nasko@chromium.org>
Reviewed-by: Mark Foltz <mfoltz@chromium.org>
Commit-Queue: Jordan Bayles <jophba@chromium.org>
Cr-Commit-Position: refs/heads/main@{#979929}
This commit is contained in:
Jordan Bayles
2022-03-10 20:53:16 +00:00
committed by Chromium LUCI CQ
parent 14c48bf965
commit 98d6d51f13
57 changed files with 39 additions and 203 deletions
chrome
browser
test
data
extensions
api_test
tab_capture
end_to_end
content
docs/media
gpu/command_buffer/client
media
third_party/blink/renderer

@ -204,8 +204,6 @@ ExtensionFunction::ResponseAction TabCaptureCaptureFunction::Run() {
target_contents, extension_id, false, extension()->url(), source,
extension()->name(), extension_web_contents);
if (device_id.empty()) {
// TODO(miu): Allow multiple consumers of single tab capture.
// http://crbug.com/535336
return RespondNow(Error(kCapturingSameTab));
}
AddMediaStreamSourceConstraints(target_contents, &params->options, device_id);

@ -44,8 +44,6 @@ class TabCaptureRegistry::LiveRequest : public content::WebContentsObserver {
registry_(registry),
capture_state_(tab_capture::TAB_CAPTURE_STATE_NONE),
is_verified_(false),
// TODO(miu): This initial value for |is_fullscreened_| is a faulty
// assumption. http://crbug.com/350491
is_fullscreened_(false),
render_process_id_(
target_contents->GetMainFrame()->GetProcess()->GetID()),

@ -161,10 +161,8 @@ class OffscreenTab final : public ProfileObserver,
// Poll timer to monitor the capturer count on |offscreen_tab_web_contents_|.
// When the capturer count returns to zero, this OffscreenTab is automatically
// destroyed.
//
// TODO(miu): Add a method to WebContentsObserver to report capturer count
// changes and get rid of this polling-based approach.
// http://crbug.com/540965
// TODO(https://crbug.com/540965): add a method to WebContentsObserver to
// report capturer count and get rid of this polling-based approach.
base::OneShotTimer capture_poll_timer_;
// This is false until after the Start() method is called, and capture of the

@ -529,11 +529,7 @@ TEST_F(FullscreenControllerStateUnitTest, OneCapturedFullscreenedTab) {
EXPECT_TRUE(wc_delegate->IsFullscreenForTabOrPending(first_tab));
EXPECT_FALSE(wc_delegate->IsFullscreenForTabOrPending(second_tab));
EXPECT_FALSE(GetFullscreenController()->IsWindowFullscreenForTabOrPending());
// TODO(miu): Need to make an adjustment to content::WebContentsViewMac for
// the following to work:
#if !BUILDFLAG(IS_MAC)
EXPECT_EQ(kCaptureSize, first_tab->GetViewBounds().size());
#endif
// Switch back to the first tab and exit fullscreen.
browser()->tab_strip_model()->ActivateTabAt(

@ -240,7 +240,3 @@ chrome.test.runTests([
});
}
]);
// TODO(miu): Once the WebAudio API is finalized, we should add a test to emit a
// tone from the sender page, and have the receiver page check for the audio
// tone.

@ -626,11 +626,11 @@ IN_PROC_BROWSER_TEST_P(CompositingRenderWidgetHostViewBrowserTest,
// Tests that the callback passed to CopyFromSurface is always called, even
// when the RenderWidgetHostView is deleting in the middle of an async copy.
//
// TODO(miu): On some bots (e.g., ChromeOS and Cast Shell), this test fails
// because the RunLoop quits before its QuitClosure() is run. This is because
// the call to WebContents::Close() leads to something that makes the current
// thread's RunLoop::Delegate constantly report "should quit." We'll need to
// find a better way of testing this functionality.
// TODO(https://crbug.com/1033066): On some bots (e.g., ChromeOS and Cast
// Shell), this test fails because the RunLoop quits before its QuitClosure() is
// run. This is because the call to WebContents::Close() leads to something that
// makes the current thread's RunLoop::Delegate constantly report "should quit."
// We'll need to find a better way of testing this functionality.
IN_PROC_BROWSER_TEST_P(CompositingRenderWidgetHostViewBrowserTest,
DISABLED_CopyFromSurface_CallbackDespiteDelete) {
SET_UP_SURFACE_OR_PASS_TEST(nullptr);

@ -67,7 +67,8 @@ struct CONTENT_EXPORT DesktopMediaID {
// it possible for both of these to be non-null, which means both IDs are
// referring to the same logical window.
Id id = kNullId;
// TODO(miu): Make this an int, after clean-up for http://crbug.com/513490.
// TODO(https://crbug.com/1304392): Change window ID to an integer.
Id window_id = kNullId;
// This records whether the desktop share has sound or not.

@ -2559,10 +2559,6 @@ void PepperPluginInstanceImpl::SetSizeAttributesForFullscreen() {
if (!render_frame_)
return;
// TODO(miu): Revisit this logic. If the style must be modified for correct
// behavior, the width and height should probably be set to 100%, rather than
// a fixed screen size.
display::ScreenInfo info =
render_frame_->GetLocalRootWebFrameWidget()->GetScreenInfo();
screen_size_for_fullscreen_ = info.rect.size();

@ -388,7 +388,7 @@ requested to be presented through the Presentation API.
## Offscreen Rendering
TODO(miu)
*TODO: Add notes about off-screen rendering.*
# Security

@ -37,8 +37,6 @@ namespace {
const GLfloat kRGBtoGrayscaleColorWeights[4] = {0.213f, 0.715f, 0.072f, 0.0f};
// Linear translation from RGB to YUV color space.
// TODO(miu): This needs to stop being hardcoded...and need to identify to&from
// color spaces.
const GLfloat kRGBtoYColorWeights[4] = {0.257f, 0.504f, 0.098f, 0.0625f};
const GLfloat kRGBtoUColorWeights[4] = {-0.148f, -0.291f, 0.439f, 0.5f};
const GLfloat kRGBtoVColorWeights[4] = {0.439f, -0.368f, -0.071f, 0.5f};

@ -115,8 +115,6 @@ MEDIA_SHMEM_EXPORT uint32_t ComputeAudioOutputBufferSize(int channels,
class MEDIA_SHMEM_EXPORT AudioParameters {
public:
// TODO(miu): Rename this enum to something that correctly reflects its
// semantics, such as "TransportScheme."
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.media
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: AudioEncodingFormat
// GENERATED_JAVA_PREFIX_TO_STRIP: AUDIO_

@ -267,8 +267,6 @@ static absl::optional<VideoFrameLayout> GetDefaultLayout(
}
default:
// TODO(miu): This function should support any pixel format.
// http://crbug.com/555909 .
DLOG(ERROR) << "Unsupported pixel format"
<< VideoPixelFormatToString(format);
return absl::nullopt;

@ -588,9 +588,6 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Returns a dictionary of optional metadata. This contains information
// associated with the frame that downstream clients might use for frame-level
// logging, quality/performance optimizations, signaling, etc.
//
// TODO(miu): Move some of the "extra" members of VideoFrame (below) into
// here as a later clean-up step.
const VideoFrameMetadata& metadata() const { return metadata_; }
VideoFrameMetadata& metadata() { return metadata_; }
void set_metadata(const VideoFrameMetadata& metadata) {

@ -90,9 +90,6 @@ bool ThreadSafeCaptureOracle::ObserveEventAndDecideCapture(
frame_number = oracle_.next_frame_number();
visible_size = oracle_.capture_size();
// TODO(miu): Clients should request exact padding, instead of this
// memory-wasting hack to make frames that are compatible with all HW
// encoders. http://crbug.com/555911
coded_size.SetSize(base::bits::AlignUp(visible_size.width(), 16),
base::bits::AlignUp(visible_size.height(), 16));

@ -248,9 +248,6 @@ base::TimeTicks AnimatedContentSampler::ComputeNextFrameTimestamp(
// clock relative to the video hardware, which affects the event times; and
// 2) The small error introduced by this frame timestamp rewriting, as it is
// based on averaging over recent events.
//
// TODO(miu): This is similar to the ClockSmoother in
// media/base/audio_shifter.cc. Consider refactor-and-reuse here.
const base::TimeDelta drift = ideal_timestamp - event_time;
const int64_t correct_over_num_frames =
kDriftCorrection.IntDiv(sampling_period_);

@ -64,8 +64,6 @@ double FractionFromExpectedFrameRate(base::TimeDelta delta, int frame_rate) {
}
// Returns the next-higher TimeTicks value.
// TODO(miu): Patch FeedbackSignalAccumulator reset behavior and remove this
// hack.
base::TimeTicks JustAfter(base::TimeTicks t) {
return t + base::Microseconds(1);
}

@ -95,8 +95,6 @@ VideoCaptureParams::SuggestConstraints() const {
break;
case ResolutionChangePolicy::FIXED_ASPECT_RATIO: {
// TODO(miu): This is a place-holder until "min constraints" are plumbed-
// in from the MediaStream framework. http://crbug.com/473336
constexpr int kMinLines = 180;
if (max_frame_size.height() <= kMinLines) {
min_frame_size = max_frame_size;

@ -280,10 +280,6 @@ static_library("test_support") {
# FFMPEG software video decoders are not available on Android and/or
# Chromecast content_shell builds.
#
# TODO(miu): There *are* hardware decoder APIs available via FFMPEG, and we
# should use those for the Android receiver. See media's BUILD.gn for usage
# details. http://crbug.com/558714
if (!is_android && !is_chromecast) {
sources += [
"test/fake_media_source.cc",
@ -312,7 +308,7 @@ test("cast_unittests") {
"net/rtcp/rtcp_unittest.cc",
"net/rtcp/rtcp_utility_unittest.cc",
# TODO(miu): The following two are test utility modules. Rename/move the
# TODO(jophba): The following two are test utility modules. Rename/move the
# files.
"net/rtcp/test_rtcp_packet_builder.cc",
"net/rtcp/test_rtcp_packet_builder.h",

@ -18,8 +18,6 @@ VideoCodecParams::VideoCodecParams(const VideoCodecParams& other) = default;
VideoCodecParams::~VideoCodecParams() = default;
// TODO(miu): Provide IsValidConfig() functions?
FrameSenderConfig::FrameSenderConfig()
: sender_ssrc(0),
receiver_ssrc(0),
@ -29,8 +27,8 @@ FrameSenderConfig::FrameSenderConfig()
// All three delays are set to the same value due to adaptive latency
// being disabled in Chrome. This will be fixed as part of the migration
// to libcast.
min_playout_delay(base::Milliseconds(kDefaultRtpMaxDelayMs)),
max_playout_delay(min_playout_delay),
min_playout_delay(kDefaultTargetPlayoutDelay),
max_playout_delay(kDefaultTargetPlayoutDelay),
animated_playout_delay(min_playout_delay),
rtp_payload_type(RtpPayloadType::UNKNOWN),
use_external_encoder(false),
@ -49,7 +47,7 @@ FrameSenderConfig::~FrameSenderConfig() = default;
FrameReceiverConfig::FrameReceiverConfig()
: receiver_ssrc(0),
sender_ssrc(0),
rtp_max_delay_ms(kDefaultRtpMaxDelayMs),
rtp_max_delay_ms(kDefaultTargetPlayoutDelay.InMilliseconds()),
rtp_payload_type(RtpPayloadType::UNKNOWN),
rtp_timebase(0),
channels(0),

@ -70,8 +70,10 @@ enum class RtpPayloadType {
LAST = VIDEO_AV1
};
// TODO(miu): Eliminate these after moving "default config" into the top-level
// media/cast directory. http://crbug.com/530839
// Desired end-to-end latency.
// TODO(https://crbug.com/1304761): default playout delay should be 400ms.
constexpr base::TimeDelta kDefaultTargetPlayoutDelay = base::Milliseconds(100);
enum SuggestedDefaults {
// Audio encoder bitrate. Zero means "auto," which asks the encoder to select
// a bitrate that dynamically adjusts to the content. Otherwise, a constant
@ -87,14 +89,6 @@ enum SuggestedDefaults {
// Suggested default maximum video frame rate.
kDefaultMaxFrameRate = 30,
// End-to-end latency in milliseconds.
//
// DO NOT USE THIS (400 ms is proven as ideal for general-purpose use).
//
// TODO(miu): Change to 400, and confirm nothing has broken in later change.
// http://crbug.com/530839
kDefaultRtpMaxDelayMs = 100,
// Suggested minimum and maximum video bitrates for general-purpose use (up to
// 1080p, 30 FPS).
kDefaultMinVideoBitrate = 300000,
@ -208,7 +202,6 @@ struct FrameSenderConfig {
VideoCodecParams video_codec_params;
};
// TODO(miu): Naming and minor type changes are badly needed in a later CL.
struct FrameReceiverConfig {
FrameReceiverConfig();
FrameReceiverConfig(const FrameReceiverConfig& other);
@ -226,7 +219,7 @@ struct FrameReceiverConfig {
// transmit/retransmit, receive, decode, and render; given its run-time
// environment (sender/receiver hardware performance, network conditions,
// etc.).
int rtp_max_delay_ms; // TODO(miu): Change to TimeDelta target_playout_delay.
int rtp_max_delay_ms;
// RTP payload type enum: Specifies the type/encoding of frame data.
RtpPayloadType rtp_payload_type;
@ -245,8 +238,6 @@ struct FrameReceiverConfig {
double target_frame_rate;
// Codec used for the compression of signal data.
// TODO(miu): Merge the AudioCodec and VideoCodec enums into one so this union
// is not necessary.
Codec codec;
// The AES crypto key and initialization vector. Each of these strings

@ -158,9 +158,6 @@ bool PacedSender::ShouldResend(const PacketKey& packet_key,
// packet Y sent just before X. Reject retransmission of X if ACK for
// Y has not been received.
// Only do this for video packets.
//
// TODO(miu): This sounds wrong. Audio packets are always transmitted first
// (because they are put in |priority_packet_list_|, see PopNextPacket()).
auto session_it = sessions_.find(packet_key.ssrc);
// The session should always have been registered in |sessions_|.
DCHECK(session_it != sessions_.end());
@ -411,9 +408,6 @@ void PacedSender::SendStoredPackets() {
}
// Keep ~0.5 seconds of data (1000 packets).
//
// TODO(miu): This has no relation to the actual size of the frames, and so
// there's no way to reason whether 1000 is enough or too much, or whatever.
if (send_history_buffer_.size() >=
max_burst_size_ * kMaxDedupeWindowMs / kPacingIntervalMs) {
send_history_.swap(send_history_buffer_);
@ -432,8 +426,6 @@ void PacedSender::LogPacketEvent(const Packet& packet, CastLoggingEvent type) {
PacketEvent& event = recent_packet_events_->back();
// Populate the new PacketEvent by parsing the wire-format |packet|.
//
// TODO(miu): This parsing logic belongs in RtpParser.
event.timestamp = clock_->NowTicks();
event.type = type;
base::BigEndianReader reader(packet);

@ -63,11 +63,6 @@ void ReceiverRtcpSession::OnReceivedNtp(uint32_t ntp_seconds,
const base::TimeTicks now = clock_->NowTicks();
time_last_report_received_ = now;
// TODO(miu): This clock offset calculation does not account for packet
// transit time over the network. End2EndTest.EvilNetwork confirms that this
// contributes a very significant source of error here. Determine whether
// RTT should be factored-in, and how that changes the rest of the
// calculation.
const base::TimeDelta measured_offset =
now - ConvertNtpToTimeTicks(ntp_seconds, ntp_fraction);
local_clock_ahead_by_.Update(now, measured_offset);

@ -329,11 +329,6 @@ bool RtcpParser::ParseFeedbackCommon(base::BigEndianReader* reader,
!reader->ReadU16(&cast_message_.target_delay_ms))
return false;
// TODO(miu): 8 bits is not enough. If an RTCP packet is received very late
// (e.g., more than 1.2 seconds late for 100 FPS audio), the frame ID here
// will be mis-interpreted as a higher-numbered frame than what the packet
// intends to represent. This could make the sender's tracking of ACK'ed
// frames inconsistent.
cast_message_.ack_frame_id =
max_valid_frame_id_.ExpandLessThanOrEqual(truncated_last_frame_id);

@ -14,8 +14,6 @@
namespace media {
namespace cast {
// TODO(miu): Consolidate with RtpPacketizer as a single Cast packet
// serialization implementation.
class RtpPacketBuilder {
public:
RtpPacketBuilder();

@ -14,9 +14,6 @@
namespace media {
namespace cast {
// TODO(miu): RtpParser and RtpPacketizer should be consolidated into a single
// module that handles all RTP/Cast packet serialization and deserialization
// throughout the media/cast library.
class RtpParser {
public:
RtpParser(uint32_t expected_sender_ssrc, uint8_t expected_payload_type);

@ -144,9 +144,7 @@ void RtpSender::ResendFrameForKickstart(FrameId frame_id,
}
void RtpSender::UpdateSequenceNumber(Packet* packet) {
// TODO(miu): This is an abstraction violation. This needs to be a part of
// the overall packet (de)serialization consolidation.
static const int kByteOffsetToSequenceNumber = 2;
constexpr int kByteOffsetToSequenceNumber = 2;
base::BigEndianWriter big_endian_writer(
reinterpret_cast<char*>((&packet->front()) + kByteOffsetToSequenceNumber),
sizeof(uint16_t));

@ -139,18 +139,13 @@ void Av1Encoder::ConfigureForNewFrameSize(const gfx::Size& frame_size) {
// Rate control settings.
config_.rc_dropframe_thresh = 0; // The encoder may not drop any frames.
config_.rc_resize_mode = 0; // TODO(miu): Why not? Investigate this.
config_.rc_resize_mode = 0;
config_.rc_end_usage = AOM_CBR;
config_.rc_target_bitrate = bitrate_kbit_;
config_.rc_min_quantizer = cast_config_.video_codec_params.min_qp;
config_.rc_max_quantizer = cast_config_.video_codec_params.max_qp;
// TODO(miu): Revisit these now that the encoder is being successfully
// micro-managed.
config_.rc_undershoot_pct = 100;
config_.rc_overshoot_pct = 15;
// TODO(miu): Document why these rc_buf_*_sz values were chosen and/or
// research for better values. Should they be computed from the target
// playout delay?
config_.rc_buf_initial_sz = 500;
config_.rc_buf_optimal_sz = 600;
config_.rc_buf_sz = 1000;

@ -231,7 +231,6 @@ base::TimeDelta FrameSender::GetAllowedInFlightMediaDuration() const {
// The total amount allowed in-flight media should equal the amount that fits
// within the entire playout delay window, plus the amount of time it takes to
// receive an ACK from the receiver.
// TODO(miu): Research is needed, but there is likely a better formula.
return target_playout_delay_ + (current_round_trip_time_ / 2);
}
@ -370,7 +369,6 @@ void FrameSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
} else {
duplicate_ack_counter_ = 0;
}
// TODO(miu): The values "2" and "3" should be derived from configuration.
if (duplicate_ack_counter_ >= 2 && duplicate_ack_counter_ % 3 == 2) {
ResendForKickstart();
}

@ -550,9 +550,6 @@ void H264VideoToolboxEncoder::CompressionCallback(void* encoder_opaque,
&encoded_frame->data);
}
// TODO(miu): Compute and populate the |encoder_utilization| and
// |lossy_utilization| performance metrics in |encoded_frame|.
encoded_frame->encode_completion_time =
encoder->cast_environment_->Clock()->NowTicks();
encoder->cast_environment_->GetTaskRunner(CastEnvironment::MAIN)

@ -39,8 +39,8 @@ struct SenderEncodedFrame final : public EncodedFrame {
// encode the frame within the target bitrate (even at its lowest quality
// setting). Negative values indicate the field was not computed.
//
// TODO(miu): Rename to idealized_bitrate_utilization.
double lossy_utilization;
// TODO(jophba): Rename to idealized_bitrate_utilization.
double lossy_utilization = {};
// The time at which the encode of the frame completed.
base::TimeTicks encode_completion_time;

@ -363,7 +363,7 @@ TEST_F(VideoSenderTest, ResendTimer) {
video_sender_->InsertRawVideoFrame(video_frame, reference_time);
base::TimeDelta max_resend_timeout =
base::Milliseconds(1 + kDefaultRtpMaxDelayMs);
kDefaultTargetPlayoutDelay + base::Milliseconds(1);
// Make sure that we do a re-send.
RunTasks(max_resend_timeout.InMilliseconds());

@ -149,18 +149,13 @@ void VpxEncoder::ConfigureForNewFrameSize(const gfx::Size& frame_size) {
// Rate control settings.
config_.rc_dropframe_thresh = 0; // The encoder may not drop any frames.
config_.rc_resize_allowed = 0; // TODO(miu): Why not? Investigate this.
config_.rc_resize_allowed = 0;
config_.rc_end_usage = VPX_CBR;
config_.rc_target_bitrate = bitrate_kbit_;
config_.rc_min_quantizer = cast_config_.video_codec_params.min_qp;
config_.rc_max_quantizer = cast_config_.video_codec_params.max_qp;
// TODO(miu): Revisit these now that the encoder is being successfully
// micro-managed.
config_.rc_undershoot_pct = 100;
config_.rc_overshoot_pct = 15;
// TODO(miu): Document why these rc_buf_*_sz values were chosen and/or
// research for better values. Should they be computed from the target
// playout delay?
config_.rc_buf_initial_sz = 500;
config_.rc_buf_optimal_sz = 600;
config_.rc_buf_sz = 1000;

@ -69,9 +69,6 @@ static const double kVideoAcceptedPSNR = 38.0;
// The tests are commonly implemented with |kFrameTimerMs| RunTask function;
// a normal video is 30 fps hence the 33 ms between frames.
//
// TODO(miu): The errors in timing will add up significantly. Find an
// alternative approach that eliminates use of this constant.
static const int kFrameTimerMs = 33;
// The size of audio frames. The encoder joins/breaks all inserted audio into
@ -166,11 +163,6 @@ class LoopBackPacketPipe final : public test::PacketPipe {
// Class that sends the packet direct from sender into the receiver with the
// ability to drop packets between the two.
//
// TODO(miu): This should be reconciled/merged into
// media/cast/test/loopback_transport.*. It's roughly the same class and has
// exactly the same name (and when it was outside of the anonymous namespace bad
// things happened when linking on Android!).
class LoopBackTransport : public PacketTransport {
public:
explicit LoopBackTransport(scoped_refptr<CastEnvironment> cast_environment)

@ -25,9 +25,6 @@
#include "media/cast/test/utility/video_utility.h"
#include "ui/gfx/geometry/size.h"
// TODO(miu): Figure out why _mkdir() and _rmdir() are missing when compiling
// third_party/ffmpeg/libavformat/os_support.h (lines 182, 183).
// http://crbug.com/572986
#if BUILDFLAG(IS_WIN)
#include <direct.h>
#endif // BUILDFLAG(IS_WIN)

@ -152,7 +152,6 @@ class AudioDecoder::OpusImpl final : public AudioDecoder::ImplBase {
// Copy interleaved samples from |buffer_| into a new AudioBus (where
// samples are stored in planar format, for each channel).
audio_bus = AudioBus::Create(num_channels_, num_samples_decoded);
// TODO(miu): This should be moved into AudioBus::FromInterleaved().
for (int ch = 0; ch < num_channels_; ++ch) {
const float* src = buffer_.get() + ch;
const float* const src_end = src + num_samples_decoded * num_channels_;

@ -13,17 +13,12 @@ namespace cast {
namespace {
// TODO(miu): These should probably be dynamic and computed base on configured
// end-to-end latency and packet loss rates. http://crbug.com/563784
enum {
// Number of milliseconds between sending of ACK/NACK Cast Message RTCP
// packets back to the sender.
kCastMessageUpdateIntervalMs = 33,
// Interval between sending of ACK/NACK Cast Message RTCP packets back to the
// sender.
constexpr base::TimeDelta kCastMessageUpdateInterval = base::Milliseconds(33);
// Number of milliseconds between repeating a NACK for packets in the same
// frame.
kNackRepeatIntervalMs = 30,
};
// Interval between repeating a NACK for packets in the same frame.
constexpr base::TimeDelta kNackRepeatInterval = base::Milliseconds(30);
} // namespace
@ -117,8 +112,7 @@ bool CastMessageBuilder::TimeToSendNextCastMessage(
if (last_update_time_.is_null() && framer_->Empty())
return false;
*time_to_send =
last_update_time_ + base::Milliseconds(kCastMessageUpdateIntervalMs);
*time_to_send = last_update_time_ + kCastMessageUpdateInterval;
return true;
}
@ -142,8 +136,7 @@ bool CastMessageBuilder::UpdateCastMessageInternal(RtcpCastMessage* message) {
// Is it time to update the cast message?
base::TimeTicks now = clock_->NowTicks();
if (now - last_update_time_ <
base::Milliseconds(kCastMessageUpdateIntervalMs)) {
if (now - last_update_time_ < kCastMessageUpdateInterval) {
return false;
}
last_update_time_ = now;
@ -174,7 +167,7 @@ void CastMessageBuilder::BuildPacketList() {
if (it != time_last_nacked_map_.end()) {
// We have sent a NACK in this frame before, make sure enough time have
// passed.
if (now - it->second < base::Milliseconds(kNackRepeatIntervalMs)) {
if (now - it->second < kNackRepeatInterval) {
continue;
}
}

@ -33,7 +33,6 @@ using AudioFrameDecodedCallback =
base::RepeatingCallback<void(std::unique_ptr<AudioBus> audio_bus,
base::TimeTicks playout_time,
bool is_continuous)>;
// TODO(miu): |video_frame| includes a timestamp, so use that instead.
using VideoFrameDecodedCallback =
base::RepeatingCallback<void(scoped_refptr<media::VideoFrame> video_frame,
base::TimeTicks playout_time,

@ -162,8 +162,6 @@ void CastReceiverImpl::EmitDecodedAudioFrame(
DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN));
if (audio_bus.get()) {
// TODO(miu): This is reporting incorrect timestamp and delay.
// http://crbug.com/547251
std::unique_ptr<FrameEvent> playout_event(new FrameEvent());
playout_event->timestamp = cast_environment->Clock()->NowTicks();
playout_event->type = FRAME_PLAYOUT;
@ -189,8 +187,6 @@ void CastReceiverImpl::EmitDecodedVideoFrame(
DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN));
if (video_frame) {
// TODO(miu): This is reporting incorrect timestamp and delay.
// http://crbug.com/547251
std::unique_ptr<FrameEvent> playout_event(new FrameEvent());
playout_event->timestamp = cast_environment->Clock()->NowTicks();
playout_event->type = FRAME_PLAYOUT;

@ -204,8 +204,6 @@ void FrameReceiver::EmitAvailableEncodedFrames() {
while (!frame_request_queue_.empty()) {
// Attempt to peek at the next completed frame from the |framer_|.
// TODO(miu): We should only be peeking at the metadata, and not copying the
// payload yet! Or, at least, peek using a StringPiece instead of a copy.
std::unique_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
bool is_consecutively_next_frame = false;
bool have_multiple_complete_frames = false;

@ -172,8 +172,6 @@ class FrameReceiver final : public RtpPayloadFeedback {
base::TimeDelta target_playout_delay_;
// Hack: This is used in logic that determines whether to skip frames.
// TODO(miu): Revisit this. Logic needs to also account for expected decode
// time.
const base::TimeDelta expected_frame_duration_;
// Set to false initially, then set to true after scheduling the periodic

@ -50,17 +50,12 @@ class Framer {
bool* next_frame,
bool* have_multiple_complete_frames);
// TODO(hubbe): Move this elsewhere.
void AckFrame(FrameId frame_id);
void ReleaseFrame(FrameId frame_id);
bool TimeToSendNextCastMessage(base::TimeTicks* time_to_send);
void SendCastMessage();
// TODO(miu): These methods are called from CastMessageBuilder. We need to
// resolve these circular dependencies with some refactoring.
// http://crbug.com/530845
FrameId newest_frame_id() const { return newest_frame_id_; }
bool Empty() const;
bool FrameExists(FrameId frame_id) const;

@ -12,10 +12,6 @@ namespace cast {
namespace {
constexpr uint32_t kMaxSequenceNumber = 65536;
// TODO(miu): Get rid of all the special 16-bit rounding detection and special
// handling throughout this file, and just use good 'ol int64_t.
// http://crbug.com/530839
bool IsNewerSequenceNumber(uint16_t sequence_number,
uint16_t prev_sequence_number) {
return (sequence_number != prev_sequence_number) &&

@ -16,7 +16,6 @@
namespace media {
namespace cast {
// TODO(miu): Document this class.
class ReceiverStats {
public:
explicit ReceiverStats(const base::TickClock* clock);

@ -110,8 +110,6 @@ class VideoDecoder::Vp8Impl final : public VideoDecoder::ImplBase {
return;
vpx_codec_dec_cfg_t cfg = {0};
// TODO(miu): Revisit this for typical multi-core desktop use case. This
// feels like it should be 4 or 8.
cfg.threads = 1;
DCHECK(vpx_codec_get_caps(vpx_codec_vp8_dx()) & VPX_CODEC_CAP_POSTPROC);
@ -221,7 +219,6 @@ VideoDecoder::VideoDecoder(
impl_ = new Vp8Impl(cast_environment);
break;
case CODEC_VIDEO_H264:
// TODO(miu): Need implementation.
NOTIMPLEMENTED();
break;
default:

@ -144,7 +144,6 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
EXPECT_EQ(expected_video_frame->coded_size().height(),
video_frame->coded_size().height());
EXPECT_LT(40.0, I420PSNR(*expected_video_frame, *video_frame));
// TODO(miu): Once we start using VideoFrame::timestamp_, check that here.
// Signal the main test thread that more video was decoded.
base::AutoLock auto_lock(lock_);

@ -18,7 +18,7 @@ FrameReceiverConfig GetDefaultAudioReceiverConfig() {
FrameReceiverConfig config;
config.receiver_ssrc = 2;
config.sender_ssrc = 1;
config.rtp_max_delay_ms = kDefaultRtpMaxDelayMs;
config.rtp_max_delay_ms = kDefaultTargetPlayoutDelay.InMilliseconds();
config.rtp_payload_type = RtpPayloadType::AUDIO_OPUS;
config.rtp_timebase = 48000;
config.channels = 2;
@ -31,7 +31,7 @@ FrameReceiverConfig GetDefaultVideoReceiverConfig() {
FrameReceiverConfig config;
config.receiver_ssrc = 12;
config.sender_ssrc = 11;
config.rtp_max_delay_ms = kDefaultRtpMaxDelayMs;
config.rtp_max_delay_ms = kDefaultTargetPlayoutDelay.InMilliseconds();
config.rtp_payload_type = RtpPayloadType::VIDEO_VP8;
config.rtp_timebase = kVideoFrequency;
config.channels = 1;

@ -157,7 +157,6 @@ class DemuxerStreamAdapterTest : public ::testing::Test {
protected:
void SetUp() override { SetUpDataPipe(); }
// TODO(miu): Add separate media thread, to test threading also.
base::test::SingleThreadTaskEnvironment task_environment_;
std::unique_ptr<FakeDemuxerStream> demuxer_stream_;
std::unique_ptr<FakeRemotingDataStreamSender> data_stream_sender_;

@ -15,8 +15,6 @@ namespace {
////////////////////////////////////////////////////////////////////////////////
// BEGIN: These were all borrowed from src/media/filters/ffmpeg_demuxer.cc.
// TODO(miu): This code will be de-duped in a soon-upcoming change.
// Some videos just want to watch the world burn, with a height of 0; cap the
// "infinite" aspect ratio resulting.
constexpr int kInfiniteRatio = 99999;

@ -74,9 +74,6 @@ MediaStreamAudioProcessor::MediaStreamAudioProcessor(
}
MediaStreamAudioProcessor::~MediaStreamAudioProcessor() {
// TODO(miu): This class is ref-counted, shared among threads, and then
// requires itself to be destroyed on the main thread only?!?!? Fix this, and
// then remove the hack in WebRtcAudioSink::Adapter.
DCHECK(main_thread_runner_->BelongsToCurrentThread());
Stop();
}

@ -41,10 +41,6 @@ void MediaStreamUtils::CreateNativeAudioMediaStreamTrack(
// At this point, a MediaStreamAudioSource instance must exist. The one
// exception is when a WebAudio destination node is acting as a source of
// audio.
//
// TODO(miu): This needs to be moved to an appropriate location. A WebAudio
// source should have been created before this method was called so that this
// special case code isn't needed here.
if (!audio_source && source->RequiresAudioConsumer()) {
DVLOG(1) << "Creating WebAudio media stream source.";
audio_source = new WebAudioMediaStreamSource(source, task_runner);

@ -134,9 +134,6 @@ int GetCaptureBufferSize(bool need_webrtc_processing,
// If the buffer size is missing from the MediaStreamDevice, provide 10ms as
// a fall-back.
//
// TODO(miu): Identify where/why the buffer size might be missing, fix the
// code, and then require it here. https://crbug.com/638081
return input_device_params.sample_rate() / 100;
#endif
}

@ -80,9 +80,8 @@ int TrackAudioRenderer::Render(base::TimeDelta delay,
return 0;
}
// TODO(miu): Plumbing is needed to determine the actual playout timestamp
// of the audio, instead of just snapshotting TimeTicks::Now(), for proper
// audio/video sync. https://crbug.com/335335
// TODO(https://crbug.com/1302080): use the actual playout time instead of
// stubbing with Now().
const base::TimeTicks playout_time = base::TimeTicks::Now() + delay;
DVLOG(2) << "Pulling audio out of shifter to be played "
<< delay.InMilliseconds() << " ms from now.";

@ -54,10 +54,6 @@ class WebAudioMediaStreamAudioSinkTest : public testing::Test {
};
TEST_F(WebAudioMediaStreamAudioSinkTest, VerifyDataFlow) {
// TODO(miu): This test should be re-worked so that the audio data and format
// is feed into a MediaStreamAudioSource and, through the
// MediaStreamAudioTrack, ultimately delivered to the |source_provider_|.
// Point the WebVector into memory owned by |sink_bus_|.
WebVector<float*> audio_data(static_cast<size_t>(sink_bus_->channels()));
for (int i = 0; i < sink_bus_->channels(); ++i)

@ -44,9 +44,6 @@ void WebAudioMediaStreamSource::SetFormat(int number_of_channels,
// Set the format used by this WebAudioMediaStreamSource. We are using 10ms
// data as a buffer size since that is the native buffer size of WebRtc packet
// running on.
//
// TODO(miu): Re-evaluate whether this is needed. For now (this refactoring),
// I did not want to change behavior. https://crbug.com/577874
fifo_.Reset(sample_rate / 100);
media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
channel_layout, sample_rate,
@ -90,11 +87,9 @@ void WebAudioMediaStreamSource::ConsumeAudio(
"WebAudioMediaStreamSource::ConsumeAudio", "frames",
number_of_frames);
// TODO(miu): Plumbing is needed to determine the actual capture timestamp
// of the audio, instead of just snapshotting base::TimeTicks::Now(), for
// proper audio/video sync. https://crbug.com/335335
// TODO(https://crbug.com/1302080): this should use the actual audio
// playout stamp instead of Now().
current_reference_time_ = base::TimeTicks::Now();
wrapper_bus_->set_frames(number_of_frames);
DCHECK_EQ(wrapper_bus_->channels(), static_cast<int>(audio_data.size()));
for (wtf_size_t i = 0; i < audio_data.size(); ++i) {

@ -139,9 +139,6 @@ void WebRtcAudioSink::DeliverRebufferedAudio(const media::AudioBus& audio_bus,
TRACE_EVENT1("audio", "WebRtcAudioSink::DeliverRebufferedAudio", "frames",
audio_bus.frames());
// TODO(miu): Why doesn't a WebRTC sink care about reference time passed to
// OnData(), and the |frame_delay| here? How is AV sync achieved otherwise?
// TODO(henrika): Remove this conversion once the interface in libjingle
// supports float vectors.
static_assert(sizeof(interleaved_data_[0]) == 2,
@ -159,7 +156,6 @@ void WebRtcAudioSink::DeliverRebufferedAudio(const media::AudioBus& audio_bus,
}
namespace {
// TODO(miu): MediaStreamAudioProcessor destructor requires this nonsense.
void DereferenceOnMainThread(
const scoped_refptr<webrtc::AudioProcessorInterface>& processor) {}
} // namespace

@ -143,8 +143,6 @@ class PLATFORM_EXPORT WebRtcAudioSink : public WebMediaStreamAudioSink {
// Task runner used for the final de-referencing of |audio_processor_| at
// destruction time.
//
// TODO(miu): Remove this once MediaStreamAudioProcessor is fixed.
const scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
// The audio processsor that applies audio post-processing on the source

@ -876,8 +876,6 @@ void VideoCaptureImpl::OnBufferReady(
// If the timestamp is not prepared, we use reference time to make a rough
// estimate. e.g. ThreadSafeCaptureOracle::DidCaptureFrame().
// TODO(miu): Fix upstream capturers to always set timestamp and reference
// time. See http://crbug/618407/ for tracking.
if (buffer->info->timestamp.is_zero())
buffer->info->timestamp = reference_time - first_frame_ref_time_;