0

Fix some instances of -Wshadow.

Bug: 794619
Change-Id: Ic2ccb78d8c9d65aca45d1b7eab92d5ace40dbd84
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3110906
Commit-Queue: Peter Kasting <pkasting@chromium.org>
Reviewed-by: Tom Sepez <tsepez@chromium.org>
Reviewed-by: Thomas Guilbert <tguilbert@chromium.org>
Reviewed-by: Peter Kvitek <kvitekp@chromium.org>
Reviewed-by: Peng Huang <penghuang@chromium.org>
Cr-Commit-Position: refs/heads/main@{#914925}
This commit is contained in:
Peter Kasting
2021-08-24 21:53:34 +00:00
committed by Chromium LUCI CQ
parent 5714acca3c
commit 905a2a9713
41 changed files with 299 additions and 323 deletions

@ -7371,8 +7371,7 @@ bool GLES2Implementation::PackStringsToBucket(GLsizei count,
if (copy_size < buffer.size()) {
// Append NULL in the end.
DCHECK(copy_size + 1 == buffer.size());
char* str = reinterpret_cast<char*>(buffer.address());
str[copy_size] = 0;
reinterpret_cast<char*>(buffer.address())[copy_size] = 0;
}
helper_->SetBucketData(kResultBucketId, offset, buffer.size(),
buffer.shm_id(), buffer.offset());

@ -625,28 +625,27 @@ INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest2, ::testing::Bool());
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramInfoLog, 0>(
bool /* valid */) {
const GLuint kClientVertexShaderId = 5001;
const GLuint kServiceVertexShaderId = 6001;
const GLuint kClientFragmentShaderId = 5002;
const GLuint kServiceFragmentShaderId = 6002;
const GLuint kTestClientVertexShaderId = 5001;
const GLuint kTestServiceVertexShaderId = 6001;
const GLuint kTestClientFragmentShaderId = 5002;
const GLuint kTestServiceFragmentShaderId = 6002;
const char* log = "hello"; // Matches auto-generated unit test.
DoCreateShader(
GL_VERTEX_SHADER, kClientVertexShaderId, kServiceVertexShaderId);
DoCreateShader(
GL_FRAGMENT_SHADER, kClientFragmentShaderId, kServiceFragmentShaderId);
DoCreateShader(GL_VERTEX_SHADER, kTestClientVertexShaderId,
kTestServiceVertexShaderId);
DoCreateShader(GL_FRAGMENT_SHADER, kTestClientFragmentShaderId,
kTestServiceFragmentShaderId);
TestHelper::SetShaderStates(
gl_.get(), GetShader(kClientVertexShaderId), true);
TestHelper::SetShaderStates(
gl_.get(), GetShader(kClientFragmentShaderId), true);
TestHelper::SetShaderStates(gl_.get(), GetShader(kTestClientVertexShaderId),
true);
TestHelper::SetShaderStates(gl_.get(), GetShader(kTestClientFragmentShaderId),
true);
InSequence dummy;
EXPECT_CALL(*gl_,
AttachShader(kServiceProgramId, kServiceVertexShaderId))
EXPECT_CALL(*gl_, AttachShader(kServiceProgramId, kTestServiceVertexShaderId))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*gl_,
AttachShader(kServiceProgramId, kServiceFragmentShaderId))
AttachShader(kServiceProgramId, kTestServiceFragmentShaderId))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*gl_, LinkProgram(kServiceProgramId))
@ -673,10 +672,10 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramInfoLog, 0>(
ASSERT_TRUE(program != nullptr);
cmds::AttachShader attach_cmd;
attach_cmd.Init(client_program_id_, kClientVertexShaderId);
attach_cmd.Init(client_program_id_, kTestClientVertexShaderId);
EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
attach_cmd.Init(client_program_id_, kClientFragmentShaderId);
attach_cmd.Init(client_program_id_, kTestClientFragmentShaderId);
EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
program->Link(nullptr, Program::kCountOnlyStaticallyUsed, this);
@ -754,27 +753,26 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::DeleteQueriesEXTImmediate, 0>(
template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::LinkProgram, 0>(
bool /* valid */) {
const GLuint kClientVertexShaderId = 5001;
const GLuint kServiceVertexShaderId = 6001;
const GLuint kClientFragmentShaderId = 5002;
const GLuint kServiceFragmentShaderId = 6002;
DoCreateShader(
GL_VERTEX_SHADER, kClientVertexShaderId, kServiceVertexShaderId);
DoCreateShader(
GL_FRAGMENT_SHADER, kClientFragmentShaderId, kServiceFragmentShaderId);
const GLuint kTestClientVertexShaderId = 5001;
const GLuint kTestServiceVertexShaderId = 6001;
const GLuint kTestClientFragmentShaderId = 5002;
const GLuint kTestServiceFragmentShaderId = 6002;
DoCreateShader(GL_VERTEX_SHADER, kTestClientVertexShaderId,
kTestServiceVertexShaderId);
DoCreateShader(GL_FRAGMENT_SHADER, kTestClientFragmentShaderId,
kTestServiceFragmentShaderId);
TestHelper::SetShaderStates(
gl_.get(), GetShader(kClientVertexShaderId), true);
TestHelper::SetShaderStates(
gl_.get(), GetShader(kClientFragmentShaderId), true);
TestHelper::SetShaderStates(gl_.get(), GetShader(kTestClientVertexShaderId),
true);
TestHelper::SetShaderStates(gl_.get(), GetShader(kTestClientFragmentShaderId),
true);
InSequence dummy;
EXPECT_CALL(*gl_,
AttachShader(kServiceProgramId, kServiceVertexShaderId))
EXPECT_CALL(*gl_, AttachShader(kServiceProgramId, kTestServiceVertexShaderId))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*gl_,
AttachShader(kServiceProgramId, kServiceFragmentShaderId))
AttachShader(kServiceProgramId, kTestServiceFragmentShaderId))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*gl_, GetProgramiv(kServiceProgramId, GL_LINK_STATUS, _))
@ -791,10 +789,10 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::LinkProgram, 0>(
.WillOnce(SetArgPointee<2>(0));
cmds::AttachShader attach_cmd;
attach_cmd.Init(client_program_id_, kClientVertexShaderId);
attach_cmd.Init(client_program_id_, kTestClientVertexShaderId);
EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
attach_cmd.Init(client_program_id_, kClientFragmentShaderId);
attach_cmd.Init(client_program_id_, kTestClientFragmentShaderId);
EXPECT_EQ(error::kNoError, ExecuteCmd(attach_cmd));
}

@ -1051,14 +1051,14 @@ void GLES2DecoderTestBase::SetupShaderForUniform(GLenum uniform_type) {
{ "bar", 1, uniform_type, 0, 2, -1, },
{ "car", 4, uniform_type, 1, 1, -1, },
};
const GLuint kClientVertexShaderId = 5001;
const GLuint kServiceVertexShaderId = 6001;
const GLuint kClientFragmentShaderId = 5002;
const GLuint kServiceFragmentShaderId = 6002;
const GLuint kTestClientVertexShaderId = 5001;
const GLuint kTestServiceVertexShaderId = 6001;
const GLuint kTestClientFragmentShaderId = 5002;
const GLuint kTestServiceFragmentShaderId = 6002;
SetupShader(attribs, base::size(attribs), uniforms, base::size(uniforms),
client_program_id_, kServiceProgramId, kClientVertexShaderId,
kServiceVertexShaderId, kClientFragmentShaderId,
kServiceFragmentShaderId);
client_program_id_, kServiceProgramId, kTestClientVertexShaderId,
kTestServiceVertexShaderId, kTestClientFragmentShaderId,
kTestServiceFragmentShaderId);
EXPECT_CALL(*gl_, UseProgram(kServiceProgramId))
.Times(1)

@ -301,11 +301,10 @@ TEST_P(SharedImageBackingFactoryGLImageTest, Basic) {
// Create an R-8 image texture, and check that the internal_format is that
// of the image (GL_RGBA for TextureImageFactory). This only matters for
// the validating decoder.
auto format = viz::ResourceFormat::RED_8;
gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
backing = backing_factory_->CreateSharedImage(
mailbox, format, surface_handle, size, color_space, surface_origin,
alpha_type, usage, false /* is_thread_safe */);
mailbox, viz::ResourceFormat::RED_8, gpu::kNullSurfaceHandle, size,
color_space, surface_origin, alpha_type, usage,
false /* is_thread_safe */);
EXPECT_TRUE(backing);
shared_image = shared_image_manager_->Register(std::move(backing),
memory_type_tracker_.get());

@ -176,8 +176,8 @@ void TestHelper::SetupTextureInitializationExpectations(
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z,
};
for (size_t ii = 0; ii < base::size(faces); ++ii) {
EXPECT_CALL(*gl, TexImage2D(faces[ii], 0, GL_RGBA, 1, 1, 0, GL_RGBA,
for (size_t face = 0; face < base::size(faces); ++face) {
EXPECT_CALL(*gl, TexImage2D(faces[face], 0, GL_RGBA, 1, 1, 0, GL_RGBA,
GL_UNSIGNED_BYTE, _))
.Times(1)
.RetiresOnSaturation();

@ -146,7 +146,7 @@ TEST_F(SharedImageGLBackingProduceDawnTest, Basic) {
reservation.deviceId, reservation.deviceGeneration, reservation.id,
reservation.generation, WGPUTextureUsage_CopySrc,
webgpu::WEBGPU_MAILBOX_NONE, reinterpret_cast<GLbyte*>(&gl_mailbox));
wgpu::Texture texture = wgpu::Texture::Acquire(reservation.texture);
wgpu::Texture wgpu_texture = wgpu::Texture::Acquire(reservation.texture);
// Copy the texture in a mappable buffer.
wgpu::BufferDescriptor buffer_desc;
@ -155,7 +155,7 @@ TEST_F(SharedImageGLBackingProduceDawnTest, Basic) {
wgpu::Buffer readback_buffer = device.CreateBuffer(&buffer_desc);
wgpu::ImageCopyTexture copy_src = {};
copy_src.texture = texture;
copy_src.texture = wgpu_texture;
copy_src.mipLevel = 0;
copy_src.origin = {0, 0, 0};

@ -277,16 +277,17 @@ void GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange(
// approaches peak. If that is the case we should track a
// |peak_since_last_sequence_update_| on the memory changes. Then only
// update the sequences when a new one is added, or the peak is requested.
for (auto& sequence : sequence_trackers_) {
if (current_memory_ > sequence.second.total_memory_) {
sequence.second.total_memory_ = current_memory_;
for (auto& seq : sequence_trackers_) {
if (current_memory_ > seq.second.total_memory_) {
seq.second.total_memory_ = current_memory_;
for (auto& sequence : sequence_trackers_) {
TRACE_EVENT_ASYNC_STEP_INTO1("gpu", "PeakMemoryTracking",
sequence.first, "Peak", "peak",
current_memory_);
}
for (auto& source : current_memory_per_source_) {
sequence.second.peak_memory_per_source_[source.first] = source.second;
for (auto& memory_per_source : current_memory_per_source_) {
seq.second.peak_memory_per_source_[memory_per_source.first] =
memory_per_source.second;
}
}
}

@ -75,17 +75,16 @@ TEST_F(BasicVulkanTest, EmptyVulkanSwaps) {
// Also make sure we can swap multiple times.
for (int i = 0; i < 10; ++i) {
absl::optional<VulkanSwapChain::ScopedWrite> scoped_write;
scoped_write.emplace(surface->swap_chain());
EXPECT_TRUE(scoped_write->success());
VkSemaphore begin_semaphore = scoped_write->begin_semaphore();
begin_semaphore = scoped_write->begin_semaphore();
EXPECT_NE(begin_semaphore, kNullSemaphore);
VkSemaphore end_semaphore = scoped_write->end_semaphore();
end_semaphore = scoped_write->end_semaphore();
EXPECT_NE(end_semaphore, kNullSemaphore);
auto command_buffer = command_pool->CreatePrimaryCommandBuffer();
command_buffer = command_pool->CreatePrimaryCommandBuffer();
{
ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
@ -99,6 +98,7 @@ TEST_F(BasicVulkanTest, EmptyVulkanSwaps) {
EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers());
vkQueueWaitIdle(GetDeviceQueue()->GetVulkanQueue());
command_buffer->Destroy();
command_buffer.reset();
}
surface->Finish();
surface->Destroy();

@ -65,23 +65,23 @@ Builder::Builder() : options_(0, nullptr) {}
Builder::~Builder() = default;
Builder& Builder::SetProductNameAndVersion(
const std::string& product_name_and_version) {
options_.product_name_and_version = product_name_and_version;
const std::string& name_and_version) {
options_.product_name_and_version = name_and_version;
return *this;
}
Builder& Builder::SetUserAgent(const std::string& user_agent) {
options_.user_agent = user_agent;
Builder& Builder::SetUserAgent(const std::string& agent) {
options_.user_agent = agent;
return *this;
}
Builder& Builder::SetAcceptLanguage(const std::string& accept_language) {
options_.accept_language = accept_language;
Builder& Builder::SetAcceptLanguage(const std::string& language) {
options_.accept_language = language;
return *this;
}
Builder& Builder::SetEnableBeginFrameControl(bool enable_begin_frame_control) {
options_.enable_begin_frame_control = enable_begin_frame_control;
Builder& Builder::SetEnableBeginFrameControl(bool enable) {
options_.enable_begin_frame_control = enable;
return *this;
}
@ -95,40 +95,38 @@ Builder& Builder::EnableDevToolsPipe() {
return *this;
}
Builder& Builder::SetMessagePump(base::MessagePump* message_pump) {
options_.message_pump = message_pump;
Builder& Builder::SetMessagePump(base::MessagePump* pump) {
options_.message_pump = pump;
return *this;
}
Builder& Builder::SetProxyConfig(
std::unique_ptr<net::ProxyConfig> proxy_config) {
options_.proxy_config = std::move(proxy_config);
Builder& Builder::SetProxyConfig(std::unique_ptr<net::ProxyConfig> config) {
options_.proxy_config = std::move(config);
return *this;
}
Builder& Builder::SetSingleProcessMode(bool single_process_mode) {
options_.single_process_mode = single_process_mode;
Builder& Builder::SetSingleProcessMode(bool single_process) {
options_.single_process_mode = single_process;
return *this;
}
Builder& Builder::SetDisableSandbox(bool disable_sandbox) {
options_.disable_sandbox = disable_sandbox;
Builder& Builder::SetDisableSandbox(bool disable) {
options_.disable_sandbox = disable;
return *this;
}
Builder& Builder::SetEnableResourceScheduler(bool enable_resource_scheduler) {
options_.enable_resource_scheduler = enable_resource_scheduler;
Builder& Builder::SetEnableResourceScheduler(bool enable) {
options_.enable_resource_scheduler = enable;
return *this;
}
Builder& Builder::SetGLImplementation(const std::string& gl_implementation) {
options_.gl_implementation = gl_implementation;
Builder& Builder::SetGLImplementation(const std::string& implementation) {
options_.gl_implementation = implementation;
return *this;
}
Builder& Builder::SetANGLEImplementation(
const std::string& angle_implementation) {
options_.angle_implementation = angle_implementation;
Builder& Builder::SetANGLEImplementation(const std::string& implementation) {
options_.angle_implementation = implementation;
return *this;
}
@ -139,39 +137,39 @@ Builder& Builder::SetAppendCommandLineFlagsCallback(
}
#if defined(OS_WIN)
Builder& Builder::SetInstance(HINSTANCE instance) {
options_.instance = instance;
Builder& Builder::SetInstance(HINSTANCE hinstance) {
options_.instance = hinstance;
return *this;
}
Builder& Builder::SetSandboxInfo(sandbox::SandboxInterfaceInfo* sandbox_info) {
options_.sandbox_info = sandbox_info;
Builder& Builder::SetSandboxInfo(sandbox::SandboxInterfaceInfo* info) {
options_.sandbox_info = info;
return *this;
}
#endif // defined(OS_WIN)
Builder& Builder::SetUserDataDir(const base::FilePath& user_data_dir) {
options_.user_data_dir = user_data_dir;
Builder& Builder::SetUserDataDir(const base::FilePath& dir) {
options_.user_data_dir = dir;
return *this;
}
Builder& Builder::SetWindowSize(const gfx::Size& window_size) {
options_.window_size = window_size;
Builder& Builder::SetWindowSize(const gfx::Size& size) {
options_.window_size = size;
return *this;
}
Builder& Builder::SetIncognitoMode(bool incognito_mode) {
options_.incognito_mode = incognito_mode;
Builder& Builder::SetIncognitoMode(bool incognito) {
options_.incognito_mode = incognito;
return *this;
}
Builder& Builder::SetSitePerProcess(bool site_per_process) {
options_.site_per_process = site_per_process;
Builder& Builder::SetSitePerProcess(bool per_process) {
options_.site_per_process = per_process;
return *this;
}
Builder& Builder::SetBlockNewWebContents(bool block_new_web_contents) {
options_.block_new_web_contents = block_new_web_contents;
Builder& Builder::SetBlockNewWebContents(bool block) {
options_.block_new_web_contents = block;
return *this;
}
@ -191,9 +189,8 @@ Builder& Builder::SetCrashDumpsDir(const base::FilePath& dir) {
return *this;
}
Builder& Builder::SetFontRenderHinting(
gfx::FontRenderParams::Hinting font_render_hinting) {
options_.font_render_hinting = font_render_hinting;
Builder& Builder::SetFontRenderHinting(gfx::FontRenderParams::Hinting hinting) {
options_.font_render_hinting = hinting;
return *this;
}

@ -235,38 +235,36 @@ class HEADLESS_EXPORT HeadlessBrowser::Options::Builder {
Builder& EnableDevToolsServer(const net::HostPortPair& endpoint);
Builder& EnableDevToolsPipe();
Builder& SetMessagePump(base::MessagePump* message_pump);
Builder& SetSingleProcessMode(bool single_process_mode);
Builder& SetDisableSandbox(bool disable_sandbox);
Builder& SetEnableResourceScheduler(bool enable_resource_scheduler);
Builder& SetGLImplementation(const std::string& gl_implementation);
Builder& SetANGLEImplementation(const std::string& angle_implementation);
Builder& SetMessagePump(base::MessagePump* pump);
Builder& SetSingleProcessMode(bool single_process);
Builder& SetDisableSandbox(bool disable);
Builder& SetEnableResourceScheduler(bool enable);
Builder& SetGLImplementation(const std::string& implementation);
Builder& SetANGLEImplementation(const std::string& implementation);
Builder& SetAppendCommandLineFlagsCallback(
const Options::AppendCommandLineFlagsCallback& callback);
#if defined(OS_WIN)
Builder& SetInstance(HINSTANCE instance);
Builder& SetSandboxInfo(sandbox::SandboxInterfaceInfo* sandbox_info);
Builder& SetInstance(HINSTANCE hinstance);
Builder& SetSandboxInfo(sandbox::SandboxInterfaceInfo* info);
#endif
// Per-context settings.
Builder& SetProductNameAndVersion(
const std::string& product_name_and_version);
Builder& SetAcceptLanguage(const std::string& accept_language);
Builder& SetEnableBeginFrameControl(bool enable_begin_frame_control);
Builder& SetUserAgent(const std::string& user_agent);
Builder& SetProxyConfig(std::unique_ptr<net::ProxyConfig> proxy_config);
Builder& SetWindowSize(const gfx::Size& window_size);
Builder& SetUserDataDir(const base::FilePath& user_data_dir);
Builder& SetIncognitoMode(bool incognito_mode);
Builder& SetSitePerProcess(bool site_per_process);
Builder& SetBlockNewWebContents(bool block_new_web_contents);
Builder& SetProductNameAndVersion(const std::string& name_and_version);
Builder& SetAcceptLanguage(const std::string& language);
Builder& SetEnableBeginFrameControl(bool enable);
Builder& SetUserAgent(const std::string& agent);
Builder& SetProxyConfig(std::unique_ptr<net::ProxyConfig> config);
Builder& SetWindowSize(const gfx::Size& size);
Builder& SetUserDataDir(const base::FilePath& dir);
Builder& SetIncognitoMode(bool incognito);
Builder& SetSitePerProcess(bool per_process);
Builder& SetBlockNewWebContents(bool block);
Builder& SetOverrideWebPreferencesCallback(
base::RepeatingCallback<void(blink::web_pref::WebPreferences*)> callback);
Builder& SetCrashReporterEnabled(bool enabled);
Builder& SetCrashDumpsDir(const base::FilePath& dir);
Builder& SetFontRenderHinting(
gfx::FontRenderParams::Hinting font_render_hinting);
Builder& SetFontRenderHinting(gfx::FontRenderParams::Hinting hinting);
Options Build();

@ -59,7 +59,7 @@ void ChannelProxy::Context::ClearIPCTaskRunner() {
void ChannelProxy::Context::CreateChannel(
std::unique_ptr<ChannelFactory> factory) {
base::AutoLock l(channel_lifetime_lock_);
base::AutoLock channel_lock(channel_lifetime_lock_);
DCHECK(!channel_);
DCHECK_EQ(factory->GetIPCTaskRunner(), ipc_task_runner_);
channel_ = factory->BuildChannel(this);
@ -69,7 +69,7 @@ void ChannelProxy::Context::CreateChannel(
if (support) {
thread_safe_channel_ = support->CreateThreadSafeChannel();
base::AutoLock l(pending_filters_lock_);
base::AutoLock filter_lock(pending_filters_lock_);
for (auto& entry : pending_io_thread_interfaces_)
support->AddGenericAssociatedInterface(entry.first, entry.second);
pending_io_thread_interfaces_.clear();
@ -409,9 +409,9 @@ void ChannelProxy::Context::ClearChannel() {
void ChannelProxy::Context::AddGenericAssociatedInterfaceForIOThread(
const std::string& name,
const GenericAssociatedInterfaceFactory& factory) {
base::AutoLock l(channel_lifetime_lock_);
base::AutoLock channel_lock(channel_lifetime_lock_);
if (!channel_) {
base::AutoLock l(pending_filters_lock_);
base::AutoLock filter_lock(pending_filters_lock_);
pending_io_thread_interfaces_.emplace_back(name, factory);
return;
}

@ -138,18 +138,20 @@ scoped_refptr<MessageAttachment> MessageAttachment::CreateFromMojoHandle(
}
#elif defined(OS_FUCHSIA)
if (type == Type::FUCHSIA_HANDLE) {
zx::handle handle;
zx::handle zx_handle;
if (platform_handle.type == MOJO_PLATFORM_HANDLE_TYPE_FUCHSIA_HANDLE)
handle.reset(static_cast<zx_handle_t>(platform_handle.value));
return new internal::HandleAttachmentFuchsia(std::move(handle));
zx_handle.reset(static_cast<zx_handle_t>(platform_handle.value));
return new internal::HandleAttachmentFuchsia(std::move(zx_handle));
}
#elif defined(OS_WIN)
if (type == Type::WIN_HANDLE) {
base::PlatformFile handle = base::kInvalidPlatformFile;
if (platform_handle.type == MOJO_PLATFORM_HANDLE_TYPE_WINDOWS_HANDLE)
handle = reinterpret_cast<base::PlatformFile>(platform_handle.value);
base::PlatformFile platform_file = base::kInvalidPlatformFile;
if (platform_handle.type == MOJO_PLATFORM_HANDLE_TYPE_WINDOWS_HANDLE) {
platform_file =
reinterpret_cast<base::PlatformFile>(platform_handle.value);
}
return new internal::HandleAttachmentWin(
handle, internal::HandleAttachmentWin::FROM_WIRE);
platform_file, internal::HandleAttachmentWin::FROM_WIRE);
}
#endif
NOTREACHED();

@ -114,7 +114,7 @@ TEST_F(AudioBlockFifoTest, PushAndConsume) {
// Consume all blocks of data.
for (int i = 1; i <= blocks; ++i) {
const AudioBus* bus = fifo.Consume();
bus = fifo.Consume();
EXPECT_TRUE(channels == bus->channels());
EXPECT_TRUE(frames == bus->frames());
EXPECT_TRUE(fifo.GetUnfilledFrames() == frames * i);
@ -132,7 +132,7 @@ TEST_F(AudioBlockFifoTest, PushAndConsume) {
// Consume all the existing filled blocks of data.
while (fifo.available_blocks()) {
const AudioBus* bus = fifo.Consume();
bus = fifo.Consume();
EXPECT_TRUE(channels == bus->channels());
EXPECT_TRUE(frames == bus->frames());
}

@ -121,7 +121,7 @@ TEST_F(AudioFifoTest, FramesInFifo) {
const int frames_in_fifo = bus2->frames();
fifo.Push(bus2.get());
EXPECT_EQ(fifo.frames(), frames_in_fifo);
for (int n = 0; n < kMaxFrameCount; ++n) {
for (n = 0; n < kMaxFrameCount; ++n) {
fifo.Push(bus2.get());
fifo.Consume(bus2.get(), 0, frames_in_fifo);
EXPECT_EQ(fifo.frames(), frames_in_fifo);

@ -253,7 +253,7 @@ TEST(VideoCaptureOracleTest, SamplesAtCorrectTimesAroundRefreshRequests) {
t += refresh_interval;
if (oracle.ObserveEventAndDecideCapture(VideoCaptureOracle::kRefreshRequest,
gfx::Rect(), t)) {
const int frame_number = oracle.next_frame_number();
frame_number = oracle.next_frame_number();
oracle.RecordCapture(0.0);
ASSERT_TRUE(oracle.CompleteCapture(frame_number, true, &ignored));
did_complete_a_capture = true;

@ -107,8 +107,8 @@ VideoCaptureFeedback& VideoCaptureFeedback::RequireMapped(bool require) {
}
VideoCaptureFeedback& VideoCaptureFeedback::WithMappedSizes(
std::vector<gfx::Size> mapped_sizes) {
this->mapped_sizes = std::move(mapped_sizes);
std::vector<gfx::Size> sizes) {
mapped_sizes = std::move(sizes);
SortSizesDescending(mapped_sizes);
return *this;
}

@ -48,7 +48,7 @@ struct CAPTURE_EXPORT VideoCaptureFeedback {
VideoCaptureFeedback& WithMaxFramerate(float max_framerate_fps);
VideoCaptureFeedback& WithMaxPixels(int max_pixels);
VideoCaptureFeedback& RequireMapped(bool require);
VideoCaptureFeedback& WithMappedSizes(std::vector<gfx::Size> mapped_sizes);
VideoCaptureFeedback& WithMappedSizes(std::vector<gfx::Size> sizes);
// Combine constraints of two different sinks resulting in constraints fitting
// both of them.

@ -435,7 +435,7 @@ bool VideoCaptureDeviceFactoryWin::CreateDeviceFilterDirectShow(
for (ComPtr<IMoniker> moniker;
enum_moniker->Next(1, &moniker, nullptr) == S_OK; moniker.Reset()) {
ComPtr<IPropertyBag> prop_bag;
HRESULT hr = moniker->BindToStorage(0, 0, IID_PPV_ARGS(&prop_bag));
hr = moniker->BindToStorage(0, 0, IID_PPV_ARGS(&prop_bag));
if (FAILED(hr))
continue;

@ -128,15 +128,15 @@ void VideoCaptureDeviceWin::GetPinCapabilityList(
ComPtr<IAMVideoControl> video_control;
hr = capture_filter.As(&video_control);
int count = 0, size = 0;
hr = stream_config->GetNumberOfCapabilities(&count, &size);
int count = 0, byte_size = 0;
hr = stream_config->GetNumberOfCapabilities(&count, &byte_size);
if (FAILED(hr)) {
DLOG(ERROR) << "GetNumberOfCapabilities failed: "
<< logging::SystemErrorCodeToString(hr);
return;
}
std::unique_ptr<BYTE[]> caps(new BYTE[size]);
std::unique_ptr<BYTE[]> caps(new BYTE[byte_size]);
for (int i = 0; i < count; ++i) {
VideoCaptureDeviceWin::ScopedMediaType media_type;
hr = stream_config->GetStreamCaps(i, media_type.Receive(), caps.get());

@ -419,9 +419,9 @@ TEST_F(RtcpBuilderTest, RtcpReceiverReportRedundancy) {
p.AddReceiverFrameLog(test_rtp_timestamp().lower_32_bits(), num_events,
time_base_ms - (num_events - 1) * kResendDelay *
kTimeBetweenEventsMs);
for (int i = 0; i < num_events; i++) {
for (int event = 0; event < num_events; event++) {
p.AddReceiverEventLog(0, FRAME_ACK_SENT,
base::checked_cast<uint16_t>(i * kResendDelay *
base::checked_cast<uint16_t>(event * kResendDelay *
kTimeBetweenEventsMs));
}

@ -79,8 +79,9 @@ void RtpSender::ResendPackets(
if (!stored_packets)
continue;
for (auto it = stored_packets->begin(); it != stored_packets->end(); ++it) {
const PacketKey& packet_key = it->first;
for (auto packet_it = stored_packets->begin();
packet_it != stored_packets->end(); ++packet_it) {
const PacketKey& packet_key = packet_it->first;
const uint16_t packet_id = packet_key.packet_id;
// Should we resend the packet?
@ -94,7 +95,7 @@ void RtpSender::ResendPackets(
// If we were asked to resend the last packet, check if it's the
// last packet.
if (!resend && resend_last && (it + 1) == stored_packets->end()) {
if (!resend && resend_last && (packet_it + 1) == stored_packets->end()) {
resend = true;
}
@ -102,11 +103,11 @@ void RtpSender::ResendPackets(
// Resend packet to the network.
VLOG(3) << "Resend " << frame_id << ":" << packet_id;
// Set a unique incremental sequence number for every packet.
PacketRef packet_copy = FastCopyPacket(it->second);
PacketRef packet_copy = FastCopyPacket(packet_it->second);
UpdateSequenceNumber(&packet_copy->data);
packets_to_resend.push_back(std::make_pair(packet_key, packet_copy));
} else if (cancel_rtx_if_not_in_list) {
transport_->CancelSendingPacket(it->first);
transport_->CancelSendingPacket(packet_it->first);
}
}
transport_->ResendPackets(packets_to_resend, dedup_info);

@ -152,7 +152,7 @@ TEST_F(CongestionControlTest, RetainsSufficientHistory) {
base::TimeDelta::FromMilliseconds(400);
// Sanity-check: With no data, GetBitrate() returns an in-range value.
const int bitrate = congestion_control_->GetBitrate(
int bitrate = congestion_control_->GetBitrate(
testing_clock_.NowTicks() + kFakePlayoutDelay, kFakePlayoutDelay);
ASSERT_GE(bitrate, kMinBitrateConfigured);
ASSERT_LE(bitrate, kMaxBitrateConfigured);
@ -165,7 +165,7 @@ TEST_F(CongestionControlTest, RetainsSufficientHistory) {
congestion_control_->SendFrameToTransport(frame_id, 16384,
testing_clock_.NowTicks());
const int bitrate = congestion_control_->GetBitrate(
bitrate = congestion_control_->GetBitrate(
testing_clock_.NowTicks() + kFakePlayoutDelay, kFakePlayoutDelay);
ASSERT_GE(bitrate, kMinBitrateConfigured);
ASSERT_LE(bitrate, kMaxBitrateConfigured);
@ -180,7 +180,7 @@ TEST_F(CongestionControlTest, RetainsSufficientHistory) {
for (int i = 0; i < kMaxUnackedFrames; ++i) {
congestion_control_->AckFrame(frame_id, testing_clock_.NowTicks());
const int bitrate = congestion_control_->GetBitrate(
bitrate = congestion_control_->GetBitrate(
testing_clock_.NowTicks() + kFakePlayoutDelay, kFakePlayoutDelay);
ASSERT_GE(bitrate, kMinBitrateConfigured);
ASSERT_LE(bitrate, kMaxBitrateConfigured);

@ -415,7 +415,7 @@ TEST_F(VideoSenderTest, StopSendingInTheAbsenceOfAck) {
// Send 3 more frames and record the number of packets sent.
for (int i = 0; i < 3; ++i) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
video_frame = GetNewVideoFrame();
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
RunTasks(33);
}
@ -424,7 +424,7 @@ TEST_F(VideoSenderTest, StopSendingInTheAbsenceOfAck) {
// Send 3 more frames - they should not be encoded, as we have not received
// any acks.
for (int i = 0; i < 3; ++i) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
video_frame = GetNewVideoFrame();
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
RunTasks(33);
}
@ -461,7 +461,7 @@ TEST_F(VideoSenderTest, DuplicateAckRetransmit) {
// Send 3 more frames but don't ACK.
for (int i = 0; i < 3; ++i) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
video_frame = GetNewVideoFrame();
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
RunTasks(33);
}
@ -504,7 +504,7 @@ TEST_F(VideoSenderTest, DuplicateAckRetransmitDoesNotCancelRetransmits) {
// Send 2 more frames but don't ACK.
for (int i = 0; i < 2; ++i) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
video_frame = GetNewVideoFrame();
video_sender_->InsertRawVideoFrame(video_frame, testing_clock_.NowTicks());
RunTasks(33);
}

@ -52,7 +52,7 @@ FFmpegAudioDecoder::FFmpegAudioDecoder(
const scoped_refptr<base::SequencedTaskRunner>& task_runner,
MediaLog* media_log)
: task_runner_(task_runner),
state_(kUninitialized),
state_(DecoderState::kUninitialized),
av_sample_format_(0),
media_log_(media_log),
pool_(new AudioBufferMemoryPool()) {
@ -62,7 +62,7 @@ FFmpegAudioDecoder::FFmpegAudioDecoder(
FFmpegAudioDecoder::~FFmpegAudioDecoder() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (state_ != kUninitialized)
if (state_ != DecoderState::kUninitialized)
ReleaseFFmpegResources();
}
@ -105,7 +105,7 @@ void FFmpegAudioDecoder::Initialize(const AudioDecoderConfig& config,
// Success!
config_ = config;
output_cb_ = BindToCurrentLoop(output_cb);
state_ = kNormal;
state_ = DecoderState::kNormal;
std::move(bound_init_cb).Run(OkStatus());
}
@ -113,16 +113,16 @@ void FFmpegAudioDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DecodeCB decode_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(decode_cb);
CHECK_NE(state_, kUninitialized);
CHECK_NE(state_, DecoderState::kUninitialized);
DecodeCB decode_cb_bound = BindToCurrentLoop(std::move(decode_cb));
if (state_ == kError) {
if (state_ == DecoderState::kError) {
std::move(decode_cb_bound).Run(DecodeStatus::DECODE_ERROR);
return;
}
// Do nothing if decoding has finished.
if (state_ == kDecodeFinished) {
if (state_ == DecoderState::kDecodeFinished) {
std::move(decode_cb_bound).Run(DecodeStatus::OK);
return;
}
@ -134,7 +134,7 @@ void FFmpegAudioDecoder::Reset(base::OnceClosure closure) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
avcodec_flush_buffers(codec_context_.get());
state_ = kNormal;
state_ = DecoderState::kNormal;
ResetTimestampState(config_);
task_runner_->PostTask(FROM_HERE, std::move(closure));
}
@ -142,9 +142,9 @@ void FFmpegAudioDecoder::Reset(base::OnceClosure closure) {
void FFmpegAudioDecoder::DecodeBuffer(const DecoderBuffer& buffer,
DecodeCB decode_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_NE(state_, kUninitialized);
DCHECK_NE(state_, kDecodeFinished);
DCHECK_NE(state_, kError);
DCHECK_NE(state_, DecoderState::kUninitialized);
DCHECK_NE(state_, DecoderState::kDecodeFinished);
DCHECK_NE(state_, DecoderState::kError);
// Make sure we are notified if http://crbug.com/49709 returns. Issue also
// occurs with some damaged files.
@ -155,13 +155,13 @@ void FFmpegAudioDecoder::DecodeBuffer(const DecoderBuffer& buffer,
}
if (!FFmpegDecode(buffer)) {
state_ = kError;
state_ = DecoderState::kError;
std::move(decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
if (buffer.end_of_stream())
state_ = kDecodeFinished;
state_ = DecoderState::kDecodeFinished;
std::move(decode_cb).Run(DecodeStatus::OK);
}
@ -337,7 +337,7 @@ bool FFmpegAudioDecoder::ConfigureDecoder(const AudioDecoderConfig& config) {
DLOG(ERROR) << "Could not initialize audio decoder: "
<< codec_context_->codec_id;
ReleaseFFmpegResources();
state_ = kUninitialized;
state_ = DecoderState::kUninitialized;
return false;
}
// Verify avcodec_open2() used all given options.
@ -352,7 +352,7 @@ bool FFmpegAudioDecoder::ConfigureDecoder(const AudioDecoderConfig& config) {
<< " channels, but FFmpeg thinks the file contains "
<< codec_context_->channels << " channels";
ReleaseFFmpegResources();
state_ = kUninitialized;
state_ = DecoderState::kUninitialized;
return false;
}

@ -73,12 +73,7 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
// A decoding error occurs and decoding needs to stop.
// (any state) -> kNormal:
// Any time Reset() is called.
enum DecoderState {
kUninitialized,
kNormal,
kDecodeFinished,
kError
};
enum class DecoderState { kUninitialized, kNormal, kDecodeFinished, kError };
// Reset decoder and call |reset_cb_|.
void DoReset();

@ -260,7 +260,7 @@ void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Success!
config_ = config;
output_cb_ = output_cb;
state_ = kNormal;
state_ = DecoderState::kNormal;
std::move(bound_init_cb).Run(OkStatus());
}
@ -270,48 +270,48 @@ void FFmpegVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer.get());
DCHECK(decode_cb);
CHECK_NE(state_, kUninitialized);
CHECK_NE(state_, DecoderState::kUninitialized);
DecodeCB decode_cb_bound = BindToCurrentLoop(std::move(decode_cb));
if (state_ == kError) {
if (state_ == DecoderState::kError) {
std::move(decode_cb_bound).Run(DecodeStatus::DECODE_ERROR);
return;
}
if (state_ == kDecodeFinished) {
if (state_ == DecoderState::kDecodeFinished) {
std::move(decode_cb_bound).Run(DecodeStatus::OK);
return;
}
DCHECK_EQ(state_, kNormal);
DCHECK_EQ(state_, DecoderState::kNormal);
// During decode, because reads are issued asynchronously, it is possible to
// receive multiple end of stream buffers since each decode is acked. There
// are three states the decoder can be in:
//
// kNormal: This is the starting state. Buffers are decoded. Decode errors
// are discarded.
// kDecodeFinished: All calls return empty frames.
// kError: Unexpected error happened.
// DecoderState::kNormal: This is the starting state. Buffers are decoded.
// Decode errors are discarded.
// DecoderState::kDecodeFinished: All calls return empty frames.
// DecoderState::kError: Unexpected error happened.
//
// These are the possible state transitions.
//
// kNormal -> kDecodeFinished:
// DecoderState::kNormal -> DecoderState::kDecodeFinished:
// When EOS buffer is received and the codec has been flushed.
// kNormal -> kError:
// DecoderState::kNormal -> DecoderState::kError:
// A decoding error occurs and decoding needs to stop.
// (any state) -> kNormal:
// (any state) -> DecoderState::kNormal:
// Any time Reset() is called.
if (!FFmpegDecode(*buffer)) {
state_ = kError;
state_ = DecoderState::kError;
std::move(decode_cb_bound).Run(DecodeStatus::DECODE_ERROR);
return;
}
if (buffer->end_of_stream())
state_ = kDecodeFinished;
state_ = DecoderState::kDecodeFinished;
// VideoDecoderShim expects that |decode_cb| is called only after
// |output_cb_|.
@ -323,7 +323,7 @@ void FFmpegVideoDecoder::Reset(base::OnceClosure closure) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
avcodec_flush_buffers(codec_context_.get());
state_ = kNormal;
state_ = DecoderState::kNormal;
// PostTask() to avoid calling |closure| immediately.
base::SequencedTaskRunnerHandle::Get()->PostTask(FROM_HERE,
std::move(closure));
@ -332,7 +332,7 @@ void FFmpegVideoDecoder::Reset(base::OnceClosure closure) {
FFmpegVideoDecoder::~FFmpegVideoDecoder() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (state_ != kUninitialized)
if (state_ != DecoderState::kUninitialized)
ReleaseFFmpegResources();
}

@ -60,12 +60,7 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
void force_allocation_error_for_testing() { force_allocation_error_ = true; }
private:
enum DecoderState {
kUninitialized,
kNormal,
kDecodeFinished,
kError
};
enum class DecoderState { kUninitialized, kNormal, kDecodeFinished, kError };
// Handles decoding of an unencrypted encoded buffer. A return value of false
// indicates that an error has occurred.
@ -83,7 +78,7 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
MediaLog* const media_log_;
DecoderState state_ = kUninitialized;
DecoderState state_ = DecoderState::kUninitialized;
OutputCB output_cb_;

@ -683,19 +683,19 @@ bool SourceBufferState::OnNewConfigs(
std::vector<AudioDecoderConfig>{audio_config});
} else {
if (audio_streams_.size() > 1) {
auto it = audio_streams_.find(track_id);
if (it != audio_streams_.end())
stream = it->second;
auto stream_it = audio_streams_.find(track_id);
if (stream_it != audio_streams_.end())
stream = stream_it->second;
} else {
// If there is only one audio track then bytestream id might change in
// a new init segment. So update our state and notify frame processor.
const auto& it = audio_streams_.begin();
if (it != audio_streams_.end()) {
stream = it->second;
if (it->first != track_id) {
track_id_changes[it->first] = track_id;
const auto& stream_it = audio_streams_.begin();
if (stream_it != audio_streams_.end()) {
stream = stream_it->second;
if (stream_it->first != track_id) {
track_id_changes[stream_it->first] = track_id;
audio_streams_[track_id] = stream;
audio_streams_.erase(it->first);
audio_streams_.erase(stream_it->first);
}
}
}
@ -771,19 +771,19 @@ bool SourceBufferState::OnNewConfigs(
std::vector<VideoDecoderConfig>{video_config});
} else {
if (video_streams_.size() > 1) {
auto it = video_streams_.find(track_id);
if (it != video_streams_.end())
stream = it->second;
auto stream_it = video_streams_.find(track_id);
if (stream_it != video_streams_.end())
stream = stream_it->second;
} else {
// If there is only one video track then bytestream id might change in
// a new init segment. So update our state and notify frame processor.
const auto& it = video_streams_.begin();
if (it != video_streams_.end()) {
stream = it->second;
if (it->first != track_id) {
track_id_changes[it->first] = track_id;
const auto& stream_it = video_streams_.begin();
if (stream_it != video_streams_.end()) {
stream = stream_it->second;
if (stream_it->first != track_id) {
track_id_changes[stream_it->first] = track_id;
video_streams_[track_id] = stream;
video_streams_.erase(it->first);
video_streams_.erase(stream_it->first);
}
}
}

@ -1414,12 +1414,12 @@ TEST_F(VideoRendererAlgorithmTest, VariablePlaybackRateCadence) {
TickGenerator frame_tg(base::TimeTicks(), NTSC(30));
TickGenerator display_tg(tick_clock_->NowTicks(), 60);
const double kTestRates[] = {1.0, 2, 0.215, 0.5, 1.0, 3.15};
const bool kTestRateHasCadence[base::size(kTestRates)] = {true, true, true,
true, true, false};
const double kPlaybackRates[] = {1.0, 2, 0.215, 0.5, 1.0, 3.15};
const bool kTestRateHasCadence[base::size(kPlaybackRates)] = {
true, true, true, true, true, false};
for (size_t i = 0; i < base::size(kTestRates); ++i) {
const double playback_rate = kTestRates[i];
for (size_t i = 0; i < base::size(kPlaybackRates); ++i) {
const double playback_rate = kPlaybackRates[i];
SCOPED_TRACE(base::StringPrintf("Playback Rate: %.03f", playback_rate));
time_source_.SetPlaybackRate(playback_rate);
RunFramePumpTest(

@ -158,7 +158,7 @@ void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Success!
config_ = config;
state_ = kNormal;
state_ = DecoderState::kNormal;
output_cb_ = output_cb;
std::move(bound_init_cb).Run(OkStatus());
}
@ -169,32 +169,32 @@ void VpxVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer);
DCHECK(decode_cb);
DCHECK_NE(state_, kUninitialized)
DCHECK_NE(state_, DecoderState::kUninitialized)
<< "Called Decode() before successful Initialize()";
DecodeCB bound_decode_cb = bind_callbacks_
? BindToCurrentLoop(std::move(decode_cb))
: std::move(decode_cb);
if (state_ == kError) {
if (state_ == DecoderState::kError) {
std::move(bound_decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
if (state_ == kDecodeFinished) {
if (state_ == DecoderState::kDecodeFinished) {
std::move(bound_decode_cb).Run(DecodeStatus::OK);
return;
}
if (state_ == kNormal && buffer->end_of_stream()) {
state_ = kDecodeFinished;
if (state_ == DecoderState::kNormal && buffer->end_of_stream()) {
state_ = DecoderState::kDecodeFinished;
std::move(bound_decode_cb).Run(DecodeStatus::OK);
return;
}
scoped_refptr<VideoFrame> video_frame;
if (!VpxDecode(buffer.get(), &video_frame)) {
state_ = kError;
state_ = DecoderState::kError;
std::move(bound_decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
@ -212,7 +212,7 @@ void VpxVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
void VpxVideoDecoder::Reset(base::OnceClosure reset_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
state_ = kNormal;
state_ = DecoderState::kNormal;
if (bind_callbacks_)
BindToCurrentLoop(std::move(reset_cb)).Run();

@ -55,13 +55,7 @@ class MEDIA_EXPORT VpxVideoDecoder : public OffloadableVideoDecoder {
}
private:
enum DecoderState {
kUninitialized,
kNormal,
kFlushCodec,
kDecodeFinished,
kError
};
enum class DecoderState { kUninitialized, kNormal, kDecodeFinished, kError };
// Return values for decoding alpha plane.
enum AlphaDecodeStatus {
@ -99,7 +93,7 @@ class MEDIA_EXPORT VpxVideoDecoder : public OffloadableVideoDecoder {
// |state_| must only be read and written to on |offload_task_runner_| if it
// is non-null and there are outstanding tasks on the offload thread.
DecoderState state_ = kUninitialized;
DecoderState state_ = DecoderState::kUninitialized;
OutputCB output_cb_;

@ -1186,16 +1186,16 @@ bool VideoSampleEntry::Parse(BoxReader* reader) {
video_color_space = vp_config->color_space;
video_codec_level = vp_config->level;
SMPTE2086MasteringDisplayMetadataBox mastering_display_color_volume;
if (reader->HasChild(&mastering_display_color_volume)) {
RCHECK(reader->ReadChild(&mastering_display_color_volume));
this->mastering_display_color_volume = mastering_display_color_volume;
SMPTE2086MasteringDisplayMetadataBox color_volume;
if (reader->HasChild(&color_volume)) {
RCHECK(reader->ReadChild(&color_volume));
mastering_display_color_volume = color_volume;
}
ContentLightLevel content_light_level_information;
if (reader->HasChild(&content_light_level_information)) {
RCHECK(reader->ReadChild(&content_light_level_information));
this->content_light_level_information = content_light_level_information;
ContentLightLevel level_information;
if (reader->HasChild(&level_information)) {
RCHECK(reader->ReadChild(&level_information));
content_light_level_information = level_information;
}
break;
}
@ -1225,16 +1225,16 @@ bool VideoSampleEntry::Parse(BoxReader* reader) {
color_parameter_information);
}
MasteringDisplayColorVolume mastering_display_color_volume;
if (reader->HasChild(&mastering_display_color_volume)) {
RCHECK(reader->ReadChild(&mastering_display_color_volume));
this->mastering_display_color_volume = mastering_display_color_volume;
MasteringDisplayColorVolume color_volume;
if (reader->HasChild(&color_volume)) {
RCHECK(reader->ReadChild(&color_volume));
mastering_display_color_volume = color_volume;
}
ContentLightLevelInformation content_light_level_information;
if (reader->HasChild(&content_light_level_information)) {
RCHECK(reader->ReadChild(&content_light_level_information));
this->content_light_level_information = content_light_level_information;
ContentLightLevelInformation level_information;
if (reader->HasChild(&level_information)) {
RCHECK(reader->ReadChild(&level_information));
content_light_level_information = level_information;
}
if (video_codec_profile == VIDEO_CODEC_PROFILE_UNKNOWN) {

@ -109,7 +109,7 @@ H264Decoder::H264Accelerator::ParseEncryptedSliceHeader(
H264Decoder::H264Decoder(std::unique_ptr<H264Accelerator> accelerator,
VideoCodecProfile profile,
const VideoColorSpace& container_color_space)
: state_(kNeedStreamMetadata),
: state_(State::kNeedStreamMetadata),
container_color_space_(container_color_space),
max_frame_num_(0),
max_pic_num_(0),
@ -159,8 +159,8 @@ void H264Decoder::Reset() {
// If we are in kDecoding, we can resume without processing an SPS.
// The state becomes kDecoding again, (1) at the first IDR slice or (2) at
// the first slice after the recovery point SEI.
if (state_ == kDecoding)
state_ = kAfterReset;
if (state_ == State::kDecoding)
state_ = State::kAfterReset;
}
void H264Decoder::PrepareRefPicLists() {
@ -876,8 +876,8 @@ bool H264Decoder::HandleMemoryManagementOps(scoped_refptr<H264Picture> pic) {
ref_pic_marking->max_long_term_frame_idx_plus1 - 1;
H264Picture::Vector long_terms;
dpb_.GetLongTermRefPicsAppending(&long_terms);
for (size_t i = 0; i < long_terms.size(); ++i) {
scoped_refptr<H264Picture>& long_term_pic = long_terms[i];
for (size_t long_term = 0; long_term < long_terms.size(); ++long_term) {
scoped_refptr<H264Picture>& long_term_pic = long_terms[long_term];
DCHECK(long_term_pic->ref && long_term_pic->long_term);
// Ok to cast, max_long_term_frame_idx is much smaller than 16bit.
if (long_term_pic->long_term_frame_idx >
@ -899,8 +899,8 @@ bool H264Decoder::HandleMemoryManagementOps(scoped_refptr<H264Picture> pic) {
// First unmark if any existing with this long_term_frame_idx...
H264Picture::Vector long_terms;
dpb_.GetLongTermRefPicsAppending(&long_terms);
for (size_t i = 0; i < long_terms.size(); ++i) {
scoped_refptr<H264Picture>& long_term_pic = long_terms[i];
for (size_t long_term = 0; long_term < long_terms.size(); ++long_term) {
scoped_refptr<H264Picture>& long_term_pic = long_terms[long_term];
DCHECK(long_term_pic->ref && long_term_pic->long_term);
// Ok to cast, long_term_frame_idx is much smaller than 16bit.
if (long_term_pic->long_term_frame_idx ==
@ -1364,7 +1364,7 @@ H264Decoder::H264Accelerator::Status H264Decoder::ProcessCurrentSlice() {
#define SET_ERROR_AND_RETURN() \
do { \
DVLOG(1) << "Error during decode"; \
state_ = kError; \
state_ = State::kError; \
return H264Decoder::kDecodeError; \
} while (0)
@ -1408,7 +1408,7 @@ void H264Decoder::SetStream(int32_t id, const DecoderBuffer& decoder_buffer) {
}
H264Decoder::DecodeResult H264Decoder::Decode() {
if (state_ == kError) {
if (state_ == State::kError) {
DVLOG(1) << "Decoder in error state";
return kDecodeError;
}
@ -1457,7 +1457,8 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
case H264NALU::kNonIDRSlice:
// We can't resume from a non-IDR slice unless recovery point SEI
// process is going.
if (state_ == kError || (state_ == kAfterReset && !recovery_frame_cnt_))
if (state_ == State::kError ||
(state_ == State::kAfterReset && !recovery_frame_cnt_))
break;
FALLTHROUGH;
@ -1465,7 +1466,7 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
// TODO(posciak): the IDR may require an SPS that we don't have
// available. For now we'd fail if that happens, but ideally we'd like
// to keep going until the next SPS in the stream.
if (state_ == kNeedStreamMetadata) {
if (state_ == State::kNeedStreamMetadata) {
// We need an SPS, skip this IDR and keep looking.
break;
}
@ -1479,10 +1480,10 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
// steps will be executed.
if (!curr_slice_hdr_) {
curr_slice_hdr_ = std::make_unique<H264SliceHeader>();
state_ = kParseSliceHeader;
state_ = State::kParseSliceHeader;
}
if (state_ == kParseSliceHeader) {
if (state_ == State::kParseSliceHeader) {
// Check if the slice header is encrypted.
bool parsed_header = false;
if (current_decrypt_config_) {
@ -1504,18 +1505,18 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
if (par_res != H264Parser::kOk)
SET_ERROR_AND_RETURN();
}
state_ = kTryPreprocessCurrentSlice;
state_ = State::kTryPreprocessCurrentSlice;
}
if (state_ == kTryPreprocessCurrentSlice) {
if (state_ == State::kTryPreprocessCurrentSlice) {
CHECK_ACCELERATOR_RESULT(PreprocessCurrentSlice());
state_ = kEnsurePicture;
state_ = State::kEnsurePicture;
}
if (state_ == kEnsurePicture) {
if (state_ == State::kEnsurePicture) {
if (curr_pic_) {
// |curr_pic_| already exists, so skip to ProcessCurrentSlice().
state_ = kTryCurrentSlice;
state_ = State::kTryCurrentSlice;
} else {
// New picture/finished previous one, try to start a new one
// or tell the client we need more surfaces.
@ -1525,19 +1526,19 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
if (current_decrypt_config_)
curr_pic_->set_decrypt_config(current_decrypt_config_->Clone());
state_ = kTryNewFrame;
state_ = State::kTryNewFrame;
}
}
if (state_ == kTryNewFrame) {
if (state_ == State::kTryNewFrame) {
CHECK_ACCELERATOR_RESULT(StartNewFrame(curr_slice_hdr_.get()));
state_ = kTryCurrentSlice;
state_ = State::kTryCurrentSlice;
}
DCHECK_EQ(state_, kTryCurrentSlice);
DCHECK_EQ(state_, State::kTryCurrentSlice);
CHECK_ACCELERATOR_RESULT(ProcessCurrentSlice());
curr_slice_hdr_.reset();
state_ = kDecoding;
state_ = State::kDecoding;
break;
}
@ -1555,8 +1556,8 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
last_sps_nalu_.assign(curr_nalu_->data,
curr_nalu_->data + curr_nalu_->size);
if (state_ == kNeedStreamMetadata)
state_ = kAfterReset;
if (state_ == State::kNeedStreamMetadata)
state_ = State::kAfterReset;
if (need_new_buffers) {
curr_pic_ = nullptr;
@ -1584,7 +1585,7 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
case H264NALU::kAUD:
case H264NALU::kEOSeq:
case H264NALU::kEOStream:
if (state_ != kDecoding)
if (state_ != State::kDecoding)
break;
CHECK_ACCELERATOR_RESULT(FinishPrevFrameIfPresent());
@ -1605,7 +1606,7 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
sei_subsamples_.push_back(subsamples[0]);
}
}
if (state_ == kAfterReset && !recovery_frame_cnt_ &&
if (state_ == State::kAfterReset && !recovery_frame_cnt_ &&
!recovery_frame_num_) {
// If we are after reset, we can also resume from a SEI recovery point
// (spec D.2.8) if one is present. However, if we are already in the

@ -194,7 +194,7 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
private:
// Internal state of the decoder.
enum State {
enum class State {
// After initialization, need an SPS.
kNeedStreamMetadata,
// Ready to decode from any point.

@ -2742,8 +2742,8 @@ void DXVAVideoDecodeAccelerator::BindPictureBufferToSample(
// Get the viz resource format per texture.
std::array<viz::ResourceFormat, VideoFrame::kMaxPlanes> viz_formats;
{
const bool result = VideoPixelFormatToVizFormat(
picture_buffer->pixel_format(), textures_per_picture, viz_formats);
result = VideoPixelFormatToVizFormat(picture_buffer->pixel_format(),
textures_per_picture, viz_formats);
RETURN_AND_NOTIFY_ON_FAILURE(
result, "Could not convert pixel format to viz format",
PLATFORM_FAILURE, );
@ -2899,8 +2899,7 @@ void DXVAVideoDecodeAccelerator::CopyTextureOnDecoderThread(
DCHECK(d3d11_processor_.Get());
if (dest_keyed_mutex) {
HRESULT hr =
dest_keyed_mutex->AcquireSync(keyed_mutex_value, kAcquireSyncWaitMs);
hr = dest_keyed_mutex->AcquireSync(keyed_mutex_value, kAcquireSyncWaitMs);
RETURN_AND_NOTIFY_ON_FAILURE(
hr == S_OK, "D3D11 failed to acquire keyed mutex for texture.",
PLATFORM_FAILURE, );
@ -2956,7 +2955,7 @@ void DXVAVideoDecodeAccelerator::CopyTextureOnDecoderThread(
PLATFORM_FAILURE, );
if (dest_keyed_mutex) {
HRESULT hr = dest_keyed_mutex->ReleaseSync(keyed_mutex_value + 1);
hr = dest_keyed_mutex->ReleaseSync(keyed_mutex_value + 1);
RETURN_AND_NOTIFY_ON_FAILURE(hr == S_OK, "Failed to release keyed mutex.",
PLATFORM_FAILURE, );

@ -57,15 +57,15 @@ void ExtraTreesTrainer::OnRandomTreeModel(TrainedModelCB model_cb,
// If this is the last tree, then return the finished model.
if (trees_.size() == task_.rf_number_of_trees) {
std::unique_ptr<Model> model =
std::unique_ptr<Model> finished_model =
std::make_unique<VotingEnsemble>(std::move(trees_));
// If we have a converter, then wrap everything in a ConvertingModel.
if (converter_) {
model = std::make_unique<ConvertingModel>(std::move(converter_),
std::move(model));
finished_model = std::make_unique<ConvertingModel>(
std::move(converter_), std::move(finished_model));
}
std::move(model_cb).Run(std::move(model));
std::move(model_cb).Run(std::move(finished_model));
return;
}

@ -15,8 +15,6 @@ UsbMidiOutputStream::UsbMidiOutputStream(const UsbMidiJack& jack)
: jack_(jack), pending_size_(0), is_sending_sysex_(false) {}
void UsbMidiOutputStream::Send(const std::vector<uint8_t>& data) {
// To prevent link errors caused by DCHECK_*.
const size_t kPacketContentSize = UsbMidiOutputStream::kPacketContentSize;
DCHECK_LT(jack_.cable_number, 16u);
std::vector<uint8_t> data_to_send;

@ -538,11 +538,10 @@ void AudioRendererImpl::OnDeviceInfoReceived(
// mixer will attempt to up-mix stereo source streams to just the left/right
// speaker of the 5.1 setup, nulling out the other channels
// (http://crbug.com/177872).
ChannelLayout hw_channel_layout =
hw_params.channel_layout() == CHANNEL_LAYOUT_DISCRETE ||
try_supported_channel_layouts
? CHANNEL_LAYOUT_STEREO
: hw_params.channel_layout();
hw_channel_layout = hw_params.channel_layout() == CHANNEL_LAYOUT_DISCRETE ||
try_supported_channel_layouts
? CHANNEL_LAYOUT_STEREO
: hw_params.channel_layout();
int hw_channel_count = ChannelLayoutToChannelCount(hw_channel_layout);
// The layout we pass to |audio_parameters_| will be used for the lifetime

@ -682,20 +682,20 @@ TEST_F(PaintCanvasVideoRendererTest, Y16) {
TEST_F(PaintCanvasVideoRendererTest, Yuv420P12OddWidth) {
// Allocate the Y, U, V planes for a 3x3 12-bit YUV 4:2:0 image. Note that
// there are no padding bytes after each row.
constexpr int kWidth = 3;
constexpr int kHeight = 3;
constexpr int kUvWidth = (kWidth + 1) / 2;
constexpr int kUvHeight = (kHeight + 1) / 2;
constexpr int kImgWidth = 3;
constexpr int kImgHeight = 3;
constexpr int kUvWidth = (kImgWidth + 1) / 2;
constexpr int kUvHeight = (kImgHeight + 1) / 2;
std::unique_ptr<uint16_t[]> y_plane =
std::make_unique<uint16_t[]>(kWidth * kHeight);
std::make_unique<uint16_t[]>(kImgWidth * kImgHeight);
std::unique_ptr<uint16_t[]> u_plane =
std::make_unique<uint16_t[]>(kUvWidth * kUvHeight);
std::unique_ptr<uint16_t[]> v_plane =
std::make_unique<uint16_t[]>(kUvWidth * kUvHeight);
// Set all pixels to white.
for (int i = 0; i < kHeight; ++i) {
for (int j = 0; j < kWidth; ++j) {
y_plane[i * kWidth + j] = 4095;
for (int i = 0; i < kImgHeight; ++i) {
for (int j = 0; j < kImgWidth; ++j) {
y_plane[i * kImgWidth + j] = 4095;
}
}
for (int i = 0; i < kUvHeight; ++i) {
@ -704,25 +704,25 @@ TEST_F(PaintCanvasVideoRendererTest, Yuv420P12OddWidth) {
v_plane[i * kUvWidth + j] = 2048;
}
}
const int32_t y_stride = sizeof(uint16_t) * kWidth;
const int32_t y_stride = sizeof(uint16_t) * kImgWidth;
const int32_t uv_stride = sizeof(uint16_t) * kUvWidth;
uint8_t* const y_data = reinterpret_cast<uint8_t*>(y_plane.get());
uint8_t* const u_data = reinterpret_cast<uint8_t*>(u_plane.get());
uint8_t* const v_data = reinterpret_cast<uint8_t*>(v_plane.get());
auto size = gfx::Size(kWidth, kHeight);
auto size = gfx::Size(kImgWidth, kImgHeight);
scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalYuvData(
PIXEL_FORMAT_YUV420P12, size, gfx::Rect(size), size, y_stride, uv_stride,
uv_stride, y_data, u_data, v_data, base::TimeDelta());
std::unique_ptr<uint32_t[]> rgba =
std::make_unique<uint32_t[]>(kWidth * kHeight);
std::make_unique<uint32_t[]>(kImgWidth * kImgHeight);
PaintCanvasVideoRenderer::ConvertVideoFrameToRGBPixels(
frame.get(), rgba.get(), frame->visible_rect().width() * 4,
/*premultiply_alpha=*/true);
for (int i = 0; i < kHeight; ++i) {
for (int j = 0; j < kWidth; ++j) {
EXPECT_EQ(rgba[i * kWidth + j], 0xffffffff);
for (int i = 0; i < kImgHeight; ++i) {
for (int j = 0; j < kImgWidth; ++j) {
EXPECT_EQ(rgba[i * kImgWidth + j], 0xffffffff);
}
}
}

@ -1336,15 +1336,15 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::MailboxHoldersReleased(
frame_resources->MarkUnused(now);
auto it = resources_pool_.begin();
while (it != resources_pool_.end()) {
FrameResources* frame_resources = *it;
FrameResources* resources = *it;
constexpr base::TimeDelta kStaleFrameLimit =
base::TimeDelta::FromSeconds(10);
if (!frame_resources->is_used() &&
now - frame_resources->last_use_time() > kStaleFrameLimit) {
if (!resources->is_used() &&
now - resources->last_use_time() > kStaleFrameLimit) {
resources_pool_.erase(it++);
DeleteFrameResources(gpu_factories_, frame_resources);
delete frame_resources;
DeleteFrameResources(gpu_factories_, resources);
delete resources;
} else {
it++;
}

@ -348,7 +348,6 @@ void VpxVideoEncoder::Initialize(VideoCodecProfile profile,
void VpxVideoEncoder::Encode(scoped_refptr<VideoFrame> frame,
bool key_frame,
StatusCB done_cb) {
Status status;
done_cb = BindToCurrentLoop(std::move(done_cb));
if (!codec_) {
std::move(done_cb).Run(StatusCode::kEncoderInitializeNeverCompleted);
@ -368,7 +367,7 @@ void VpxVideoEncoder::Encode(scoped_refptr<VideoFrame> frame,
frame->format() == PIXEL_FORMAT_ARGB;
if ((!frame->IsMappable() && !frame->HasGpuMemoryBuffer()) ||
!supported_format) {
status =
Status status =
Status(StatusCode::kEncoderFailedEncode, "Unexpected frame format.")
.WithData("IsMappable", frame->IsMappable())
.WithData("format", frame->format());
@ -392,6 +391,7 @@ void VpxVideoEncoder::Encode(scoped_refptr<VideoFrame> frame,
is_yuv ? frame->format() : PIXEL_FORMAT_I420, options_.frame_size,
gfx::Rect(options_.frame_size), options_.frame_size,
frame->timestamp());
Status status;
if (resized_frame) {
status = ConvertAndScaleFrame(*frame, *resized_frame, resize_buf_);
} else {
@ -498,8 +498,8 @@ void VpxVideoEncoder::Encode(scoped_refptr<VideoFrame> frame,
vpx_codec_err_to_string(vpx_error),
vpx_codec_error_detail(codec_.get()));
DLOG(ERROR) << msg;
status = Status(StatusCode::kEncoderFailedEncode, msg)
.WithData("vpx_error", vpx_error);
Status status = Status(StatusCode::kEncoderFailedEncode, msg)
.WithData("vpx_error", vpx_error);
std::move(done_cb).Run(std::move(status));
return;
}