0

Make AudioDeviceFactory a singleton

Previously AudioDeviceFactory was an interface where all methods were
protected and called from static methods of AudioDeviceFactory. This
approach is uncommon in Chromium and makes it harder to override
AudioDeviceFactory behavior (since it required all methods to be
implemented in order to override only some of the default behavior
provided by the static methods). This change makes AudioDeviceFactory
a singleton. Now clients should get the instance in order to use it
instead of calling static methods, i.e. AudioDeviceFactory::NewFoo()
becomes AudioDeviceFactory::GetInstance()->NewFoo().
All methods implemented in AudioDeviceFactory provide default behavior,
which can be overridden in child classes as necessary.
Also, AudioRendererMixerManager no longer needs to be a singleton. Now
it's created and owned by AudioDeviceFactory.

Bug: 1253010
Change-Id: I9375e7cdc695a82078b0f76c25594c13fa9b7958
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3692334
Reviewed-by: Olga Sharonova <olka@chromium.org>
Reviewed-by: Dale Curtis <dalecurtis@chromium.org>
Commit-Queue: Sergey Ulanov <sergeyu@chromium.org>
Reviewed-by: danakj <danakj@chromium.org>
Reviewed-by: Kenneth MacKay <kmackay@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1012764}
This commit is contained in:
Sergey Ulanov
2022-06-09 23:21:24 +00:00
committed by Chromium LUCI CQ
parent ed6ce9979a
commit 1880ad82c1
14 changed files with 158 additions and 299 deletions

@ -184,26 +184,8 @@ CastAudioDeviceFactory::~CastAudioDeviceFactory() {
DVLOG(1) << "Unregister CastAudioDeviceFactory";
}
scoped_refptr<::media::AudioRendererSink>
CastAudioDeviceFactory::CreateFinalAudioRendererSink(
const blink::LocalFrameToken& frame_token,
const ::media::AudioSinkParameters& params,
base::TimeDelta auth_timeout) {
// Use default implementation.
return nullptr;
}
scoped_refptr<::media::AudioRendererSink>
CastAudioDeviceFactory::CreateAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const ::media::AudioSinkParameters& params) {
// Use default implementation.
return nullptr;
}
scoped_refptr<::media::SwitchableAudioRendererSink>
CastAudioDeviceFactory::CreateSwitchableAudioRendererSink(
CastAudioDeviceFactory::NewSwitchableAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const ::media::AudioSinkParameters& params) {
@ -211,13 +193,5 @@ CastAudioDeviceFactory::CreateSwitchableAudioRendererSink(
params);
}
scoped_refptr<::media::AudioCapturerSource>
CastAudioDeviceFactory::CreateAudioCapturerSource(
const blink::LocalFrameToken& frame_token,
const ::media::AudioSourceParameters& params) {
// Use default implementation.
return nullptr;
}
} // namespace media
} // namespace chromecast

@ -13,8 +13,6 @@
#include "third_party/blink/public/web/modules/media/audio/audio_device_factory.h"
namespace media {
class AudioCapturerSource;
class AudioRendererSink;
class SwitchableAudioRendererSink;
} // namespace media
@ -24,31 +22,13 @@ namespace media {
class CastAudioDeviceFactory final : public blink::AudioDeviceFactory {
public:
CastAudioDeviceFactory();
CastAudioDeviceFactory(const CastAudioDeviceFactory&) = delete;
CastAudioDeviceFactory& operator=(const CastAudioDeviceFactory&) = delete;
~CastAudioDeviceFactory() override;
scoped_refptr<::media::AudioRendererSink> CreateFinalAudioRendererSink(
const blink::LocalFrameToken& frame_token,
const ::media::AudioSinkParameters& params,
base::TimeDelta auth_timeout) override;
scoped_refptr<::media::AudioRendererSink> CreateAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const ::media::AudioSinkParameters& params) override;
scoped_refptr<::media::SwitchableAudioRendererSink>
CreateSwitchableAudioRendererSink(
NewSwitchableAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const ::media::AudioSinkParameters& params) override;
scoped_refptr<::media::AudioCapturerSource> CreateAudioCapturerSource(
const blink::LocalFrameToken& frame_token,
const ::media::AudioSourceParameters& params) override;
};
} // namespace media

@ -405,7 +405,7 @@ blink::WebMediaPlayer* MediaFactory::CreateMediaPlayer(
return nullptr;
scoped_refptr<media::SwitchableAudioRendererSink> audio_renderer_sink =
blink::AudioDeviceFactory::NewSwitchableAudioRendererSink(
blink::AudioDeviceFactory::GetInstance()->NewSwitchableAudioRendererSink(
blink::WebAudioDeviceSourceType::kMediaElement,
render_frame_->GetWebFrame()->GetLocalFrameToken(),
media::AudioSinkParameters(/*session_id=*/base::UnguessableToken(),

@ -102,8 +102,8 @@ media::AudioParameters GetOutputDeviceParameters(
const blink::LocalFrameToken& frame_token,
const base::UnguessableToken& session_id,
const std::string& device_id) {
return AudioDeviceFactory::GetOutputDeviceInfo(frame_token,
{session_id, device_id})
return AudioDeviceFactory::GetInstance()
->GetOutputDeviceInfo(frame_token, {session_id, device_id})
.output_params();
}
@ -187,7 +187,7 @@ void RendererWebAudioDeviceImpl::Start() {
if (sink_)
return; // Already started.
sink_ = AudioDeviceFactory::NewAudioRendererSink(
sink_ = AudioDeviceFactory::GetInstance()->NewAudioRendererSink(
GetLatencyHintSourceType(latency_hint_.Category()), frame_token_,
media::AudioSinkParameters(session_id_, std::string()));

@ -88,22 +88,7 @@ class RendererWebAudioDeviceImplTest
blink::scheduler::GetSingleThreadTaskRunnerForTesting());
}
MOCK_METHOD2(CreateAudioCapturerSource,
scoped_refptr<media::AudioCapturerSource>(
const blink::LocalFrameToken&,
const media::AudioSourceParameters&));
MOCK_METHOD3(
CreateFinalAudioRendererSink,
scoped_refptr<media::AudioRendererSink>(const blink::LocalFrameToken&,
const media::AudioSinkParameters&,
base::TimeDelta));
MOCK_METHOD3(CreateSwitchableAudioRendererSink,
scoped_refptr<media::SwitchableAudioRendererSink>(
blink::WebAudioDeviceSourceType,
const blink::LocalFrameToken&,
const media::AudioSinkParameters&));
scoped_refptr<media::AudioRendererSink> CreateAudioRendererSink(
scoped_refptr<media::AudioRendererSink> NewAudioRendererSink(
blink::WebAudioDeviceSourceType render_token,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) override {

@ -2025,9 +2025,10 @@ PP_Var PepperPluginInstanceImpl::ExecuteScript(PP_Instance instance,
uint32_t PepperPluginInstanceImpl::GetAudioHardwareOutputSampleRate(
PP_Instance instance) {
return render_frame()
? blink::AudioDeviceFactory::GetOutputDeviceInfo(
render_frame()->GetWebFrame()->GetLocalFrameToken(),
media::AudioSinkParameters())
? blink::AudioDeviceFactory::GetInstance()
->GetOutputDeviceInfo(
render_frame()->GetWebFrame()->GetLocalFrameToken(),
media::AudioSinkParameters())
.output_params()
.sample_rate()
: 0;
@ -2036,9 +2037,10 @@ uint32_t PepperPluginInstanceImpl::GetAudioHardwareOutputSampleRate(
uint32_t PepperPluginInstanceImpl::GetAudioHardwareOutputBufferSize(
PP_Instance instance) {
return render_frame()
? blink::AudioDeviceFactory::GetOutputDeviceInfo(
render_frame()->GetWebFrame()->GetLocalFrameToken(),
media::AudioSinkParameters())
? blink::AudioDeviceFactory::GetInstance()
->GetOutputDeviceInfo(
render_frame()->GetWebFrame()->GetLocalFrameToken(),
media::AudioSinkParameters())
.output_params()
.frames_per_buffer()
: 0;

@ -5988,10 +5988,11 @@ void RenderFrameImpl::CheckIfAudioSinkExistsAndIsAuthorized(
blink::WebSetSinkIdCompleteCallback completion_callback) {
std::move(
blink::ConvertToOutputDeviceStatusCB(std::move(completion_callback)))
.Run(blink::AudioDeviceFactory::GetOutputDeviceInfo(
GetWebFrame()->GetLocalFrameToken(),
media::AudioSinkParameters(base::UnguessableToken(),
sink_id.Utf8()))
.Run(blink::AudioDeviceFactory::GetInstance()
->GetOutputDeviceInfo(
GetWebFrame()->GetLocalFrameToken(),
media::AudioSinkParameters(base::UnguessableToken(),
sink_id.Utf8()))
.device_status());
}

@ -142,9 +142,9 @@ media::AudioParameters GetAudioHardwareParams() {
if (!render_frame)
return media::AudioParameters::UnavailableDeviceParams();
return blink::AudioDeviceFactory::GetOutputDeviceInfo(
render_frame->GetWebFrame()->GetLocalFrameToken(),
media::AudioSinkParameters())
return blink::AudioDeviceFactory::GetInstance()
->GetOutputDeviceInfo(render_frame->GetWebFrame()->GetLocalFrameToken(),
media::AudioSinkParameters())
.output_params();
}
@ -543,7 +543,7 @@ scoped_refptr<media::AudioCapturerSource>
RendererBlinkPlatformImpl::NewAudioCapturerSource(
blink::WebLocalFrame* web_frame,
const media::AudioSourceParameters& params) {
return blink::AudioDeviceFactory::NewAudioCapturerSource(
return blink::AudioDeviceFactory::GetInstance()->NewAudioCapturerSource(
web_frame->GetLocalFrameToken(), params);
}
@ -592,7 +592,7 @@ RendererBlinkPlatformImpl::NewAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
blink::WebLocalFrame* web_frame,
const media::AudioSinkParameters& params) {
return blink::AudioDeviceFactory::NewAudioRendererSink(
return blink::AudioDeviceFactory::GetInstance()->NewAudioRendererSink(
source_type, web_frame->GetLocalFrameToken(), params);
}

@ -31,43 +31,33 @@ content::RenderFrame* GetRenderFrameForToken(
} // namespace
WebEngineAudioDeviceFactory::WebEngineAudioDeviceFactory()
: audio_capturer_thread_("AudioCapturerThread") {}
WebEngineAudioDeviceFactory::WebEngineAudioDeviceFactory() = default;
WebEngineAudioDeviceFactory::~WebEngineAudioDeviceFactory() = default;
scoped_refptr<media::AudioRendererSink>
WebEngineAudioDeviceFactory::CreateFinalAudioRendererSink(
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params,
base::TimeDelta auth_timeout) {
// Return nullptr to fallback to the default renderer implementation.
return nullptr;
}
scoped_refptr<media::AudioRendererSink>
WebEngineAudioDeviceFactory::CreateAudioRendererSink(
WebEngineAudioDeviceFactory::NewAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) {
bool allow_audio_consumer = true;
switch (source_type) {
case blink::WebAudioDeviceSourceType::kMediaElement:
// MediaElement uses CreateSwitchableAudioRendererSink().
CHECK(false);
// MediaElement uses NewSwitchableAudioRendererSink().
NOTREACHED();
return nullptr;
case blink::WebAudioDeviceSourceType::kWebRtc:
case blink::WebAudioDeviceSourceType::kNonRtcAudioTrack:
// Return nullptr for WebRTC streams. This will cause the caller to
// fallback to AudioOutputDevice, which outputs through
// AudioOutputStreamFuchsia.
return nullptr;
// AudioConsumer is not enabled for WebRTC streams yet.
allow_audio_consumer = false;
break;
// kNone is used in AudioDeviceFactory::GetOutputDeviceInfo() to get
// default output device params.
case blink::WebAudioDeviceSourceType::kNone:
break;
// Create WebEngineAudioDeviceFactory for all WebAudio.
// Create WebEngineAudioDeviceFactory for all WebAudio streams.
case blink::WebAudioDeviceSourceType::kWebAudioInteractive:
case blink::WebAudioDeviceSourceType::kWebAudioBalanced:
case blink::WebAudioDeviceSourceType::kWebAudioPlayback:
@ -75,24 +65,30 @@ WebEngineAudioDeviceFactory::CreateAudioRendererSink(
break;
}
auto* render_frame = GetRenderFrameForToken(frame_token);
CHECK(render_frame);
// Connect WebEngineMediaResourceProvider.
mojo::Remote<mojom::WebEngineMediaResourceProvider> media_resource_provider;
render_frame->GetBrowserInterfaceBroker()->GetInterface(
media_resource_provider.BindNewPipeAndPassReceiver());
// If AudioConsumer is not enabled then fallback to AudioOutputDevice.
bool use_audio_consumer = false;
if (!media_resource_provider->ShouldUseAudioConsumer(&use_audio_consumer) ||
!use_audio_consumer) {
return nullptr;
}
// AudioConsumer can be used only to output to the default device.
if (!params.device_id.empty())
return nullptr;
allow_audio_consumer = false;
mojo::Remote<mojom::WebEngineMediaResourceProvider> media_resource_provider;
bool use_audio_consumer = false;
if (allow_audio_consumer) {
auto* render_frame = GetRenderFrameForToken(frame_token);
CHECK(render_frame);
// Connect WebEngineMediaResourceProvider.
render_frame->GetBrowserInterfaceBroker()->GetInterface(
media_resource_provider.BindNewPipeAndPassReceiver());
bool result =
media_resource_provider->ShouldUseAudioConsumer(&use_audio_consumer);
DCHECK(result);
}
// If AudioConsumer is not enabled then fallback to AudioOutputDevice.
if (!use_audio_consumer) {
return AudioDeviceFactory::NewAudioRendererSink(source_type, frame_token,
params);
}
// Connect AudioConsumer.
fidl::InterfaceHandle<fuchsia::media::AudioConsumer> audio_consumer;
@ -101,20 +97,3 @@ WebEngineAudioDeviceFactory::CreateAudioRendererSink(
return media::FuchsiaAudioOutputDevice::CreateOnDefaultThread(
std::move(audio_consumer));
}
scoped_refptr<media::SwitchableAudioRendererSink>
WebEngineAudioDeviceFactory::CreateSwitchableAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) {
// Return nullptr to fallback to the default renderer implementation.
return nullptr;
}
scoped_refptr<media::AudioCapturerSource>
WebEngineAudioDeviceFactory::CreateAudioCapturerSource(
const blink::LocalFrameToken& frame_token,
const media::AudioSourceParameters& params) {
// Return nullptr to fallback to the default capturer implementation.
return nullptr;
}

@ -5,7 +5,6 @@
#ifndef FUCHSIA_WEB_WEBENGINE_RENDERER_WEB_ENGINE_AUDIO_DEVICE_FACTORY_H_
#define FUCHSIA_WEB_WEBENGINE_RENDERER_WEB_ENGINE_AUDIO_DEVICE_FACTORY_H_
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "third_party/blink/public/common/tokens/tokens.h"
#include "third_party/blink/public/platform/web_common.h"
@ -16,30 +15,11 @@ class WebEngineAudioDeviceFactory final : public blink::AudioDeviceFactory {
WebEngineAudioDeviceFactory();
~WebEngineAudioDeviceFactory() override;
protected:
// WebAudioDeviceFactory overrides.
scoped_refptr<media::AudioRendererSink> CreateFinalAudioRendererSink(
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params,
base::TimeDelta auth_timeout) override;
scoped_refptr<media::AudioRendererSink> CreateAudioRendererSink(
// blink::AudioDeviceFactory overrides.
scoped_refptr<media::AudioRendererSink> NewAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) override;
scoped_refptr<media::SwitchableAudioRendererSink>
CreateSwitchableAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) override;
scoped_refptr<media::AudioCapturerSource> CreateAudioCapturerSource(
const blink::LocalFrameToken& frame_token,
const media::AudioSourceParameters& params) override;
private:
base::Thread audio_capturer_thread_;
};
#endif // FUCHSIA_WEB_WEBENGINE_RENDERER_WEB_ENGINE_AUDIO_DEVICE_FACTORY_H_

@ -23,13 +23,22 @@ class AudioCapturerSource;
namespace blink {
class AudioRendererMixerManager;
class AudioRendererSinkCache;
// A factory for creating AudioRendererSinks and AudioCapturerSources. There is
// a global factory function that can be installed for the purposes of testing
// to provide specialized implementations.
// Public methods can be called only on the main (renderer) thread.
// TODO(crbug.com/1255249): Rename the class and probably split it into
// AudioRendererSinkFactory and AudioCapturerSourceFactory.
class BLINK_MODULES_EXPORT AudioDeviceFactory {
public:
// Returns an instance of this class for the current process.
static AudioDeviceFactory* GetInstance();
explicit AudioDeviceFactory(bool override_default = true);
AudioDeviceFactory(const AudioDeviceFactory&) = delete;
AudioDeviceFactory& operator=(const AudioDeviceFactory&) = delete;
@ -37,21 +46,12 @@ class BLINK_MODULES_EXPORT AudioDeviceFactory {
static media::AudioLatency::LatencyType GetSourceLatencyType(
WebAudioDeviceSourceType source);
// Creates a sink for AudioRendererMixer. |frame_token| refers to the
// RenderFrame containing the entity producing the audio. Note: These sinks do
// not support the blocking GetOutputDeviceInfo() API and instead clients are
// required to use the GetOutputDeviceInfoAsync() API. As such they are
// configured with no authorization timeout value.
static scoped_refptr<media::AudioRendererSink> NewAudioRendererMixerSink(
const LocalFrameToken& frame_token,
const media::AudioSinkParameters& params);
// Creates an AudioRendererSink bound to an AudioOutputDevice.
// Basing on |source_type| and build configuration, audio played out through
// the sink goes to AOD directly or can be mixed with other audio before that.
// TODO(olka): merge it with NewRestartableOutputDevice() as soon as
// AudioOutputDevice is fixed to be restartable.
static scoped_refptr<media::AudioRendererSink> NewAudioRendererSink(
virtual scoped_refptr<media::AudioRendererSink> NewAudioRendererSink(
WebAudioDeviceSourceType source_type,
const LocalFrameToken& frame_token,
const media::AudioSinkParameters& params);
@ -59,65 +59,52 @@ class BLINK_MODULES_EXPORT AudioDeviceFactory {
// Creates a SwitchableAudioRendererSink bound to an AudioOutputDevice
// Basing on |source_type| and build configuration, audio played out through
// the sink goes to AOD directly or can be mixed with other audio before that.
static scoped_refptr<media::SwitchableAudioRendererSink>
virtual scoped_refptr<media::SwitchableAudioRendererSink>
NewSwitchableAudioRendererSink(WebAudioDeviceSourceType source_type,
const LocalFrameToken& frame_token,
const media::AudioSinkParameters& params);
// A helper to get device info in the absence of AudioOutputDevice.
// Must be called on renderer thread only.
static media::OutputDeviceInfo GetOutputDeviceInfo(
virtual media::OutputDeviceInfo GetOutputDeviceInfo(
const LocalFrameToken& frame_token,
const media::AudioSinkParameters& params);
// Creates an AudioCapturerSource using the currently registered factory.
// |frame_token| refers to the RenderFrame containing the entity
// consuming the audio.
static scoped_refptr<media::AudioCapturerSource> NewAudioCapturerSource(
virtual scoped_refptr<media::AudioCapturerSource> NewAudioCapturerSource(
const LocalFrameToken& frame_token,
const media::AudioSourceParameters& params);
protected:
AudioDeviceFactory();
virtual ~AudioDeviceFactory();
// You can derive from this class and specify an implementation for these
// functions to provide alternate audio device implementations.
// If the return value of either of these function is NULL, we fall back
// on the default implementation.
// Creates a sink for a stream that can be mixed with other streams.
scoped_refptr<media::SwitchableAudioRendererSink> NewMixableSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params);
// Creates a sink for AudioRendererMixer. |frame_token| refers to the
// RenderFrame containing the entity producing the audio. Note: These sinks do
// not support the blocking GetOutputDeviceInfo() API and instead clients are
// required to use the GetOutputDeviceInfoAsync() API. As such they are
// configured with no authorization timeout value.
virtual scoped_refptr<media::AudioRendererSink> NewAudioRendererMixerSink(
const LocalFrameToken& frame_token,
const media::AudioSinkParameters& params);
// Creates a final sink in the rendering pipeline, which represents the actual
// output device. |auth_timeout| is the authorization timeout allowed for the
// underlying AudioOutputDevice instance; a timeout of zero means no timeout.
virtual scoped_refptr<media::AudioRendererSink> CreateFinalAudioRendererSink(
const LocalFrameToken& frame_token,
const media::AudioSinkParameters& params,
base::TimeDelta auth_timeout) = 0;
virtual scoped_refptr<media::AudioRendererSink> CreateAudioRendererSink(
WebAudioDeviceSourceType source_type,
const LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) = 0;
virtual scoped_refptr<media::SwitchableAudioRendererSink>
CreateSwitchableAudioRendererSink(
WebAudioDeviceSourceType source_type,
const LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) = 0;
virtual scoped_refptr<media::AudioCapturerSource> CreateAudioCapturerSource(
const LocalFrameToken& frame_token,
const media::AudioSourceParameters& params) = 0;
private:
// The current globally registered factory. This is NULL when we should
// create the default AudioRendererSinks.
static AudioDeviceFactory* factory_;
static scoped_refptr<media::AudioRendererSink> NewFinalAudioRendererSink(
scoped_refptr<media::AudioRendererSink> NewFinalAudioRendererSink(
const LocalFrameToken& frame_token,
const media::AudioSinkParameters& params,
base::TimeDelta auth_timeout);
private:
std::unique_ptr<AudioRendererMixerManager> mixer_manager_;
std::unique_ptr<AudioRendererSinkCache> sink_cache_;
};
} // namespace blink

@ -28,11 +28,11 @@
namespace blink {
// static
AudioDeviceFactory* AudioDeviceFactory::factory_ = nullptr;
namespace {
// Set when the default factory is overridden.
AudioDeviceFactory* g_factory_override = nullptr;
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
BUILDFLAG(IS_CHROMEOS_LACROS)
// Due to driver deadlock issues on Windows (http://crbug/422522) there is a
@ -73,18 +73,31 @@ bool IsMixable(blink::WebAudioDeviceSourceType source_type) {
return source_type == blink::WebAudioDeviceSourceType::kMediaElement;
}
scoped_refptr<media::SwitchableAudioRendererSink> NewMixableSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) {
DCHECK(IsMainThread()) << __func__ << "() is called on a wrong thread.";
return AudioRendererMixerManager::GetInstance().CreateInput(
frame_token, params.session_id, params.device_id,
AudioDeviceFactory::GetSourceLatencyType(source_type));
}
} // namespace
// static
AudioDeviceFactory* AudioDeviceFactory::GetInstance() {
if (g_factory_override)
return g_factory_override;
static base::NoDestructor<AudioDeviceFactory> g_default_factory(
/*override_default=*/false);
return g_default_factory.get();
}
AudioDeviceFactory::AudioDeviceFactory(bool override_default) {
if (override_default) {
DCHECK(!g_factory_override) << "Can't register two factories at once.";
g_factory_override = this;
}
}
AudioDeviceFactory::~AudioDeviceFactory() {
DCHECK_EQ(g_factory_override, this);
g_factory_override = nullptr;
}
// static
media::AudioLatency::LatencyType AudioDeviceFactory::GetSourceLatencyType(
blink::WebAudioDeviceSourceType source) {
switch (source) {
@ -105,28 +118,11 @@ media::AudioLatency::LatencyType AudioDeviceFactory::GetSourceLatencyType(
return media::AudioLatency::LATENCY_INTERACTIVE;
}
scoped_refptr<media::AudioRendererSink>
AudioDeviceFactory::NewAudioRendererMixerSink(
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) {
// AudioRendererMixer sinks are always used asynchronously and thus can
// operate without a timeout value.
return NewFinalAudioRendererSink(frame_token, params, base::TimeDelta());
}
// static
scoped_refptr<media::AudioRendererSink>
AudioDeviceFactory::NewAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) {
if (factory_) {
scoped_refptr<media::AudioRendererSink> device =
factory_->CreateAudioRendererSink(source_type, frame_token, params);
if (device)
return device;
}
if (IsMixable(source_type))
return NewMixableSink(source_type, frame_token, params);
@ -136,20 +132,11 @@ AudioDeviceFactory::NewAudioRendererSink(
GetDefaultAuthTimeout());
}
// static
scoped_refptr<media::SwitchableAudioRendererSink>
AudioDeviceFactory::NewSwitchableAudioRendererSink(
blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) {
if (factory_) {
scoped_refptr<media::SwitchableAudioRendererSink> sink =
factory_->CreateSwitchableAudioRendererSink(source_type, frame_token,
params);
if (sink)
return sink;
}
if (IsMixable(source_type))
return NewMixableSink(source_type, frame_token, params);
@ -159,19 +146,10 @@ AudioDeviceFactory::NewSwitchableAudioRendererSink(
return nullptr;
}
// static
scoped_refptr<media::AudioCapturerSource>
AudioDeviceFactory::NewAudioCapturerSource(
const blink::LocalFrameToken& frame_token,
const media::AudioSourceParameters& params) {
if (factory_) {
// We don't pass on |session_id|, as this branch is only used for tests.
scoped_refptr<media::AudioCapturerSource> source =
factory_->CreateAudioCapturerSource(frame_token, params);
if (source)
return source;
}
return base::MakeRefCounted<media::AudioInputDevice>(
blink::AudioInputIPCFactory::GetInstance().CreateAudioInputIPC(
frame_token, params),
@ -179,47 +157,55 @@ AudioDeviceFactory::NewAudioCapturerSource(
media::AudioInputDevice::DeadStreamDetection::kEnabled);
}
// static
media::OutputDeviceInfo AudioDeviceFactory::GetOutputDeviceInfo(
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) {
DCHECK(IsMainThread()) << __func__ << "() is called on a wrong thread.";
constexpr base::TimeDelta kDeleteTimeout = base::Milliseconds(5000);
// There's one process wide instance that lives on the render thread.
static base::NoDestructor<AudioRendererSinkCache> cache(
base::ThreadPool::CreateSequencedTaskRunner(
{base::TaskPriority::BEST_EFFORT,
base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN}),
base::BindRepeating(&AudioDeviceFactory::NewAudioRendererSink,
blink::WebAudioDeviceSourceType::kNone),
kDeleteTimeout);
return cache->GetSinkInfo(frame_token, params.session_id, params.device_id);
if (!sink_cache_) {
sink_cache_ = std::make_unique<AudioRendererSinkCache>(
base::ThreadPool::CreateSequencedTaskRunner(
{base::TaskPriority::BEST_EFFORT,
base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN}),
base::BindRepeating(&AudioDeviceFactory::NewAudioRendererSink,
base::Unretained(this),
blink::WebAudioDeviceSourceType::kNone),
kDeleteTimeout);
}
return sink_cache_->GetSinkInfo(frame_token, params.session_id,
params.device_id);
}
AudioDeviceFactory::AudioDeviceFactory() {
DCHECK(!factory_) << "Can't register two factories at once.";
factory_ = this;
scoped_refptr<media::AudioRendererSink>
AudioDeviceFactory::NewAudioRendererMixerSink(
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) {
// AudioRendererMixer sinks are always used asynchronously and thus can
// operate without a timeout value.
return NewFinalAudioRendererSink(frame_token, params, base::TimeDelta());
}
AudioDeviceFactory::~AudioDeviceFactory() {
factory_ = nullptr;
scoped_refptr<media::SwitchableAudioRendererSink>
AudioDeviceFactory::NewMixableSink(blink::WebAudioDeviceSourceType source_type,
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params) {
DCHECK(IsMainThread()) << __func__ << "() is called on a wrong thread.";
if (!mixer_manager_) {
mixer_manager_ = std::make_unique<AudioRendererMixerManager>(
base::BindRepeating(&AudioDeviceFactory::NewAudioRendererMixerSink,
base::Unretained(this)));
}
return mixer_manager_->CreateInput(
frame_token, params.session_id, params.device_id,
AudioDeviceFactory::GetSourceLatencyType(source_type));
}
// static
scoped_refptr<media::AudioRendererSink>
AudioDeviceFactory::NewFinalAudioRendererSink(
const blink::LocalFrameToken& frame_token,
const media::AudioSinkParameters& params,
base::TimeDelta auth_timeout) {
if (factory_) {
scoped_refptr<media::AudioRendererSink> sink =
factory_->CreateFinalAudioRendererSink(frame_token, params,
auth_timeout);
if (sink)
return sink;
}
return NewOutputDevice(frame_token, params, auth_timeout);
}

@ -117,14 +117,6 @@ AudioRendererMixerManager::~AudioRendererMixerManager() {
// |mixers_| may leak (i.e., may be non-empty at this time) as well.
}
// static
AudioRendererMixerManager& AudioRendererMixerManager::GetInstance() {
DEFINE_THREAD_SAFE_STATIC_LOCAL(
AudioRendererMixerManager, instance,
(base::BindRepeating(&AudioDeviceFactory::NewAudioRendererMixerSink)));
return instance;
}
scoped_refptr<media::AudioRendererMixerInput>
AudioRendererMixerManager::CreateInput(
const blink::LocalFrameToken& source_frame_token,

@ -43,17 +43,20 @@ namespace blink {
class BLINK_MODULES_EXPORT AudioRendererMixerManager final
: public media::AudioRendererMixerPool {
public:
// Callback which will be used to create sinks. See AudioDeviceFactory for
// more details on the parameters.
using CreateSinkCB =
base::RepeatingCallback<scoped_refptr<media::AudioRendererSink>(
const blink::LocalFrameToken& source_frame_token,
const media::AudioSinkParameters& params)>;
explicit AudioRendererMixerManager(CreateSinkCB create_sink_cb);
~AudioRendererMixerManager() final;
AudioRendererMixerManager(const AudioRendererMixerManager&) = delete;
AudioRendererMixerManager& operator=(const AudioRendererMixerManager&) =
delete;
~AudioRendererMixerManager() final;
// AudioRendererMixerManager instance which manages renderer side mixer
// instances shared based on configured audio parameters. Lazily created on
// first call.
static AudioRendererMixerManager& GetInstance();
// Creates an AudioRendererMixerInput with the proper callbacks necessary to
// retrieve an AudioRendererMixer instance from AudioRendererMixerManager.
// |source_frame_token| refers to the RenderFrame containing the entity
@ -84,16 +87,6 @@ class BLINK_MODULES_EXPORT AudioRendererMixerManager final
const blink::LocalFrameToken& source_frame_token,
const std::string& device_id);
protected:
// Callback which will be used to create sinks. See AudioDeviceFactory for
// more details on the parameters.
using CreateSinkCB =
base::RepeatingCallback<scoped_refptr<media::AudioRendererSink>(
const blink::LocalFrameToken& source_frame_token,
const media::AudioSinkParameters& params)>;
explicit AudioRendererMixerManager(CreateSinkCB create_sink_cb);
private:
friend class AudioRendererMixerManagerTest;