
[PM] Track lazy and bounded V8PerFrameMemoryRequests separately

Upgrade a lazy request to a bounded request to prevent starvation when
the lazy request has not received a result by the time the next bounded
request would be sent.

R=chrisha, ulan

Bug: 1080672
Change-Id: Ic3f44a67ff8052efdd83160ba453218fbb6f24e3
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2349978
Commit-Queue: Joe Mason <joenotcharles@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Chris Hamilton <chrisha@chromium.org>
Cr-Commit-Position: refs/heads/master@{#798757}
Authored by Joe Mason on 2020-08-17 19:09:31 +00:00; committed by Commit Bot
parent 3ec8590a65
commit 9b5926ea98
3 changed files with 233 additions and 51 deletions
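The scheduling rule the diff implements reduces to a comparison of time intervals: an in-flight lazy measurement is upgraded when the next bounded request comes due (measured from the previous request time) before the lazy reply arrives. Below is a standalone sketch of that decision, not the decorator code itself; the helper ShouldUpgradeToBounded is invented for illustration, and the timings are taken from the LazyRequests test at the end of the diff.

#include <chrono>
#include <iostream>

using Seconds = std::chrono::seconds;

// A lazy measurement has been started because the lazy request came due. It
// is upgraded to a bounded measurement if the next bounded request comes due
// (measured from the previous request time) before the lazy reply arrives.
bool ShouldUpgradeToBounded(Seconds bounded_min_time_between_requests,
                            Seconds lazy_reply_arrives_after) {
  return bounded_min_time_between_requests <= lazy_reply_arrives_after;
}

int main() {
  // From the test: the lazy request fires 30 sec after the previous request
  // and the renderer takes 10 sec to reply, so the reply lands at 40 sec.
  const Seconds reply_at = Seconds(30) + Seconds(10);

  // A 45 sec bounded request is not yet due when the reply arrives: no
  // upgrade (the kLongBoundedRequestLength case).
  std::cout << ShouldUpgradeToBounded(Seconds(45), reply_at) << "\n";  // 0

  // A 40 sec bounded request is due by then, so the lazy measurement is
  // upgraded and a bounded query is sent (the kUpgradeRequestLength case).
  std::cout << ShouldUpgradeToBounded(Seconds(40), reply_at) << "\n";  // 1
}

Any C++11 compiler builds this sketch; the two output lines correspond to the 45 sec (no upgrade) and 40 sec (upgrade) bounded requests exercised in the test.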

@@ -5,6 +5,8 @@
#ifndef COMPONENTS_PERFORMANCE_MANAGER_PUBLIC_V8_MEMORY_V8_PER_FRAME_MEMORY_DECORATOR_H_
#define COMPONENTS_PERFORMANCE_MANAGER_PUBLIC_V8_MEMORY_V8_PER_FRAME_MEMORY_DECORATOR_H_
#include <vector>
#include "base/containers/flat_map.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
@@ -267,6 +269,10 @@ class V8PerFrameMemoryDecorator
// Returns the next measurement request that should be scheduled.
V8PerFrameMemoryRequest* GetNextRequest() const;
// Returns the next measurement request with mode kBounded that should be
// scheduled.
V8PerFrameMemoryRequest* GetNextBoundedRequest() const;
// Implementation details below this point.
// V8PerFrameMemoryRequest objects register themselves with the decorator.
@@ -287,8 +293,9 @@ class V8PerFrameMemoryDecorator
Graph* graph_ = nullptr;
// List of requests sorted by min_time_between_requests (lowest first).
std::vector<V8PerFrameMemoryRequest*> measurement_requests_;
// Lists of requests sorted by min_time_between_requests (lowest first).
std::vector<V8PerFrameMemoryRequest*> bounded_measurement_requests_;
std::vector<V8PerFrameMemoryRequest*> lazy_measurement_requests_;
SEQUENCE_CHECKER(sequence_checker_);
};

@@ -188,6 +188,8 @@ class NodeAttachedProcessData
private:
void StartMeasurement(MeasurementMode mode);
void ScheduleUpgradeToBoundedMeasurement();
void UpgradeToBoundedMeasurementIfNeeded();
void EnsureRemote();
void OnPerFrameV8MemoryUsageData(
blink::mojom::PerProcessV8MemoryUsageDataPtr result);
@@ -196,16 +198,29 @@ class NodeAttachedProcessData
mojo::Remote<blink::mojom::V8PerFrameMemoryReporter> resource_usage_reporter_;
// State transitions:
//
//  +-----------------------------------+
//  |                                   |
//  |               +-> MeasuringLazy +-+
//  v               |                 +
// Idle +-> Waiting +>                 |
//  ^               |                 v
//  |               +-> MeasuringBounded +-+
//  |                                      |
//  +--------------------------------------+
enum class State {
kWaiting, // Waiting to take a measurement.
kMeasuring, // Waiting for measurement results.
kIdle, // No measurements scheduled.
kIdle, // No measurements scheduled.
kWaiting, // Waiting to take a measurement.
kMeasuringBounded, // Waiting for results from a bounded measurement.
kMeasuringLazy, // Waiting for results from a lazy measurement.
};
State state_ = State::kIdle;
// Used to schedule the next measurement.
base::TimeTicks last_request_time_;
base::OneShotTimer timer_;
base::OneShotTimer request_timer_;
base::OneShotTimer bounded_upgrade_timer_;
V8PerFrameMemoryProcessData data_;
bool data_available_ = false;
@@ -224,7 +239,15 @@ NodeAttachedProcessData::NodeAttachedProcessData(
void NodeAttachedProcessData::ScheduleNextMeasurement() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (state_ == State::kMeasuring) {
if (state_ == State::kMeasuringLazy) {
// Upgrade to a bounded measurement if the lazy measurement is taking too
// long. Otherwise do nothing until the current measurement finishes.
// ScheduleNextMeasurement will be called again at that point.
ScheduleUpgradeToBoundedMeasurement();
return;
}
if (state_ == State::kMeasuringBounded) {
// Don't restart the timer until the current measurement finishes.
// ScheduleNextMeasurement will be called again at that point.
return;
@@ -241,7 +264,8 @@ void NodeAttachedProcessData::ScheduleNextMeasurement() {
// All measurements have been cancelled, or decorator was removed from
// graph.
state_ = State::kIdle;
timer_.Stop();
request_timer_.Stop();
bounded_upgrade_timer_.Stop();
last_request_time_ = base::TimeTicks();
return;
}
@@ -253,19 +277,27 @@ void NodeAttachedProcessData::ScheduleNextMeasurement() {
return;
}
// TODO(joenotcharles): Make sure kLazy requests can't starve kBounded
// requests.
base::TimeTicks next_request_time =
last_request_time_ + next_request->min_time_between_requests();
timer_.Start(FROM_HERE, next_request_time - base::TimeTicks::Now(),
base::BindOnce(&NodeAttachedProcessData::StartMeasurement,
base::Unretained(this), next_request->mode()));
request_timer_.Start(
FROM_HERE, next_request_time - base::TimeTicks::Now(),
base::BindOnce(&NodeAttachedProcessData::StartMeasurement,
base::Unretained(this), next_request->mode()));
}
void NodeAttachedProcessData::StartMeasurement(MeasurementMode mode) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_EQ(state_, State::kWaiting);
state_ = State::kMeasuring;
if (mode == MeasurementMode::kLazy) {
DCHECK_EQ(state_, State::kWaiting);
state_ = State::kMeasuringLazy;
// Ensure this lazy measurement doesn't starve any bounded measurements in
// the queue.
ScheduleUpgradeToBoundedMeasurement();
} else {
DCHECK(state_ == State::kWaiting || state_ == State::kMeasuringLazy);
state_ = State::kMeasuringBounded;
}
last_request_time_ = base::TimeTicks::Now();
EnsureRemote();
@@ -284,10 +316,47 @@ void NodeAttachedProcessData::StartMeasurement(MeasurementMode mode) {
weak_factory_.GetWeakPtr()));
}
void NodeAttachedProcessData::ScheduleUpgradeToBoundedMeasurement() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_EQ(state_, State::kMeasuringLazy);
V8PerFrameMemoryRequest* bounded_request = nullptr;
auto* decorator =
V8PerFrameMemoryDecorator::GetFromGraph(process_node_->GetGraph());
if (decorator) {
bounded_request = decorator->GetNextBoundedRequest();
}
if (!bounded_request) {
// All measurements have been cancelled, or decorator was removed from
// graph.
return;
}
base::TimeTicks bounded_request_time =
last_request_time_ + bounded_request->min_time_between_requests();
bounded_upgrade_timer_.Start(
FROM_HERE, bounded_request_time - base::TimeTicks::Now(),
base::BindOnce(
&NodeAttachedProcessData::UpgradeToBoundedMeasurementIfNeeded,
base::Unretained(this)));
}
void NodeAttachedProcessData::UpgradeToBoundedMeasurementIfNeeded() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (state_ != State::kMeasuringLazy) {
// State changed before timer expired.
return;
}
StartMeasurement(MeasurementMode::kBounded);
}
void NodeAttachedProcessData::OnPerFrameV8MemoryUsageData(
blink::mojom::PerProcessV8MemoryUsageDataPtr result) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_EQ(state_, State::kMeasuring);
// Data has arrived so don't upgrade lazy requests to bounded, even if
// another lazy request is issued before the timer expires.
bounded_upgrade_timer_.Stop();
// Distribute the data to the frames.
// If a frame doesn't have corresponding data in the result, clear any data
@@ -338,9 +407,12 @@ void NodeAttachedProcessData::OnPerFrameV8MemoryUsageData(
data_available_ = true;
data_.set_unassociated_v8_bytes_used(unassociated_v8_bytes_used);
// Schedule another measurement for this process node.
state_ = State::kIdle;
ScheduleNextMeasurement();
// Schedule another measurement for this process node unless one is already
// scheduled.
if (state_ != State::kWaiting) {
state_ = State::kIdle;
ScheduleNextMeasurement();
}
V8PerFrameMemoryDecorator::ObserverNotifier()
.NotifyObserversOnMeasurementAvailable(process_node_);
@@ -529,7 +601,8 @@ const V8PerFrameMemoryProcessData* V8PerFrameMemoryProcessData::ForProcessNode(
V8PerFrameMemoryDecorator::V8PerFrameMemoryDecorator() = default;
V8PerFrameMemoryDecorator::~V8PerFrameMemoryDecorator() {
DCHECK(measurement_requests_.empty());
DCHECK(bounded_measurement_requests_.empty());
DCHECK(lazy_measurement_requests_.empty());
}
void V8PerFrameMemoryDecorator::OnPassedToGraph(Graph* graph) {
@@ -551,11 +624,17 @@ void V8PerFrameMemoryDecorator::OnPassedToGraph(Graph* graph) {
void V8PerFrameMemoryDecorator::OnTakenFromGraph(Graph* graph) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_EQ(graph, graph_);
for (V8PerFrameMemoryRequest* request : measurement_requests_) {
for (V8PerFrameMemoryRequest* request : bounded_measurement_requests_) {
request->OnDecoratorUnregistered(
util::PassKey<V8PerFrameMemoryDecorator>());
}
measurement_requests_.clear();
bounded_measurement_requests_.clear();
for (V8PerFrameMemoryRequest* request : lazy_measurement_requests_) {
request->OnDecoratorUnregistered(
util::PassKey<V8PerFrameMemoryDecorator>());
}
lazy_measurement_requests_.clear();
UpdateProcessMeasurementSchedules();
graph->GetNodeDataDescriberRegistry()->UnregisterDescriber(this);
@@ -609,8 +688,26 @@ base::Value V8PerFrameMemoryDecorator::DescribeProcessNodeData(
V8PerFrameMemoryRequest* V8PerFrameMemoryDecorator::GetNextRequest() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return measurement_requests_.empty() ? nullptr
: measurement_requests_.front();
V8PerFrameMemoryRequest* next_bounded_request = GetNextBoundedRequest();
if (lazy_measurement_requests_.empty())
return next_bounded_request;
V8PerFrameMemoryRequest* next_lazy_request =
lazy_measurement_requests_.front();
// Prioritize bounded requests.
if (next_bounded_request &&
next_bounded_request->min_time_between_requests() <=
next_lazy_request->min_time_between_requests()) {
return next_bounded_request;
}
return next_lazy_request;
}
V8PerFrameMemoryRequest* V8PerFrameMemoryDecorator::GetNextBoundedRequest()
const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return bounded_measurement_requests_.empty()
? nullptr
: bounded_measurement_requests_.front();
}
void V8PerFrameMemoryDecorator::AddMeasurementRequest(
@@ -618,28 +715,25 @@ void V8PerFrameMemoryDecorator::AddMeasurementRequest(
V8PerFrameMemoryRequest* request) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(request);
DCHECK(!base::Contains(measurement_requests_, request))
std::vector<V8PerFrameMemoryRequest*>& measurement_requests =
request->mode() == MeasurementMode::kLazy ? lazy_measurement_requests_
: bounded_measurement_requests_;
DCHECK(!base::Contains(measurement_requests, request))
<< "V8PerFrameMemoryRequest object added twice";
// Each user of this decorator is expected to issue a single
// V8PerFrameMemoryRequest, so the size of measurement_requests_ is too low
// V8PerFrameMemoryRequest, so the size of measurement_requests is too low
// to make the complexity of real priority queue worthwhile.
for (std::vector<V8PerFrameMemoryRequest*>::const_iterator it =
measurement_requests_.begin();
it != measurement_requests_.end(); ++it) {
measurement_requests.begin();
it != measurement_requests.end(); ++it) {
if (request->min_time_between_requests() <
(*it)->min_time_between_requests() ||
// Make sure bounded request sort before lazy requests so that they
// aren't starved.
(request->min_time_between_requests() ==
(*it)->min_time_between_requests() &&
request->mode() ==
V8PerFrameMemoryRequest::MeasurementMode::kBounded)) {
measurement_requests_.insert(it, request);
(*it)->min_time_between_requests()) {
measurement_requests.insert(it, request);
UpdateProcessMeasurementSchedules();
return;
}
}
measurement_requests_.push_back(request);
measurement_requests.push_back(request);
UpdateProcessMeasurementSchedules();
}
@@ -648,7 +742,10 @@ void V8PerFrameMemoryDecorator::RemoveMeasurementRequest(
V8PerFrameMemoryRequest* request) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(request);
size_t num_erased = base::Erase(measurement_requests_, request);
size_t num_erased = base::Erase(request->mode() == MeasurementMode::kLazy
? lazy_measurement_requests_
: bounded_measurement_requests_,
request);
DCHECK_EQ(num_erased, 1ULL);
UpdateProcessMeasurementSchedules();
}
@@ -657,14 +754,22 @@ void V8PerFrameMemoryDecorator::UpdateProcessMeasurementSchedules() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(graph_);
#if DCHECK_IS_ON()
// Check the data invariant on measurement_requests_, which will be used by
// Check the data invariant on measurement_requests, which will be used by
// ScheduleNextMeasurement.
for (size_t i = 1; i < measurement_requests_.size(); ++i) {
DCHECK(measurement_requests_[i - 1]);
DCHECK(measurement_requests_[i]);
DCHECK_LE(measurement_requests_[i - 1]->min_time_between_requests(),
measurement_requests_[i]->min_time_between_requests());
}
auto check_invariants =
[](const std::vector<V8PerFrameMemoryRequest*>& measurement_requests,
MeasurementMode mode) {
for (size_t i = 1; i < measurement_requests.size(); ++i) {
DCHECK(measurement_requests[i - 1]);
DCHECK(measurement_requests[i]);
DCHECK_EQ(measurement_requests[i - 1]->mode(), mode);
DCHECK_EQ(measurement_requests[i]->mode(), mode);
DCHECK_LE(measurement_requests[i - 1]->min_time_between_requests(),
measurement_requests[i]->min_time_between_requests());
}
};
check_invariants(bounded_measurement_requests_, MeasurementMode::kBounded);
check_invariants(lazy_measurement_requests_, MeasurementMode::kLazy);
#endif
for (const ProcessNode* node : graph_->GetAllProcessNodes()) {
NodeAttachedProcessData* process_data = NodeAttachedProcessData::Get(node);
@@ -682,7 +787,11 @@ void V8PerFrameMemoryDecorator::NotifyObserversOnMeasurementAvailable(
util::PassKey<ObserverNotifier> key,
const ProcessNode* process_node) const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
for (V8PerFrameMemoryRequest* request : measurement_requests_) {
for (V8PerFrameMemoryRequest* request : bounded_measurement_requests_) {
request->NotifyObserversOnMeasurementAvailable(
util::PassKey<V8PerFrameMemoryDecorator>(), process_node);
}
for (V8PerFrameMemoryRequest* request : lazy_measurement_requests_) {
request->NotifyObserversOnMeasurementAvailable(
util::PassKey<V8PerFrameMemoryDecorator>(), process_node);
}

@@ -571,8 +571,10 @@ TEST_F(V8PerFrameMemoryDecoratorTest, PerFrameDataIsDistributed) {
}
TEST_F(V8PerFrameMemoryDecoratorTest, LazyRequests) {
constexpr base::TimeDelta kLazyRequestLength =
base::TimeDelta::FromSeconds(30);
V8PerFrameMemoryRequest lazy_request(
kMinTimeBetweenRequests, V8PerFrameMemoryRequest::MeasurementMode::kLazy,
kLazyRequestLength, V8PerFrameMemoryRequest::MeasurementMode::kLazy,
graph());
MockV8PerFrameMemoryReporter reporter;
@@ -586,15 +588,79 @@ TEST_F(V8PerFrameMemoryDecoratorTest, LazyRequests) {
content::PROCESS_TYPE_RENDERER,
RenderProcessHostProxy::CreateForTesting(kTestProcessID));
task_env().RunUntilIdle();
task_env().FastForwardBy(base::TimeDelta::FromSeconds(1));
testing::Mock::VerifyAndClearExpectations(&reporter);
// Bounded requests should be preferred over lazy requests with the same
// min_time_between_requests.
V8PerFrameMemoryRequest bounded_request(kMinTimeBetweenRequests, graph());
// If a lazy request takes too long to respond it should be upgraded to a
// bounded request if one is in the queue.
constexpr base::TimeDelta kLongBoundedRequestLength =
base::TimeDelta::FromSeconds(45);
V8PerFrameMemoryRequest long_bounded_request(kLongBoundedRequestLength,
graph());
auto* decorator = V8PerFrameMemoryDecorator::GetFromGraph(graph());
ASSERT_TRUE(decorator);
ASSERT_TRUE(decorator->GetNextRequest());
EXPECT_EQ(decorator->GetNextRequest()->min_time_between_requests(),
kLazyRequestLength);
EXPECT_EQ(decorator->GetNextRequest()->mode(),
V8PerFrameMemoryRequest::MeasurementMode::kLazy);
{
// Next lazy request sent after 30 sec + 10 sec delay until reply = 40 sec
// until reply arrives. kLongBoundedRequestLength > 40 sec so the reply
// should arrive in time to prevent upgrading the request.
auto data = blink::mojom::PerProcessV8MemoryUsageData::New();
data->unassociated_bytes_used = 1U;
ExpectQueryAndDelayReply(&reporter, base::TimeDelta::FromSeconds(10),
std::move(data),
MockV8PerFrameMemoryReporter::Mode::LAZY);
}
// Wait long enough for the upgraded request to be sent, to verify that it
// wasn't sent.
task_env().FastForwardBy(kLongBoundedRequestLength);
testing::Mock::VerifyAndClearExpectations(&reporter);
constexpr base::TimeDelta kUpgradeRequestLength =
base::TimeDelta::FromSeconds(40);
V8PerFrameMemoryRequest bounded_request_upgrade(kUpgradeRequestLength,
graph());
ASSERT_TRUE(decorator->GetNextRequest());
EXPECT_EQ(decorator->GetNextRequest()->min_time_between_requests(),
kLazyRequestLength);
EXPECT_EQ(decorator->GetNextRequest()->mode(),
V8PerFrameMemoryRequest::MeasurementMode::kLazy);
{
::testing::InSequence seq;
// Again, 40 sec total until reply arrives. kUpgradeRequestLength <= 40 sec
// so a second upgraded request should be sent.
auto data = blink::mojom::PerProcessV8MemoryUsageData::New();
data->unassociated_bytes_used = 2U;
ExpectQueryAndDelayReply(&reporter, base::TimeDelta::FromSeconds(10),
std::move(data),
MockV8PerFrameMemoryReporter::Mode::LAZY);
auto data2 = blink::mojom::PerProcessV8MemoryUsageData::New();
data2->unassociated_bytes_used = 3U;
ExpectQueryAndReply(&reporter, std::move(data2),
MockV8PerFrameMemoryReporter::Mode::DEFAULT);
}
// Wait long enough for the upgraded request to be sent.
task_env().FastForwardBy(kUpgradeRequestLength);
testing::Mock::VerifyAndClearExpectations(&reporter);
EXPECT_TRUE(V8PerFrameMemoryProcessData::ForProcessNode(process.get()));
EXPECT_EQ(3u, V8PerFrameMemoryProcessData::ForProcessNode(process.get())
->unassociated_v8_bytes_used());
// Bounded requests should be preferred over lazy requests with the same
// min_time_between_requests.
V8PerFrameMemoryRequest short_bounded_request(kLazyRequestLength, graph());
ASSERT_TRUE(decorator->GetNextRequest());
EXPECT_EQ(decorator->GetNextRequest()->min_time_between_requests(),
kLazyRequestLength);
EXPECT_EQ(decorator->GetNextRequest()->mode(),
V8PerFrameMemoryRequest::MeasurementMode::kBounded);
}