net: Socket pools prioritize requests with ignore_limits.
Also, when a socket request is cancelled while there are more ConnectJobs than socket requests for a group, no ConnectJobs are cancelled. These two changes are needed to prevent a lockup in which a socket created for a sync XHR is given to another socket request of equal or higher priority. This issue may well still exist with SPDY sessions.

BUG=155536
Review URL: https://chromiumcodereview.appspot.com/15927019

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@205491 0039d316-1c4b-4281-b951-d872f2087c98
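
As a reviewer's aside, here is a minimal standalone sketch of the ordering rule this change introduces. FakeRequest and CompareEffective are hypothetical stand-ins, not the pool's real Request class or helper: requests with ignore_limits set sort ahead of everything else, then priority decides, and FIFO order breaks ties.

// Standalone illustration of the effective-priority rule added in this patch.
// FakeRequest is only a stand-in for ClientSocketPoolBaseHelper::Request.
#include <algorithm>
#include <cstdio>
#include <vector>

struct FakeRequest {
  int priority;        // Higher value means more important.
  bool ignore_limits;  // Set for sync XHR-style requests.
  const char* name;
};

// Same decision order as the CompareEffectiveRequestPriority() added below:
// ignore_limits wins outright, then the numeric priority is compared.
int CompareEffective(const FakeRequest& a, const FakeRequest& b) {
  if (a.ignore_limits != b.ignore_limits)
    return a.ignore_limits ? 1 : -1;
  if (a.priority != b.priority)
    return a.priority > b.priority ? 1 : -1;
  return 0;
}

int main() {
  std::vector<FakeRequest> queue = {
      {2, false, "early HIGHEST"},
      {2, true, "sync XHR (ignore_limits)"},
      {1, false, "LOW"},
  };
  // stable_sort keeps FIFO order for requests that compare as equal.
  std::stable_sort(queue.begin(), queue.end(),
                   [](const FakeRequest& a, const FakeRequest& b) {
                     return CompareEffective(a, b) > 0;
                   });
  for (const FakeRequest& r : queue)
    std::printf("%s\n", r.name);  // sync XHR, then early HIGHEST, then LOW.
  return 0;
}

The pool itself applies the same comparison at insertion time, in InsertRequestIntoQueue(), rather than sorting the queue after the fact.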
@@ -19,6 +19,8 @@

using base::TimeDelta;

namespace net {

namespace {

// Indicate whether we should enable idle socket cleanup timer. When timer is
@@ -37,9 +39,29 @@ const int kCleanupInterval = 10;  // DO NOT INCREASE THIS TIMEOUT.
// after a certain timeout has passed without receiving an ACK.
bool g_connect_backup_jobs_enabled = true;

}  // namespace
// Compares the effective priority of two requests, and returns 1 if |request1|
// has greater effective priority than |request2|, 0 if they have the same
// effective priority, and -1 if |request2| has the greater effective priority.
// Requests with |ignore_limits| set have higher effective priority than those
// without. If both requests have |ignore_limits| set/unset, then the request
// with the highest Priority has the highest effective priority. Does not take
// into account the fact that Requests are serviced in FIFO order if they would
// otherwise have the same priority.
int CompareEffectiveRequestPriority(
    const internal::ClientSocketPoolBaseHelper::Request& request1,
    const internal::ClientSocketPoolBaseHelper::Request& request2) {
  if (request1.ignore_limits() && !request2.ignore_limits())
    return 1;
  if (!request1.ignore_limits() && request2.ignore_limits())
    return -1;
  if (request1.priority() > request2.priority())
    return 1;
  if (request1.priority() < request2.priority())
    return -1;
  return 0;
}

namespace net {
}  // namespace

ConnectJob::ConnectJob(const std::string& group_name,
                       base::TimeDelta timeout_duration,
@@ -187,16 +209,16 @@ ClientSocketPoolBaseHelper::CallbackResultPair::CallbackResultPair(

ClientSocketPoolBaseHelper::CallbackResultPair::~CallbackResultPair() {}

// InsertRequestIntoQueue inserts the request into the queue based on
// priority. Highest priorities are closest to the front. Older requests are
// prioritized over requests of equal priority.
//
// static
void ClientSocketPoolBaseHelper::InsertRequestIntoQueue(
    const Request* r, RequestQueue* pending_requests) {
  RequestQueue::iterator it = pending_requests->begin();
  while (it != pending_requests->end() && r->priority() <= (*it)->priority())
  // TODO(mmenke): Should the network stack require requests with
  // |ignore_limits| have the highest priority?
  while (it != pending_requests->end() &&
         CompareEffectiveRequestPriority(*r, *(*it)) <= 0) {
    ++it;
  }
  pending_requests->insert(it, r);
}

@@ -494,8 +516,10 @@ void ClientSocketPoolBaseHelper::CancelRequest(
  req->net_log().AddEvent(NetLog::TYPE_CANCELLED);
  req->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL);

  // We let the job run, unless we're at the socket limit.
  if (group->jobs().size() && ReachedMaxSocketsLimit()) {
  // We let the job run, unless we're at the socket limit and there is
  // not another request waiting on the job.
  if (group->jobs().size() > group->pending_requests().size() &&
      ReachedMaxSocketsLimit()) {
    RemoveConnectJob(*group->jobs().begin(), group);
    CheckForStalledSocketGroups();
  }
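
To make the new cancellation condition concrete, here is a small sketch of the decision using plain counts; ShouldCancelAConnectJob and its parameters are illustrative names, not part of the pool's API. The idea is that a ConnectJob now survives a cancel as long as every remaining job still has a pending request it could serve, which keeps a job started for an ignore_limits request alive.

// Sketch of the cancellation rule after this change, with plain counts
// standing in for the real Group bookkeeping.
#include <cstddef>
#include <cstdio>

bool ShouldCancelAConnectJob(std::size_t num_jobs,
                             std::size_t num_pending_requests,
                             bool at_max_sockets_limit) {
  // Old rule: cancel whenever any job exists and the pool is at its limit.
  // New rule: also require more jobs than remaining requests, so a job
  // started for an ignore_limits request is not torn down while another
  // request still needs it.
  return num_jobs > num_pending_requests && at_max_sockets_limit;
}

int main() {
  // One job was started for an ignore_limits request; a limited request in the
  // same group is cancelled, leaving 1 job and 1 remaining pending request.
  std::printf("%d\n", ShouldCancelAConnectJob(1, 1, true));  // 0: job survives.
  // If the cancelled request had been the last one, the job would be idle.
  std::printf("%d\n", ShouldCancelAConnectJob(1, 0, true));  // 1: job reclaimed.
  return 0;
}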

@@ -443,6 +443,10 @@ class NET_EXPORT_PRIVATE ClientSocketPoolBaseHelper
  typedef std::map<const ClientSocketHandle*, CallbackResultPair>
      PendingCallbackMap;

  // Inserts the request into the queue based on the order in which requests
  // will receive sockets. Requests which ignore the socket pool limits come
  // first. Then requests are sorted by priority, with higher priorities closer
  // to the front. Older requests are prioritized over requests of equal
  // priority.
  static void InsertRequestIntoQueue(const Request* r,
                                     RequestQueue* pending_requests);
  static const Request* RemoveRequestFromQueue(const RequestQueue::iterator& it,

@@ -61,7 +61,7 @@ void TestLoadTimingInfoConnectedReused(const ClientSocketHandle& handle) {
}

// Make sure |handle| sets load times correctly when it has been assigned a
// fresh socket. Also runs TestLoadTimingInfoConnectedReused, since the owner
// of a connection where |is_reused| is false may consider the connection
// reused.
void TestLoadTimingInfoConnectedNotReused(const ClientSocketHandle& handle) {
@@ -427,7 +427,7 @@ class TestConnectJobFactory
        job_types_(NULL),
        client_socket_factory_(client_socket_factory),
        net_log_(net_log) {
  }

  virtual ~TestConnectJobFactory() {}

@@ -700,11 +700,17 @@ class ClientSocketPoolBaseTest : public testing::Test {
        connect_job_factory_));
  }

  int StartRequest(const std::string& group_name,
                   net::RequestPriority priority) {
  int StartRequestWithParams(
      const std::string& group_name,
      RequestPriority priority,
      const scoped_refptr<TestSocketParams>& params) {
    return test_base_.StartRequestUsingPool<
        TestClientSocketPool, TestSocketParams>(
            pool_.get(), group_name, priority, params_);
            pool_.get(), group_name, priority, params);
  }

  int StartRequest(const std::string& group_name, RequestPriority priority) {
    return StartRequestWithParams(group_name, priority, params_);
  }

  int GetOrderOfRequest(size_t index) const {
@@ -3847,6 +3853,166 @@ TEST_F(ClientSocketPoolBaseTest,
  EXPECT_EQ(OK, callback.WaitForResult());
}

// Test that when a socket pool and group are at their limits, a request
// with |ignore_limits| triggers creation of a new socket, and gets the socket
// instead of a request with the same priority that was issued earlier, but
// that does not have |ignore_limits| set.
TEST_F(ClientSocketPoolBaseTest, IgnoreLimits) {
  scoped_refptr<TestSocketParams> params_ignore_limits(new TestSocketParams());
  params_ignore_limits->set_ignore_limits(true);
  CreatePool(1, 1);

  // Issue a request to reach the socket pool limit.
  EXPECT_EQ(OK, StartRequestWithParams("a", kDefaultPriority, params_));
  EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));

  connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);

  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", kDefaultPriority,
                                                   params_));
  EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));

  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", kDefaultPriority,
                                                   params_ignore_limits));
  ASSERT_EQ(1, pool_->NumConnectJobsInGroup("a"));

  EXPECT_EQ(OK, request(2)->WaitForResult());
  EXPECT_FALSE(request(1)->have_result());
}

// Test that when a socket pool and group are at their limits, a request with
// |ignore_limits| set triggers creation of a new socket, and gets the socket
// instead of a request with a higher priority that was issued earlier, but
// that does not have |ignore_limits| set.
TEST_F(ClientSocketPoolBaseTest, IgnoreLimitsLowPriority) {
  scoped_refptr<TestSocketParams> params_ignore_limits(new TestSocketParams());
  params_ignore_limits->set_ignore_limits(true);
  CreatePool(1, 1);

  // Issue a request to reach the socket pool limit.
  EXPECT_EQ(OK, StartRequestWithParams("a", HIGHEST, params_));
  EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));

  connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);

  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", HIGHEST, params_));
  EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));

  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", LOW,
                                                   params_ignore_limits));
  ASSERT_EQ(1, pool_->NumConnectJobsInGroup("a"));

  EXPECT_EQ(OK, request(2)->WaitForResult());
  EXPECT_FALSE(request(1)->have_result());
}

// Test that when a socket pool and group are at their limits, a request with
// |ignore_limits| set triggers creation of a new socket, and gets the socket
// instead of a request with a higher priority that was issued later and
// does not have |ignore_limits| set.
TEST_F(ClientSocketPoolBaseTest, IgnoreLimitsLowPriority2) {
  scoped_refptr<TestSocketParams> params_ignore_limits(new TestSocketParams());
  params_ignore_limits->set_ignore_limits(true);
  CreatePool(1, 1);

  // Issue a request to reach the socket pool limit.
  EXPECT_EQ(OK, StartRequestWithParams("a", HIGHEST, params_));
  EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));

  connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);

  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", LOW,
                                                   params_ignore_limits));
  ASSERT_EQ(1, pool_->NumConnectJobsInGroup("a"));

  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", HIGHEST, params_));
  EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));

  EXPECT_EQ(OK, request(1)->WaitForResult());
  EXPECT_FALSE(request(2)->have_result());
}

// Test that when a socket pool and group are at their limits, a ConnectJob
// issued for a request with |ignore_limits| set is not cancelled when a request
// without |ignore_limits| issued to the same group is cancelled.
TEST_F(ClientSocketPoolBaseTest, IgnoreLimitsCancelOtherJob) {
  scoped_refptr<TestSocketParams> params_ignore_limits(new TestSocketParams());
  params_ignore_limits->set_ignore_limits(true);
  CreatePool(1, 1);

  // Issue a request to reach the socket pool limit.
  EXPECT_EQ(OK, StartRequestWithParams("a", HIGHEST, params_));
  EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));

  connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);

  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", HIGHEST, params_));
  EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));

  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", HIGHEST,
                                                   params_ignore_limits));
  ASSERT_EQ(1, pool_->NumConnectJobsInGroup("a"));

  // Cancel the pending request without ignore_limits set. The ConnectJob
  // should not be cancelled.
  request(1)->handle()->Reset();
  ASSERT_EQ(1, pool_->NumConnectJobsInGroup("a"));

  EXPECT_EQ(OK, request(2)->WaitForResult());
  EXPECT_FALSE(request(1)->have_result());
}

// More involved test of ignore limits. Issues a bunch of requests and later
// checks the order in which they receive sockets.
TEST_F(ClientSocketPoolBaseTest, IgnoreLimitsOrder) {
  scoped_refptr<TestSocketParams> params_ignore_limits(new TestSocketParams());
  params_ignore_limits->set_ignore_limits(true);
  CreatePool(1, 1);

  connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);

  // Requests 0 and 1 do not have ignore_limits set, so they finish last. Since
  // the maximum number of sockets per pool is 1, the second request does not
  // trigger a ConnectJob.
  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", HIGHEST, params_));
  EXPECT_EQ(ERR_IO_PENDING, StartRequestWithParams("a", HIGHEST, params_));

  // Requests 2 and 3 have ignore_limits set, but have a low priority, so they
  // finish just before the first two.
  EXPECT_EQ(ERR_IO_PENDING,
            StartRequestWithParams("a", LOW, params_ignore_limits));
  EXPECT_EQ(ERR_IO_PENDING,
            StartRequestWithParams("a", LOW, params_ignore_limits));

  // Request 4 finishes first, since it is high priority and ignores limits.
  EXPECT_EQ(ERR_IO_PENDING,
            StartRequestWithParams("a", HIGHEST, params_ignore_limits));

  // Requests 5 and 6 are cancelled right after starting. This should result in
  // creating two ConnectJobs. Since only one request (Request 1) did not
  // result in creating a ConnectJob, only one of the ConnectJobs should be
  // cancelled when the requests are.
  EXPECT_EQ(ERR_IO_PENDING,
            StartRequestWithParams("a", HIGHEST, params_ignore_limits));
  EXPECT_EQ(ERR_IO_PENDING,
            StartRequestWithParams("a", HIGHEST, params_ignore_limits));
  EXPECT_EQ(6, pool_->NumConnectJobsInGroup("a"));
  request(5)->handle()->Reset();
  EXPECT_EQ(6, pool_->NumConnectJobsInGroup("a"));
  request(6)->handle()->Reset();
  ASSERT_EQ(5, pool_->NumConnectJobsInGroup("a"));

  // Wait for the last request to get a socket.
  EXPECT_EQ(OK, request(1)->WaitForResult());

  // Check order in which requests received sockets.
  // These are 1-based indices, while request(x) uses 0-based indices.
  EXPECT_EQ(1, GetOrderOfRequest(5));
  EXPECT_EQ(2, GetOrderOfRequest(3));
  EXPECT_EQ(3, GetOrderOfRequest(4));
  EXPECT_EQ(4, GetOrderOfRequest(1));
  EXPECT_EQ(5, GetOrderOfRequest(2));
}
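
For readers tracing the expected order in IgnoreLimitsOrder back to the comparator, the following standalone sketch re-derives it from the effective-priority rule; Req and its priority values are illustrative stand-ins, not the test fixture's types.

// Re-derives the completion order checked above: ignore_limits first, then
// priority, then FIFO among equals. Requests 5 and 6 are cancelled in the
// test, so they are omitted here.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Req {
  int index;     // Order in which StartRequestWithParams() was called.
  int priority;  // HIGHEST modeled as 2, LOW as 1; the values are arbitrary.
  bool ignore_limits;
};

int main() {
  std::vector<Req> reqs = {
      {0, 2, false}, {1, 2, false},  // Limited HIGHEST pair, finish last.
      {2, 1, true},  {3, 1, true},   // LOW but ignore_limits.
      {4, 2, true},                  // HIGHEST and ignore_limits, finishes first.
  };
  std::stable_sort(reqs.begin(), reqs.end(), [](const Req& a, const Req& b) {
    if (a.ignore_limits != b.ignore_limits)
      return a.ignore_limits;
    return a.priority > b.priority;
  });
  // Prints "4 2 3 0 1", the same completion order the GetOrderOfRequest()
  // expectations above encode with their 1-based indices.
  for (const Req& r : reqs)
    std::printf("%d ", r.index);
  std::printf("\n");
  return 0;
}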

}  // namespace

@@ -1025,7 +1025,10 @@ class ClientSocketPoolTest {
  // Releases connections until there is nothing to release.
  void ReleaseAllConnections(KeepAlive keep_alive);

  // Note that this uses 0-based indices, while GetOrderOfRequest takes and
  // returns 1-based indices.
  TestSocketRequest* request(int i) { return requests_[i]; }

  size_t requests_size() const { return requests_.size(); }
  ScopedVector<TestSocketRequest>* requests() { return &requests_; }
  size_t completion_count() const { return completion_count_; }