Revert 218616 "[net/dns] Perform A/AAAA queries for AF_UNSPEC resolutions in parallel."
This was causing crashes in the case of DNS responses with no addresses.

> [net/dns] Perform A/AAAA queries for AF_UNSPEC resolutions in parallel. 
> 
> The second DnsTransaction is scheduled as a second job on the resolver's
> PrioritizedDispatcher, at the beginning of its priority queue.  The two
> DnsTransactions run independently of each other, although if one of them
> finishes with an error, the other one will be scrapped immediately, and
> the Job will fall back to ProcTask. 
> 
> BUG=174992
> 
> Review URL: https://chromiumcodereview.appspot.com/19498003

TBR=mmenke@chromium.org
BUG=277625

Review URL: https://codereview.chromium.org/23102009

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@219027 0039d316-1c4b-4281-b951-d872f2087c98
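
For context, here is a minimal, self-contained sketch of the coordination pattern the quoted description outlines: two independent transactions whose results are merged, where a failure in either one scraps the other and triggers a fallback. This is illustrative only. The names below (QueryResult, DualQueryJob, OnTransactionDone) are hypothetical and do not come from the reverted HostResolverImpl code; the empty-address handling is called out because the revert cites crashes on DNS responses with no addresses, without saying exactly where the crash occurred.

#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for a resolver job coordinating two parallel
// transactions (A and AAAA). Not the actual HostResolverImpl::Job.
struct QueryResult {
  int error = 0;                       // 0 means OK.
  std::vector<std::string> addresses;  // May be empty even on success (NODATA).
};

class DualQueryJob {
 public:
  explicit DualQueryJob(std::function<void()> fallback)
      : fallback_(std::move(fallback)) {}

  // Invoked once per transaction, in whatever order they complete.
  void OnTransactionDone(const QueryResult& result) {
    if (failed_)
      return;  // The other transaction already failed; this one was scrapped.
    if (result.error != 0) {
      failed_ = true;
      fallback_();  // In the real resolver this was the ProcTask fallback.
      return;
    }
    results_.push_back(result);
    if (results_.size() == 2)
      Complete();
  }

 private:
  void Complete() {
    // Merge both address lists. Either list can legitimately be empty, so
    // this path must not assume at least one address per response.
    for (const QueryResult& r : results_)
      for (const std::string& address : r.addresses)
        std::cout << address << "\n";
  }

  std::function<void()> fallback_;
  std::vector<QueryResult> results_;
  bool failed_ = false;
};

A caller would construct the job, start both transactions, and route each completion into OnTransactionDone; the reverted change instead wired this into DnsTask and the resolver's PrioritizedDispatcher, whose affected code appears in the diff below.
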
mmenke@chromium.org committed Aug 22, 2013
1 parent 167c712 commit 70c04ab
Showing 11 changed files with 225 additions and 777 deletions.
6 changes: 3 additions & 3 deletions chrome/browser/net/dns_probe_test_util.cc
@@ -28,10 +28,10 @@ scoped_ptr<DnsClient> CreateMockDnsClientForProbes(

const uint16 kTypeA = net::dns_protocol::kTypeA;
MockDnsClientRuleList rules;
rules.push_back(MockDnsClientRule(DnsProbeRunner::kKnownGoodHostname, kTypeA,
result, false));
rules.push_back(
MockDnsClientRule(DnsProbeRunner::kKnownGoodHostname, kTypeA, result));

return scoped_ptr<DnsClient>(new net::MockDnsClient(config, rules));
return CreateMockDnsClient(config, rules).Pass();
}

} // namespace chrome_browser_net
44 changes: 11 additions & 33 deletions net/base/prioritized_dispatcher.cc
@@ -18,7 +18,17 @@ PrioritizedDispatcher::PrioritizedDispatcher(const Limits& limits)
: queue_(limits.reserved_slots.size()),
max_running_jobs_(limits.reserved_slots.size()),
num_running_jobs_(0) {
SetLimits(limits);
size_t total = 0;
for (size_t i = 0; i < limits.reserved_slots.size(); ++i) {
total += limits.reserved_slots[i];
max_running_jobs_[i] = total;
}
// Unreserved slots are available for all priorities.
DCHECK_LE(total, limits.total_jobs) << "sum(reserved_slots) <= total_jobs";
size_t spare = limits.total_jobs - total;
for (size_t i = limits.reserved_slots.size(); i > 0; --i) {
max_running_jobs_[i - 1] += spare;
}
}

PrioritizedDispatcher::~PrioritizedDispatcher() {}
@@ -35,18 +45,6 @@ PrioritizedDispatcher::Handle PrioritizedDispatcher::Add(
return queue_.Insert(job, priority);
}

PrioritizedDispatcher::Handle PrioritizedDispatcher::AddAtHead(
Job* job, Priority priority) {
DCHECK(job);
DCHECK_LT(priority, num_priorities());
if (num_running_jobs_ < max_running_jobs_[priority]) {
++num_running_jobs_;
job->Start();
return Handle();
}
return queue_.InsertAtFront(job, priority);
}

void PrioritizedDispatcher::Cancel(const Handle& handle) {
queue_.Erase(handle);
}
@@ -88,11 +86,6 @@ void PrioritizedDispatcher::OnJobFinished() {
MaybeDispatchJob(handle, handle.priority());
}

void PrioritizedDispatcher::Disable() {
// Set limits to allow no new jobs to start.
SetLimits(Limits(queue_.num_priorities(), 0));
}

bool PrioritizedDispatcher::MaybeDispatchJob(const Handle& handle,
Priority job_priority) {
DCHECK_LT(job_priority, num_priorities());
@@ -105,19 +98,4 @@ bool PrioritizedDispatcher::MaybeDispatchJob(const Handle& handle,
return true;
}

void PrioritizedDispatcher::SetLimits(const Limits& limits) {
DCHECK_EQ(queue_.num_priorities(), limits.reserved_slots.size());
size_t total = 0;
for (size_t i = 0; i < limits.reserved_slots.size(); ++i) {
total += limits.reserved_slots[i];
max_running_jobs_[i] = total;
}
// Unreserved slots are available for all priorities.
DCHECK_LE(total, limits.total_jobs) << "sum(reserved_slots) <= total_jobs";
size_t spare = limits.total_jobs - total;
for (size_t i = limits.reserved_slots.size(); i > 0; --i) {
max_running_jobs_[i - 1] += spare;
}
}

} // namespace net
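
The reserved-slot arithmetic restored in the constructor above (previously factored into the removed SetLimits) is easiest to see with concrete numbers. The sketch below is an illustrative snippet, not part of the diff; it assumes the usual five-level net::RequestPriority ordering, IDLE (0) through HIGHEST (4), and uses limits like the ones described in the EnforceLimits test comment further down.

// Five priorities, five total job slots.
PrioritizedDispatcher::Limits limits(NUM_PRIORITIES, 5);
limits.reserved_slots[HIGHEST] = 2;  // Usable only by HIGHEST jobs.
limits.reserved_slots[LOW] = 1;      // Usable by LOW or higher.

// Constructor math:
//   cumulative reserved slots:  {0, 0, 1, 1, 3}   (IDLE .. HIGHEST)
//   spare = total_jobs - 3 = 2, added to every entry
//   max_running_jobs_        =  {2, 2, 3, 3, 5}
//
// So an IDLE or LOWEST job starts only while fewer than 2 jobs are running,
// a LOW or MEDIUM job while fewer than 3, and a HIGHEST job while fewer than 5.
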
13 changes: 0 additions & 13 deletions net/base/prioritized_dispatcher.h
@@ -78,10 +78,6 @@ class NET_EXPORT_PRIVATE PrioritizedDispatcher {
// it is queued in the dispatcher.
Handle Add(Job* job, Priority priority);

// Just like Add, except that it adds Job at the font of queue of jobs with
// priorities of |priority|.
Handle AddAtHead(Job* job, Priority priority);

// Removes the job with |handle| from the queue. Invalidates |handle|.
// Note: a Handle is valid iff the job is in the queue, i.e. has not Started.
void Cancel(const Handle& handle);
@@ -98,21 +94,12 @@ class NET_EXPORT_PRIVATE PrioritizedDispatcher {
// Notifies the dispatcher that a running job has finished. Could start a job.
void OnJobFinished();

// Tells the dispatcher not to start new jobs when old jobs complete or are
// cancelled. Can't be undone. Can be used to avoid starting new jobs during
// cleanup.
void Disable();

private:
// Attempts to dispatch the job with |handle| at priority |priority| (might be
// different than |handle.priority()|. Returns true if successful. If so
// the |handle| becomes invalid.
bool MaybeDispatchJob(const Handle& handle, Priority priority);

// Updates |max_running_jobs_| to match |limits|. Does not start any new
// new jobs, even if the new limits would allow queued jobs to start.
void SetLimits(const Limits& limits);

// Queue for jobs that need to wait for a spare slot.
PriorityQueue<Job*> queue_;
// Maximum total number of running jobs allowed after a job at a particular
127 changes: 10 additions & 117 deletions net/base/prioritized_dispatcher_unittest.cc
@@ -52,17 +52,13 @@ class PrioritizedDispatcherTest : public testing::Test {
return handle_;
}

void Add(bool at_head) {
void Add() {
CHECK(handle_.is_null());
CHECK(!running_);
size_t num_queued = dispatcher_->num_queued_jobs();
size_t num_running = dispatcher_->num_running_jobs();

if (!at_head) {
handle_ = dispatcher_->Add(this, priority_);
} else {
handle_ = dispatcher_->AddAtHead(this, priority_);
}
handle_ = dispatcher_->Add(this, priority_);

if (handle_.is_null()) {
EXPECT_EQ(num_queued, dispatcher_->num_queued_jobs());
@@ -144,14 +140,7 @@ class PrioritizedDispatcherTest : public testing::Test {
TestJob* AddJob(char data, Priority priority) {
TestJob* job = new TestJob(dispatcher_.get(), data, priority, &log_);
jobs_.push_back(job);
job->Add(false);
return job;
}

TestJob* AddJobAtHead(char data, Priority priority) {
TestJob* job = new TestJob(dispatcher_.get(), data, priority, &log_);
jobs_.push_back(job);
job->Add(true);
job->Add();
return job;
}

@@ -213,33 +202,6 @@ TEST_F(PrioritizedDispatcherTest, AddPriority) {
Expect("a.c.d.b.e.");
}

TEST_F(PrioritizedDispatcherTest, AddAtHead) {
PrioritizedDispatcher::Limits limits(NUM_PRIORITIES, 1);
Prepare(limits);

TestJob* job_a = AddJob('a', MEDIUM);
TestJob* job_b = AddJobAtHead('b', MEDIUM);
TestJob* job_c = AddJobAtHead('c', HIGHEST);
TestJob* job_d = AddJobAtHead('d', HIGHEST);
TestJob* job_e = AddJobAtHead('e', MEDIUM);
TestJob* job_f = AddJob('f', MEDIUM);

ASSERT_TRUE(job_a->running());
job_a->Finish();
ASSERT_TRUE(job_d->running());
job_d->Finish();
ASSERT_TRUE(job_c->running());
job_c->Finish();
ASSERT_TRUE(job_e->running());
job_e->Finish();
ASSERT_TRUE(job_b->running());
job_b->Finish();
ASSERT_TRUE(job_f->running());
job_f->Finish();

Expect("a.d.c.e.b.f.");
}

TEST_F(PrioritizedDispatcherTest, EnforceLimits) {
// Reserve 2 for HIGHEST and 1 for LOW or higher.
// This leaves 2 for LOWEST or lower.
@@ -283,40 +245,29 @@ TEST_F(PrioritizedDispatcherTest, EnforceLimits) {
}

TEST_F(PrioritizedDispatcherTest, ChangePriority) {
PrioritizedDispatcher::Limits limits(NUM_PRIORITIES, 2);
// Reserve one slot only for HIGHEST priority requests.
limits.reserved_slots[HIGHEST] = 1;
PrioritizedDispatcher::Limits limits(NUM_PRIORITIES, 1);
Prepare(limits);

TestJob* job_a = AddJob('a', IDLE);
TestJob* job_b = AddJob('b', LOW);
TestJob* job_c = AddJob('c', MEDIUM);
TestJob* job_d = AddJob('d', MEDIUM);
TestJob* job_e = AddJob('e', IDLE);
TestJob* job_b = AddJob('b', MEDIUM);
TestJob* job_c = AddJob('c', HIGHEST);
TestJob* job_d = AddJob('d', HIGHEST);

ASSERT_FALSE(job_b->running());
ASSERT_FALSE(job_c->running());
job_b->ChangePriority(MEDIUM);
job_c->ChangePriority(LOW);
job_b->ChangePriority(HIGHEST);
job_c->ChangePriority(MEDIUM);

ASSERT_TRUE(job_a->running());
job_a->Finish();
ASSERT_TRUE(job_d->running());
job_d->Finish();

EXPECT_FALSE(job_e->running());
// Increasing |job_e|'s priority to HIGHEST should result in it being
// started immediately.
job_e->ChangePriority(HIGHEST);
ASSERT_TRUE(job_e->running());
job_e->Finish();

ASSERT_TRUE(job_b->running());
job_b->Finish();
ASSERT_TRUE(job_c->running());
job_c->Finish();

Expect("a.d.be..c.");
Expect("a.d.b.c.");
}

TEST_F(PrioritizedDispatcherTest, Cancel) {
@@ -373,64 +324,6 @@ TEST_F(PrioritizedDispatcherTest, EvictFromEmpty) {
EXPECT_TRUE(dispatcher_->EvictOldestLowest() == NULL);
}

TEST_F(PrioritizedDispatcherTest, AddWhileDisabled) {
PrioritizedDispatcher::Limits limits(NUM_PRIORITIES, 1);
Prepare(limits);

dispatcher_->Disable();
TestJob* job_a = AddJob('a', MEDIUM);
TestJob* job_b = AddJobAtHead('b', MEDIUM);

EXPECT_FALSE(job_a->running());
EXPECT_FALSE(job_b->running());
EXPECT_EQ(0u, dispatcher_->num_running_jobs());
EXPECT_EQ(2u, dispatcher_->num_queued_jobs());
}

TEST_F(PrioritizedDispatcherTest, DisableThenCancel) {
PrioritizedDispatcher::Limits limits(NUM_PRIORITIES, 1);
Prepare(limits);

TestJob* job_a = AddJob('a', IDLE);
TestJob* job_b = AddJob('b', IDLE);
TestJob* job_c = AddJob('c', IDLE);
dispatcher_->Disable();

EXPECT_TRUE(job_a->running());
EXPECT_FALSE(job_b->running());
EXPECT_FALSE(job_c->running());
job_a->Finish();

EXPECT_FALSE(job_b->running());
EXPECT_FALSE(job_c->running());

job_b->Cancel();
EXPECT_FALSE(job_c->running());
job_c->Cancel();

Expect("a.");
}

TEST_F(PrioritizedDispatcherTest, DisableThenIncreatePriority) {
PrioritizedDispatcher::Limits limits(NUM_PRIORITIES, 2);
limits.reserved_slots[HIGHEST] = 1;
Prepare(limits);

TestJob* job_a = AddJob('a', IDLE);
TestJob* job_b = AddJob('b', IDLE);
EXPECT_TRUE(job_a->running());
EXPECT_FALSE(job_b->running());
dispatcher_->Disable();

job_b->ChangePriority(HIGHEST);
EXPECT_FALSE(job_b->running());
job_a->Finish();
EXPECT_FALSE(job_b->running());

job_b->Cancel();
Expect("a.");
}

#if GTEST_HAS_DEATH_TEST && !defined(NDEBUG)
TEST_F(PrioritizedDispatcherTest, CancelNull) {
PrioritizedDispatcher::Limits limits(NUM_PRIORITIES, 1);
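
For reference, the removed AddAtHead test above is the clearest statement of the semantics being reverted: Add() appends a job to the back of its priority's FIFO list, AddAtHead() inserts it at the front, and the dispatcher still starts the highest queued priority first when a slot frees up. A condensed, annotated reading of that test:

// One total slot, so only 'a' starts immediately; everything else queues.
AddJob('a', MEDIUM);         // running
AddJobAtHead('b', MEDIUM);   // MEDIUM queue:  b
AddJobAtHead('c', HIGHEST);  // HIGHEST queue: c
AddJobAtHead('d', HIGHEST);  // HIGHEST queue: d, c
AddJobAtHead('e', MEDIUM);   // MEDIUM queue:  e, b
AddJob('f', MEDIUM);         // MEDIUM queue:  e, b, f
// As each running job finishes, the front of the highest non-empty priority
// starts next, which yields the expected log "a.d.c.e.b.f.".
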
21 changes: 0 additions & 21 deletions net/base/priority_queue.h
@@ -139,24 +139,6 @@ class PriorityQueue : public base::NonThreadSafe {
#endif
}

// Adds |value| with |priority| to the queue. Returns a pointer to the
// created element.
Pointer InsertAtFront(const T& value, Priority priority) {
DCHECK(CalledOnValidThread());
DCHECK_LT(priority, lists_.size());
++size_;
List& list = lists_[priority];
#if !defined(NDEBUG)
unsigned id = next_id_;
valid_ids_.insert(id);
++next_id_;
return Pointer(priority, list.insert(list.begin(),
std::make_pair(id, value)));
#else
return Pointer(priority, list.insert(list.begin(), value));
#endif
}

// Removes the value pointed by |pointer| from the queue. All pointers to this
// value including |pointer| become invalid.
void Erase(const Pointer& pointer) {
@@ -231,9 +213,6 @@ size_ = 0u;
size_ = 0u;
}

// Returns the number of priorities the queue supports.
size_t num_priorities() const { return lists_.size(); }

// Returns number of queued values.
size_t size() const {
DCHECK(CalledOnValidThread());
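
The removed PriorityQueue::InsertAtFront above differs from Insert only in where the new value lands within its priority's list: list.begin() rather than the end. A minimal sketch of that distinction, assuming Insert(value, priority) has the same shape as the removed method:

PriorityQueue<int> queue(3);  // Three priority levels.
queue.Insert(1, 1);           // priority-1 list: [1]
queue.Insert(2, 1);           // priority-1 list: [1, 2]
queue.InsertAtFront(3, 1);    // priority-1 list: [3, 1, 2]  (removed API)
// Within one priority level the list is otherwise FIFO, so InsertAtFront let
// a late arrival jump ahead of earlier entries at the same priority; this is
// the primitive PrioritizedDispatcher::AddAtHead was built on.
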
17 changes: 1 addition & 16 deletions net/base/priority_queue_unittest.cc
@@ -91,22 +91,7 @@ TEST_F(PriorityQueueTest, EraseFromMiddle) {
queue_.Erase(pointers_[2]);
queue_.Erase(pointers_[3]);

const int expected_order[] = { 8, 1, 6, 0, 5, 4, 7 };

for (size_t i = 0; i < arraysize(expected_order); ++i) {
EXPECT_EQ(expected_order[i], queue_.FirstMin().value());
queue_.Erase(queue_.FirstMin());
}
CheckEmpty();
}

TEST_F(PriorityQueueTest, InsertAtFront) {
queue_.InsertAtFront(9, 2);
queue_.InsertAtFront(10, 0);
queue_.InsertAtFront(11, 1);
queue_.InsertAtFront(12, 1);

const int expected_order[] = { 10, 3, 8, 12, 11, 1, 6, 9, 0, 2, 5, 4, 7 };
int expected_order[] = { 8, 1, 6, 0, 5, 4, 7 };

for (size_t i = 0; i < arraysize(expected_order); ++i) {
EXPECT_EQ(expected_order[i], queue_.FirstMin().value());