
Moved result processing to TestLauncher.

Unit test delegate and content test delegate
result processing are very similar.
This CL combines the common result processing
and moves it to TestLauncher.

This should be the last major refactor for this bug.

Bug: 936248
Change-Id: If8131bef01ee02bc92626c6c431dc557b06dc05e
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1761653
Reviewed-by: Scott Violet <sky@chromium.org>
Reviewed-by: Erik Chen <erikchen@chromium.org>
Commit-Queue: Ilia Samsonov <isamsonov@google.com>
Cr-Commit-Position: refs/heads/master@{#689281}
Ilia Samsonov authored 2019-08-22 00:57:10 +00:00, committed by Commit Bot
parent c3a479d827, commit c368d05fa0
7 changed files with 149 additions and 263 deletions

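Before the diff, a quick orientation: after this CL, TestLauncher itself parses the child's XML result file and normalizes statuses, and TestLauncherDelegate::ProcessTestResults() shrinks to an optional post-processing hook. A minimal sketch of a delegate under the new contract (ExampleDelegate is hypothetical, not part of the CL; the remaining pure-virtual methods are omitted):

class ExampleDelegate : public base::TestLauncherDelegate {
 public:
  // Called by TestLauncher with already-interpreted results.
  void ProcessTestResults(std::vector<base::TestResult>& test_results,
                          base::TimeDelta elapsed_time) override {
    // Example-only adjustment; WrapperTestLauncherDelegate below does this
    // same kind of post-processing for content tests.
    for (base::TestResult& result : test_results)
      result.elapsed_time = elapsed_time;
  }
  // GetTests(), GetCommandLine(), GetWrapper(), ... omitted for brevity.
};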
base/test/launcher/test_launcher.cc
@@ -42,6 +42,7 @@
 #include "base/task/post_task.h"
 #include "base/task/thread_pool/thread_pool.h"
 #include "base/test/gtest_util.h"
+#include "base/test/gtest_xml_util.h"
 #include "base/test/launcher/test_launcher_tracer.h"
 #include "base/test/launcher/test_results_tracker.h"
 #include "base/test/test_switches.h"
@@ -888,16 +889,116 @@ void TestLauncher::LaunchChildGTestProcess(
       process_results.was_timeout));
 }
 
+// Determines which result status will be assigned for missing test results.
+TestResult::Status MissingResultStatus(size_t tests_to_run_count,
+                                       bool was_timeout,
+                                       bool exit_code) {
+  // There is more than one test; we cannot assess the status of any
+  // single one of them.
+  if (tests_to_run_count > 1u)
+    return TestResult::TEST_SKIPPED;
+
+  // There is only one test and no results.
+  // Try to determine its status from the timeout flag or the exit code.
+  if (was_timeout)
+    return TestResult::TEST_TIMEOUT;
+  if (exit_code != 0)
+    return TestResult::TEST_FAILURE;
+
+  // Strange case: the test executed successfully, but we failed to read
+  // the machine-readable report for it.
+  return TestResult::TEST_UNKNOWN;
+}
+
+// Returns interpreted test results.
 void TestLauncher::ProcessTestResults(
     const std::vector<std::string>& test_names,
-    const base::FilePath& result_file,
+    const FilePath& result_file,
     const std::string& output,
-    const base::TimeDelta& elapsed_time,
+    TimeDelta elapsed_time,
     int exit_code,
     bool was_timeout) {
-  std::vector<TestResult> test_results = launcher_delegate_->ProcessTestResults(
-      test_names, result_file, output, elapsed_time, exit_code, was_timeout);
-
-  for (const auto& result : test_results)
+  std::vector<TestResult> test_results;
+  bool crashed = false;
+  bool have_test_results =
+      ProcessGTestOutput(result_file, &test_results, &crashed);
+
+  if (!have_test_results) {
+    // We do not have reliable details about test results (parsing test
+    // stdout is known to be unreliable).
+    LOG(ERROR) << "Failed to get out-of-band test success data, "
+                  "dumping full stdio below:\n"
+               << output << "\n";
+    // Oddly, ProcessGTestOutput() sometimes returns false even though
+    // |test_results| is not empty.
+    test_results.clear();
+  }
+
+  TestResult::Status missing_result_status =
+      MissingResultStatus(test_names.size(), was_timeout, exit_code);
+
+  // TODO(phajdan.jr): Check for duplicates and mismatches between
+  // the results we got from XML file and tests we intended to run.
+  std::map<std::string, TestResult> results_map;
+  for (const auto& i : test_results)
+    results_map[i.full_name] = i;
+
+  // Results to be reported back to the test launcher.
+  std::vector<TestResult> final_results;
+
+  for (const auto& i : test_names) {
+    if (Contains(results_map, i)) {
+      TestResult test_result = results_map[i];
+      // Fix up the test status: we forcibly kill the child process
+      // after the timeout, so from XML results it looks just like
+      // a crash.
+      if ((was_timeout && test_result.status == TestResult::TEST_CRASH) ||
+          // We run multiple tests in a batch with a timeout applied to the
+          // entire batch. It is possible that, with other tests running
+          // quickly, some tests take longer than the per-test timeout.
+          // For consistent handling of tests independent of order and
+          // other factors, mark them as timing out.
+          test_result.elapsed_time > launcher_delegate_->GetTimeout()) {
+        test_result.status = TestResult::TEST_TIMEOUT;
+      }
+      final_results.push_back(test_result);
+    } else {
+      // TODO(phajdan.jr): Explicitly pass the info that the test didn't
+      // run for a mysterious reason.
+      LOG(ERROR) << "no test result for " << i;
+      TestResult test_result;
+      test_result.full_name = i;
+      test_result.status = missing_result_status;
+      final_results.push_back(test_result);
+    }
+  }
+  // TODO(phajdan.jr): Handle the case where processing XML output
+  // indicates a crash but none of the test results is marked as crashing.
+
+  bool has_non_success_test = false;
+  for (const auto& i : final_results) {
+    if (i.status != TestResult::TEST_SUCCESS) {
+      has_non_success_test = true;
+      break;
+    }
+  }
+
+  if (!has_non_success_test && exit_code != 0) {
+    // This is a somewhat surprising case: all tests are marked as
+    // successful, but the exit code was not zero. This can happen e.g.
+    // under memory tools that report leaks this way. Mark all tests as a
+    // failure on exit; for more precise info they'd need to be retried
+    // serially.
+    for (auto& i : final_results)
+      i.status = TestResult::TEST_FAILURE_ON_EXIT;
+  }
+
+  for (auto& i : final_results) {
+    // Fix the output snippet after possible changes to the test result.
+    i.output_snippet = GetTestOutputSnippet(i, output);
+  }
+
+  launcher_delegate_->ProcessTestResults(final_results, elapsed_time);
+
+  for (const auto& result : final_results)
     OnTestFinished(result);
 }

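The MissingResultStatus() helper added above is a small decision table. Below is a self-contained restatement with plain types, compilable outside Chromium. (Note: the CL declares the parameter as bool exit_code but compares it to 0; that works only because the call site's int exit code collapses to true/false. The sketch uses int, which appears to be the intent.)

#include <cassert>
#include <cstddef>

enum class Status { kSkipped, kTimeout, kFailure, kUnknown };

// Mirrors the logic of MissingResultStatus() from the hunk above.
Status MissingResultStatus(std::size_t tests_to_run_count,
                           bool was_timeout,
                           int exit_code) {
  // With more than one test and no report, no single test can be blamed.
  if (tests_to_run_count > 1u)
    return Status::kSkipped;
  if (was_timeout)
    return Status::kTimeout;
  if (exit_code != 0)
    return Status::kFailure;
  // The process "succeeded" but left no machine-readable report.
  return Status::kUnknown;
}

int main() {
  assert(MissingResultStatus(2, true, 1) == Status::kSkipped);
  assert(MissingResultStatus(1, true, 0) == Status::kTimeout);
  assert(MissingResultStatus(1, false, 3) == Status::kFailure);
  assert(MissingResultStatus(1, false, 0) == Status::kUnknown);
  return 0;
}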
base/test/launcher/test_launcher.h
@@ -49,18 +49,9 @@ class TestLauncherDelegate {
   // must put the result in |output| and return true on success.
   virtual bool GetTests(std::vector<TestIdentifier>* output) = 0;
 
-  // Invoked after a child process finishes, reporting the process |exit_code|,
-  // child process |elapsed_time|, whether or not the process was terminated as
-  // a result of a timeout, and the output of the child (stdout and stderr
-  // together). NOTE: this method is invoked on the main thread.
-  // Returns test results of child process.
-  virtual std::vector<TestResult> ProcessTestResults(
-      const std::vector<std::string>& test_names,
-      const base::FilePath& output_file,
-      const std::string& output,
-      const base::TimeDelta& elapsed_time,
-      int exit_code,
-      bool was_timeout) = 0;
+  // Additional delegate TestResult processing.
+  virtual void ProcessTestResults(std::vector<TestResult>& test_results,
+                                  TimeDelta elapsed_time) {}
 
   // Called to get the command line for the specified tests.
   // |output_file_| is populated with the path to the result file, and must
@@ -72,7 +63,7 @@ class TestLauncherDelegate {
   // Invoked when a test process exceeds its runtime, immediately before it is
   // terminated. |command_line| is the command line used to launch the process.
   // NOTE: this method is invoked on the thread the process is launched on.
-  virtual void OnTestTimedOut(const base::CommandLine& cmd_line) {}
+  virtual void OnTestTimedOut(const CommandLine& cmd_line) {}
 
   // Returns the delegate specific wrapper for command line.
   // If it is not empty, it is prepended to the final command line.
@@ -204,10 +195,16 @@ class TestLauncher {
   // wait for child processes). virtual to mock in testing.
   virtual void CreateAndStartThreadPool(int num_parallel_jobs);
 
+  // Callback to receive the result of a test.
+  // |result_file| is a path to the XML file written by the child process.
+  // It contains information about the test and any failed
+  // EXPECT/ASSERT/DCHECK statements. The test launcher parses that
+  // file to get additional information about the test run (status,
+  // error messages, stack traces and file/line for failures).
   void ProcessTestResults(const std::vector<std::string>& test_names,
-                          const base::FilePath& result_file,
+                          const FilePath& result_file,
                           const std::string& output,
-                          const base::TimeDelta& elapsed_time,
+                          TimeDelta elapsed_time,
                           int exit_code,
                           bool was_timeout);

base/test/launcher/test_launcher_unittest.cc
@@ -78,14 +78,9 @@ class MockTestLauncherDelegate : public TestLauncherDelegate {
   MOCK_METHOD2(WillRunTest,
                bool(const std::string& test_case_name,
                     const std::string& test_name));
-  MOCK_METHOD6(
-      ProcessTestResults,
-      std::vector<TestResult>(const std::vector<std::string>& test_names,
-                              const base::FilePath& output_file,
-                              const std::string& output,
-                              const base::TimeDelta& elapsed_time,
-                              int exit_code,
-                              bool was_timeout));
+  MOCK_METHOD2(ProcessTestResults,
+               void(std::vector<TestResult>& test_results,
+                    TimeDelta elapsed_time));
   MOCK_METHOD3(GetCommandLine,
                CommandLine(const std::vector<std::string>& test_names,
                            const FilePath& temp_dir_,
@@ -127,7 +122,7 @@ class TestLauncherTest : public testing::Test {
                           testing::Return(true)));
     EXPECT_CALL(delegate, WillRunTest(_, _))
         .WillRepeatedly(testing::Return(true));
-    EXPECT_CALL(delegate, ProcessTestResults(_, _, _, _, _, _)).Times(0);
+    EXPECT_CALL(delegate, ProcessTestResults(_, _)).Times(0);
     EXPECT_CALL(delegate, GetCommandLine(_, _, _))
         .WillRepeatedly(testing::Return(CommandLine(CommandLine::NO_PROGRAM)));
     EXPECT_CALL(delegate, GetWrapper())
@@ -658,18 +653,22 @@ TEST_F(UnitTestLauncherDelegateTester, BatchSize) {
 // The following tests are disabled as they are meant to only run from
 // |RunMockTests| to validate test launcher output for known results.
 
-// Basic Test to pass
+// Basic test to pass
 TEST(MockUnitTests, DISABLED_PassTest) {
   ASSERT_TRUE(true);
 }
 
-// Basic Test to fail
+// Basic test to fail
 TEST(MockUnitTests, DISABLED_FailTest) {
   ASSERT_TRUE(false);
 }
 
-// Basic Test to crash
+// Basic test to crash
 TEST(MockUnitTests, DISABLED_CrashTest) {
   IMMEDIATE_CRASH();
 }
 
+// Basic test that will not be reached with the default batch size.
+TEST(MockUnitTests, DISABLED_NoRunTest) {
+  ASSERT_TRUE(true);
+}
+
 // Using TestLauncher to launch simple unittests
 // and validate the resulting JSON file.
@@ -681,7 +680,7 @@ TEST_F(UnitTestLauncherDelegateTester, RunMockTests) {
   FilePath path = dir.GetPath().AppendASCII("SaveSummaryResult.json");
   command_line.AppendSwitchPath("test-launcher-summary-output", path);
   command_line.AppendSwitch("gtest_also_run_disabled_tests");
-  command_line.AppendSwitch("--test-launcher-retry-limit=0");
+  command_line.AppendSwitchASCII("test-launcher-retry-limit", "0");
 #if defined(OS_WIN)
   // In Windows versions prior to Windows 8, nested job objects are
   // not allowed and cause this test to fail.
@@ -699,16 +698,18 @@ TEST_F(UnitTestLauncherDelegateTester, RunMockTests) {
   Value* val = root->FindDictKey("test_locations");
   ASSERT_TRUE(val);
-  EXPECT_EQ(3u, val->DictSize());
+  EXPECT_EQ(4u, val->DictSize());
 
   // If the path or a test location changes, the following expectations
   // will need to change accordingly.
   std::string file_name = "../../base/test/launcher/test_launcher_unittest.cc";
   EXPECT_TRUE(test_launcher_utils::ValidateTestLocation(
-      val, "MockUnitTests.DISABLED_PassTest", file_name, 662));
+      val, "MockUnitTests.DISABLED_PassTest", file_name, 657));
   EXPECT_TRUE(test_launcher_utils::ValidateTestLocation(
-      val, "MockUnitTests.DISABLED_FailTest", file_name, 666));
+      val, "MockUnitTests.DISABLED_FailTest", file_name, 661));
   EXPECT_TRUE(test_launcher_utils::ValidateTestLocation(
-      val, "MockUnitTests.DISABLED_CrashTest", file_name, 670));
+      val, "MockUnitTests.DISABLED_CrashTest", file_name, 665));
+  EXPECT_TRUE(test_launcher_utils::ValidateTestLocation(
+      val, "MockUnitTests.DISABLED_NoRunTest", file_name, 669));
 
   val = root->FindListKey("per_iteration_data");
   ASSERT_TRUE(val);
@@ -717,7 +718,7 @@ TEST_F(UnitTestLauncherDelegateTester, RunMockTests) {
   Value* iteration_val = &(val->GetList().at(0));
   ASSERT_TRUE(iteration_val);
   ASSERT_TRUE(iteration_val->is_dict());
-  EXPECT_EQ(3u, iteration_val->DictSize());
+  EXPECT_EQ(4u, iteration_val->DictSize());
   // We expect the result to be stripped of disabled prefix.
   EXPECT_TRUE(test_launcher_utils::ValidateTestResult(
       iteration_val, "MockUnitTests.PassTest", "SUCCESS", 0u));
@@ -725,6 +726,8 @@ TEST_F(UnitTestLauncherDelegateTester, RunMockTests) {
       iteration_val, "MockUnitTests.FailTest", "FAILURE", 1u));
   EXPECT_TRUE(test_launcher_utils::ValidateTestResult(
       iteration_val, "MockUnitTests.CrashTest", "CRASH", 0u));
+  EXPECT_TRUE(test_launcher_utils::ValidateTestResult(
+      iteration_val, "MockUnitTests.NoRunTest", "NOTRUN", 0u));
 }
 
 }  // namespace

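A small fix in this file is easy to miss: base::CommandLine::AppendSwitch() takes a switch name, so the old call registered a switch whose name literally contained the "--" prefix and "=0" suffix, which a lookup of "test-launcher-retry-limit" never finds. A sketch of the corrected pattern, under that assumption about AppendSwitch() semantics:

base::CommandLine command_line(base::CommandLine::NO_PROGRAM);

// Pre-CL (wrong): the whole string becomes the switch name.
// command_line.AppendSwitch("--test-launcher-retry-limit=0");

// Post-CL (right): name and value are stored separately, so
// GetSwitchValueASCII("test-launcher-retry-limit") yields "0".
command_line.AppendSwitchASCII("test-launcher-retry-limit", "0");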
base/test/launcher/unit_test_launcher.cc
@@ -26,7 +26,6 @@
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
 #include "base/system/sys_info.h"
-#include "base/test/gtest_xml_util.h"
 #include "base/test/launcher/test_launcher.h"
 #include "base/test/test_switches.h"
 #include "base/test/test_timeouts.h"
@@ -214,139 +213,6 @@ void InitGoogleTestWChar(int* argc, wchar_t** argv) {
 }
 #endif  // defined(OS_WIN)
 
-// Called if there are no test results; populates results with UNKNOWN results.
-// If there is only one test, will try to determine status by exit_code and
-// was_timeout.
-std::vector<TestResult> ProcessMissingTestResults(
-    const std::vector<std::string>& test_names,
-    const std::string& output,
-    bool was_timeout,
-    bool exit_code) {
-  std::vector<TestResult> results;
-  // We do not have reliable details about test results (parsing test
-  // stdout is known to be unreliable).
-  fprintf(stdout,
-          "Failed to get out-of-band test success data, "
-          "dumping full stdio below:\n%s\n",
-          output.c_str());
-  fflush(stdout);
-
-  // There is only one test and no results.
-  // Try to determine status by exit code.
-  if (test_names.size() == 1) {
-    const std::string& test_name = test_names.front();
-    TestResult test_result;
-    test_result.full_name = test_name;
-
-    if (was_timeout) {
-      test_result.status = TestResult::TEST_TIMEOUT;
-    } else if (exit_code != 0) {
-      test_result.status = TestResult::TEST_FAILURE;
-    } else {
-      // It's a strange case when the test executed successfully,
-      // but we failed to read the machine-readable report for it.
-      test_result.status = TestResult::TEST_UNKNOWN;
-    }
-
-    results.push_back(test_result);
-    return results;
-  }
-
-  for (auto& test_name : test_names) {
-    TestResult test_result;
-    test_result.full_name = test_name;
-    test_result.status = TestResult::TEST_SKIPPED;
-    results.push_back(test_result);
-  }
-  return results;
-}
-
-// Returns interpreted test results.
-std::vector<TestResult> UnitTestProcessTestResults(
-    const std::vector<std::string>& test_names,
-    const base::FilePath& output_file,
-    const std::string& output,
-    int exit_code,
-    bool was_timeout) {
-  std::vector<TestResult> test_results;
-  bool crashed = false;
-  bool have_test_results =
-      ProcessGTestOutput(output_file, &test_results, &crashed);
-
-  if (!have_test_results) {
-    return ProcessMissingTestResults(test_names, output, was_timeout,
-                                     exit_code);
-  }
-
-  // TODO(phajdan.jr): Check for duplicates and mismatches between
-  // the results we got from XML file and tests we intended to run.
-  std::map<std::string, TestResult> results_map;
-  for (const auto& i : test_results)
-    results_map[i.full_name] = i;
-
-  // Results to be reported back to the test launcher.
-  std::vector<TestResult> final_results;
-
-  for (const auto& i : test_names) {
-    if (Contains(results_map, i)) {
-      TestResult test_result = results_map[i];
-      if (test_result.status == TestResult::TEST_CRASH) {
-        if (was_timeout) {
-          // Fix up the test status: we forcibly kill the child process
-          // after the timeout, so from XML results it looks just like
-          // a crash.
-          test_result.status = TestResult::TEST_TIMEOUT;
-        }
-      } else if (test_result.status == TestResult::TEST_SUCCESS ||
-                 test_result.status == TestResult::TEST_FAILURE) {
-        // We run multiple tests in a batch with a timeout applied
-        // to the entire batch. It is possible that with other tests
-        // running quickly some tests take longer than the per-test timeout.
-        // For consistent handling of tests independent of order and other
-        // factors, mark them as timing out.
-        if (test_result.elapsed_time > TestTimeouts::test_launcher_timeout()) {
-          test_result.status = TestResult::TEST_TIMEOUT;
-        }
-      }
-      test_result.output_snippet = GetTestOutputSnippet(test_result, output);
-      final_results.push_back(test_result);
-    } else {
-      // TODO(phajdan.jr): Explicitly pass the info that the test didn't
-      // run for a mysterious reason.
-      LOG(ERROR) << "no test result for " << i;
-      TestResult test_result;
-      test_result.full_name = i;
-      test_result.status = TestResult::TEST_SKIPPED;
-      test_result.output_snippet = GetTestOutputSnippet(test_result, output);
-      final_results.push_back(test_result);
-    }
-  }
-  // TODO(phajdan.jr): Handle the case where processing XML output
-  // indicates a crash but none of the test results is marked as crashing.
-
-  bool has_non_success_test = false;
-  for (const auto& i : final_results) {
-    if (i.status != TestResult::TEST_SUCCESS) {
-      has_non_success_test = true;
-      break;
-    }
-  }
-
-  if (!has_non_success_test && exit_code != 0) {
-    // This is a bit surprising case: all tests are marked as successful,
-    // but the exit code was not zero. This can happen e.g. under memory
-    // tools that report leaks this way. Mark all tests as a failure on exit,
-    // and for more precise info they'd need to be retried serially.
-    for (auto& i : final_results)
-      i.status = TestResult::TEST_FAILURE_ON_EXIT;
-  }
-
-  for (auto& i : final_results) {
-    // Fix the output snippet after possible changes to the test result.
-    i.output_snippet = GetTestOutputSnippet(i, output);
-  }
-
-  return final_results;
-}
-
 }  // namespace
 
 // Flag to avoid using job objects
@@ -470,17 +336,6 @@ bool UnitTestLauncherDelegate::GetTests(std::vector<TestIdentifier>* output) {
   return platform_delegate_->GetTests(output);
 }
 
-std::vector<TestResult> UnitTestLauncherDelegate::ProcessTestResults(
-    const std::vector<std::string>& test_names,
-    const base::FilePath& output_file,
-    const std::string& output,
-    const base::TimeDelta& elapsed_time,
-    int exit_code,
-    bool was_timeout) {
-  return UnitTestProcessTestResults(test_names, output_file, output, exit_code,
-                                    was_timeout);
-}
-
 CommandLine UnitTestLauncherDelegate::GetCommandLine(
     const std::vector<std::string>& test_names,
     const FilePath& temp_dir,

base/test/launcher/unit_test_launcher.h
@@ -129,14 +129,6 @@ class UnitTestLauncherDelegate : public TestLauncherDelegate {
   // TestLauncherDelegate:
   bool GetTests(std::vector<TestIdentifier>* output) override;
 
-  std::vector<TestResult> ProcessTestResults(
-      const std::vector<std::string>& test_names,
-      const base::FilePath& output_file,
-      const std::string& output,
-      const base::TimeDelta& elapsed_time,
-      int exit_code,
-      bool was_timeout) override;
-
   CommandLine GetCommandLine(const std::vector<std::string>& test_names,
                              const FilePath& temp_dir,
                              FilePath* output_file) override;

content/public/test/test_launcher.cc
@@ -144,19 +144,9 @@ class WrapperTestLauncherDelegate : public base::TestLauncherDelegate {
   // ProcessLifetimeObserver) to the caller's content::TestLauncherDelegate.
   void OnTestTimedOut(const base::CommandLine& command_line) override;
 
-  // Callback to receive result of a test.
-  // |output_file| is a path to xml file written by test-launcher
-  // child process. It contains information about test and failed
-  // EXPECT/ASSERT/DCHECK statements. Test launcher parses that
-  // file to get additional information about test run (status,
-  // error-messages, stack-traces and file/line for failures).
-  std::vector<base::TestResult> ProcessTestResults(
-      const std::vector<std::string>& test_names,
-      const base::FilePath& output_file,
-      const std::string& output,
-      const base::TimeDelta& elapsed_time,
-      int exit_code,
-      bool was_timeout) override;
+  // Delegate additional TestResult processing.
+  void ProcessTestResults(std::vector<base::TestResult>& test_results,
+                          base::TimeDelta elapsed_time) override;
 
   content::TestLauncherDelegate* launcher_delegate_;
@@ -240,66 +230,14 @@ void WrapperTestLauncherDelegate::OnTestTimedOut(
   launcher_delegate_->OnTestTimedOut(command_line);
 }
 
-std::vector<base::TestResult> WrapperTestLauncherDelegate::ProcessTestResults(
-    const std::vector<std::string>& test_names,
-    const base::FilePath& output_file,
-    const std::string& output,
-    const base::TimeDelta& elapsed_time,
-    int exit_code,
-    bool was_timeout) {
-  base::TestResult result;
-
-  DCHECK_EQ(1u, test_names.size());
-  std::string test_name = test_names.front();
-  result.full_name = test_name;
-
-  bool crashed = false;
-  std::vector<base::TestResult> parsed_results;
-  bool have_test_results =
-      base::ProcessGTestOutput(output_file, &parsed_results, &crashed);
-
-  if (!base::DeleteFile(output_file.DirName(), true)) {
-    LOG(WARNING) << "Failed to delete output file: " << output_file.value();
-  }
-
-  // Use GTest XML to determine test status. Fall back to the exit code if
-  // parsing failed.
-  if (have_test_results && !parsed_results.empty()) {
-    // We expect only one test result here.
-    DCHECK_EQ(1U, parsed_results.size())
-        << "Unexpectedly ran test more than once: " << test_name;
-    DCHECK_EQ(test_name, parsed_results.front().full_name);
-
-    result = parsed_results.front();
-
-    if (was_timeout) {
-      // Fix up the test status: we forcibly kill the child process
-      // after the timeout, so from XML results it looks like
-      // a crash.
-      result.status = base::TestResult::TEST_TIMEOUT;
-    } else if (result.status == base::TestResult::TEST_SUCCESS &&
-               exit_code != 0) {
-      // This is a bit surprising case: the test is marked as successful,
-      // but the exit code was not zero. This can happen e.g. under
-      // memory tools that report leaks this way. Mark the test as a
-      // failure on exit.
-      result.status = base::TestResult::TEST_FAILURE_ON_EXIT;
-    }
-  } else {
-    if (was_timeout)
-      result.status = base::TestResult::TEST_TIMEOUT;
-    else if (exit_code != 0)
-      result.status = base::TestResult::TEST_FAILURE;
-    else
-      result.status = base::TestResult::TEST_UNKNOWN;
-  }
-
-  result.elapsed_time = elapsed_time;
-  result.output_snippet = GetTestOutputSnippet(result, output);
-  launcher_delegate_->PostRunTest(&result);
-
-  return std::vector<base::TestResult>({result});
+void WrapperTestLauncherDelegate::ProcessTestResults(
+    std::vector<base::TestResult>& test_results,
+    base::TimeDelta elapsed_time) {
+  CHECK_EQ(1u, test_results.size());
+
+  test_results.front().elapsed_time = elapsed_time;
+
+  launcher_delegate_->PostRunTest(&test_results.front());
 }
 
 }  // namespace

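Taken together with the base/ hunks, the post-CL result-handling flow is roughly the following. This is a sketch inferred from the diff, not authoritative documentation:

// 1. TestLauncher::ProcessTestResults(test_names, result_file, output, ...)
//    calls ProcessGTestOutput(result_file, ...) -- the XML parsing both
//    delegates previously duplicated.
// 2. TestLauncher fixes up statuses itself: timeouts, MissingResultStatus()
//    for absent results, and TEST_FAILURE_ON_EXIT when every test passed
//    but the exit code was nonzero.
// 3. launcher_delegate_->ProcessTestResults(final_results, elapsed_time):
//    a default no-op for unit tests; for content tests,
//    WrapperTestLauncherDelegate stamps elapsed_time and forwards to
//    content::TestLauncherDelegate::PostRunTest().
// 4. TestLauncher::OnTestFinished() runs for each final result.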
content/test/content_browser_test_test.cc
@@ -176,7 +176,7 @@ IN_PROC_BROWSER_TEST_F(ContentBrowserTest, RunMockTests) {
       temp_dir.GetPath().AppendASCII("SaveSummaryResult.json");
   command_line.AppendSwitchPath("test-launcher-summary-output", path);
   command_line.AppendSwitch("gtest_also_run_disabled_tests");
-  command_line.AppendSwitch("--test-launcher-retry-limit=0");
+  command_line.AppendSwitchASCII("test-launcher-retry-limit", "0");
 
   std::string output;
   base::GetAppOutputAndError(command_line, &output);