Rev 2738: (mbp) fix problem with extractBenchmarkTime; better assertSubset in file:///home/pqm/archives/thelove/bzr/%2Btrunk/
Canonical.com Patch Queue Manager
pqm at pqm.ubuntu.com
Tue Aug 21 05:47:15 BST 2007
At file:///home/pqm/archives/thelove/bzr/%2Btrunk/
------------------------------------------------------------
revno: 2738
revision-id: pqm at pqm.ubuntu.com-20070821044713-ttnupbvhlsbwh1he
parent: pqm at pqm.ubuntu.com-20070821041912-ph1kv921fvotcgdd
parent: mbp at sourcefrog.net-20070821035307-krkxgs8g0eryzl2r
committer: Canonical.com Patch Queue Manager <pqm at pqm.ubuntu.com>
branch nick: +trunk
timestamp: Tue 2007-08-21 05:47:13 +0100
message:
(mbp) fix problem with extractBenchmarkTime; better assertSubset
modified:
bzrlib/tests/__init__.py selftest.py-20050531073622-8d0e3c8845c97a64
bzrlib/tests/test_selftest.py test_selftest.py-20051202044319-c110a115d8c0456a
------------------------------------------------------------
revno: 2695.1.5
merged: mbp at sourcefrog.net-20070821035307-krkxgs8g0eryzl2r
parent: mbp at sourcefrog.net-20070815035227-csgyf3ja9ob9d0mr
parent: pqm at pqm.ubuntu.com-20070821024621-czmqk59igiyvsgk8
committer: Martin Pool <mbp at sourcefrog.net>
branch nick: test-cleanup
timestamp: Tue 2007-08-21 13:53:07 +1000
message:
merge trunk
------------------------------------------------------------
revno: 2695.1.4
merged: mbp at sourcefrog.net-20070815035227-csgyf3ja9ob9d0mr
parent: mbp at sourcefrog.net-20070815032035-opegxanl10iiabi1
committer: Martin Pool <mbp at sourcefrog.net>
branch nick: test-cleanup
timestamp: Wed 2007-08-15 13:52:27 +1000
message:
Much faster assertSubset using sets, not O(n**2)
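[Editor's note: the speedup comes from replacing per-element membership tests
against a list (a linear scan each, so O(n**2) overall) with a single set
difference. A minimal standalone contrast; the "before" shape is paraphrased,
since the pre-change code does not appear in this diff:

    # before (paraphrased): one linear scan of superlist per element
    def assertSubset_slow(sublist, superlist):
        for entry in sublist:
            if entry not in superlist:   # O(len(superlist)) for a list
                raise AssertionError("%r not present in %r"
                                     % (entry, superlist))

    # after: hash-based set difference finds every missing value at once
    def assertSubset_fast(sublist, superlist):
        missing = set(sublist) - set(superlist)
        if missing:
            raise AssertionError("value(s) %r not present in container %r"
                                 % (missing, superlist))

One behavioural difference worth noting: the set version reports all missing
values together, and it requires the entries to be hashable.]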
------------------------------------------------------------
revno: 2695.1.3
merged: mbp at sourcefrog.net-20070815032035-opegxanl10iiabi1
parent: mbp at sourcefrog.net-20070815030138-y0ojw4em13mcc1fh
committer: Martin Pool <mbp at sourcefrog.net>
branch nick: test-cleanup
timestamp: Wed 2007-08-15 13:20:35 +1000
message:
Fix up selftest tests for new extractBenchmarkTime behaviour; remove many unneeded calls to it
------------------------------------------------------------
revno: 2695.1.2
merged: mbp at sourcefrog.net-20070815030138-y0ojw4em13mcc1fh
parent: mbp at sourcefrog.net-20070813063556-2wjnfozy2ud7wiv1
committer: Martin Pool <mbp at sourcefrog.net>
branch nick: test-cleanup
timestamp: Wed 2007-08-15 13:01:38 +1000
message:
_benchmarkTime should not be an attribute of ExtendedTestResult, because it only applies to the most recent test reported
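[Editor's note: the shape of that change, reduced to a standalone sketch
(class names here are illustrative, not bzrlib's):

    # before: the benchmark time lives on the shared result object, so it
    # is really "the time of whichever test was reported last"
    class StatefulResult(object):
        def extractBenchmarkTime(self, test):
            self._benchmarkTime = getattr(test, "_benchtime", None)

        def testTimeString(self):
            return str(self._benchmarkTime)   # stale, or unset if never called

    # after: the time is derived from the test case on demand; no state
    class StatelessResult(object):
        def _extractBenchmarkTime(self, test):
            return getattr(test, "_benchtime", None)

        def testTimeString(self, test):
            return str(self._extractBenchmarkTime(test))
]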
------------------------------------------------------------
revno: 2695.1.1
merged: mbp at sourcefrog.net-20070813063556-2wjnfozy2ud7wiv1
parent: pqm at pqm.ubuntu.com-20070810230629-bcp0rgmbhp0z35e1
committer: Martin Pool <mbp at sourcefrog.net>
branch nick: test-cleanup
timestamp: Mon 2007-08-13 16:35:56 +1000
message:
Fix problem if the first test is missing a dependency
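[Editor's note: this entry appears to be the concrete failure the refactor
above addresses. Under the old stateful design, _benchmarkTime was only
assigned inside extractBenchmarkTime(), which the add* methods called; a
test skipped for a missing feature could reach the reporting path before any
test had been reported normally, leaving the attribute unset. Deriving the
time from the test case on each call, as the diff below does, removes that
ordering hazard.]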
=== modified file 'bzrlib/tests/__init__.py'
--- a/bzrlib/tests/__init__.py 2007-08-21 02:06:43 +0000
+++ b/bzrlib/tests/__init__.py 2007-08-21 03:53:07 +0000
@@ -158,9 +158,17 @@
class ExtendedTestResult(unittest._TextTestResult):
"""Accepts, reports and accumulates the results of running tests.
- Compared to this unittest version this class adds support for profiling,
- benchmarking, stopping as soon as a test fails, and skipping tests.
- There are further-specialized subclasses for different types of display.
+ Compared to this unittest version this class adds support for
+ profiling, benchmarking, stopping as soon as a test fails, and
+ skipping tests. There are further-specialized subclasses for
+ different types of display.
+
+ When a test finishes, in whatever way, it calls one of the addSuccess,
+ addFailure or addError methods. These in turn may redirect to a more
+ specific case for the special test results supported by our extended
+ tests.
+
+ Note that just one of these objects is fed the results from many tests.
"""
stop_early = False
@@ -200,18 +208,19 @@
self.count = 0
self._overall_start_time = time.time()
- def extractBenchmarkTime(self, testCase):
+ def _extractBenchmarkTime(self, testCase):
"""Add a benchmark time for the current test case."""
- self._benchmarkTime = getattr(testCase, "_benchtime", None)
+ return getattr(testCase, "_benchtime", None)
def _elapsedTestTimeString(self):
"""Return a time string for the overall time the current test has taken."""
return self._formatTime(time.time() - self._start_time)
- def _testTimeString(self):
- if self._benchmarkTime is not None:
+ def _testTimeString(self, testCase):
+ benchmark_time = self._extractBenchmarkTime(testCase)
+ if benchmark_time is not None:
return "%s/%s" % (
- self._formatTime(self._benchmarkTime),
+ self._formatTime(benchmark_time),
self._elapsedTestTimeString())
else:
return " %s" % self._elapsedTestTimeString()
@@ -245,52 +254,82 @@
setKeepLogfile()
def addError(self, test, err):
- self.extractBenchmarkTime(test)
- self._cleanupLogFile(test)
+ """Tell result that test finished with an error.
+
+ Called from the TestCase run() method when the test
+ fails with an unexpected error.
+ """
+ self._testConcluded(test)
if isinstance(err[1], TestSkipped):
- return self.addSkipped(test, err)
+ return self._addSkipped(test, err)
elif isinstance(err[1], UnavailableFeature):
return self.addNotSupported(test, err[1].args[0])
- unittest.TestResult.addError(self, test, err)
- self.error_count += 1
- self.report_error(test, err)
- if self.stop_early:
- self.stop()
+ else:
+ unittest.TestResult.addError(self, test, err)
+ self.error_count += 1
+ self.report_error(test, err)
+ if self.stop_early:
+ self.stop()
def addFailure(self, test, err):
- self._cleanupLogFile(test)
- self.extractBenchmarkTime(test)
+ """Tell result that test failed.
+
+ Called from the TestCase run() method when the test
+ fails because e.g. an assert() method failed.
+ """
+ self._testConcluded(test)
if isinstance(err[1], KnownFailure):
- return self.addKnownFailure(test, err)
- unittest.TestResult.addFailure(self, test, err)
- self.failure_count += 1
- self.report_failure(test, err)
- if self.stop_early:
- self.stop()
-
- def addKnownFailure(self, test, err):
+ return self._addKnownFailure(test, err)
+ else:
+ unittest.TestResult.addFailure(self, test, err)
+ self.failure_count += 1
+ self.report_failure(test, err)
+ if self.stop_early:
+ self.stop()
+
+ def addSuccess(self, test):
+ """Tell result that test completed successfully.
+
+ Called from the TestCase run() method when the test passes.
+ """
+ self._testConcluded(test)
+ if self._bench_history is not None:
+ benchmark_time = self._extractBenchmarkTime(test)
+ if benchmark_time is not None:
+ self._bench_history.write("%s %s\n" % (
+ self._formatTime(benchmark_time),
+ test.id()))
+ self.report_success(test)
+ unittest.TestResult.addSuccess(self, test)
+
+ def _testConcluded(self, test):
+ """Common code when a test has finished.
+
+ Called regardless of whether it succeeded, failed, etc.
+ """
+ self._cleanupLogFile(test)
+
+ def _addKnownFailure(self, test, err):
self.known_failure_count += 1
self.report_known_failure(test, err)
def addNotSupported(self, test, feature):
+ """The test will not be run because of a missing feature.
+ """
+ # this can be called in two different ways: it may be that the
+ # test started running, and then raised (through addError)
+ # UnavailableFeature. Alternatively this method can be called
+ # while probing for features before running the tests; in that
+ # case we will see startTest and stopTest, but the test will never
+ # actually run.
self.unsupported.setdefault(str(feature), 0)
self.unsupported[str(feature)] += 1
self.report_unsupported(test, feature)
- def addSuccess(self, test):
- self.extractBenchmarkTime(test)
- if self._bench_history is not None:
- if self._benchmarkTime is not None:
- self._bench_history.write("%s %s\n" % (
- self._formatTime(self._benchmarkTime),
- test.id()))
- self.report_success(test)
- unittest.TestResult.addSuccess(self, test)
-
- def addSkipped(self, test, skip_excinfo):
+ def _addSkipped(self, test, skip_excinfo):
self.report_skip(test, skip_excinfo)
- # seems best to treat this as success from point-of-view of unittest
- # -- it actually does nothing so it barely matters :)
+ # seems best to treat this as success from point-of-view of
+ # unittest -- it actually does nothing so it barely matters :)
try:
test.tearDown()
except KeyboardInterrupt:
@@ -458,21 +497,21 @@
def report_error(self, test, err):
self.stream.writeln('ERROR %s\n%s'
- % (self._testTimeString(),
+ % (self._testTimeString(test),
self._error_summary(err)))
def report_failure(self, test, err):
self.stream.writeln(' FAIL %s\n%s'
- % (self._testTimeString(),
+ % (self._testTimeString(test),
self._error_summary(err)))
def report_known_failure(self, test, err):
self.stream.writeln('XFAIL %s\n%s'
- % (self._testTimeString(),
+ % (self._testTimeString(test),
self._error_summary(err)))
def report_success(self, test):
- self.stream.writeln(' OK %s' % self._testTimeString())
+ self.stream.writeln(' OK %s' % self._testTimeString(test))
for bench_called, stats in getattr(test, '_benchcalls', []):
self.stream.writeln('LSProf output for %s(%s, %s)' % bench_called)
stats.pprint(file=self.stream)
@@ -483,14 +522,13 @@
def report_skip(self, test, skip_excinfo):
self.skip_count += 1
self.stream.writeln(' SKIP %s\n%s'
- % (self._testTimeString(),
+ % (self._testTimeString(test),
self._error_summary(skip_excinfo)))
def report_unsupported(self, test, feature):
"""test cannot be run because feature is missing."""
self.stream.writeln("NODEP %s\n The feature '%s' is not available."
- %(self._testTimeString(), feature))
-
+ %(self._testTimeString(test), feature))
class TextTestRunner(object):
@@ -865,7 +903,7 @@
"""Assert that every entry in sublist is present in superlist."""
missing = set(sublist) - set(superlist)
if len(missing) > 0:
- raise AssertionError("value(s) %r not present in container %r" %
+ raise AssertionError("value(s) %r not present in container %r" %
(missing, superlist))
def assertListRaises(self, excClass, func, *args, **kwargs):
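[Editor's note: before the test_selftest.py changes, a summary of the control
flow the hunks above establish: every terminal add* method now funnels
through _testConcluded() for shared per-test cleanup. A simplified runnable
skeleton; the real class derives from unittest._TextTestResult and does
considerably more:

    import unittest

    class SketchResult(unittest.TestResult):
        """Illustrative skeleton of the post-patch flow."""

        def addError(self, test, err):
            self._testConcluded(test)
            unittest.TestResult.addError(self, test, err)

        def addFailure(self, test, err):
            self._testConcluded(test)
            unittest.TestResult.addFailure(self, test, err)

        def addSuccess(self, test):
            self._testConcluded(test)
            unittest.TestResult.addSuccess(self, test)

        def _testConcluded(self, test):
            # common code for every way a test can finish; in bzrlib this
            # cleans up the per-test log file
            pass
]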
=== modified file 'bzrlib/tests/test_selftest.py'
--- a/bzrlib/tests/test_selftest.py 2007-08-15 04:33:34 +0000
+++ b/bzrlib/tests/test_selftest.py 2007-08-21 03:53:07 +0000
@@ -706,32 +706,35 @@
class TestTestResult(TestCase):
- def test_elapsed_time_with_benchmarking(self):
+ def check_timing(self, test_case, expected_re):
result = bzrlib.tests.TextTestResult(self._log_file,
- descriptions=0,
- verbosity=1,
- )
- result._recordTestStartTime()
- time.sleep(0.003)
- result.extractBenchmarkTime(self)
- timed_string = result._testTimeString()
- # without explicit benchmarking, we should get a simple time.
- self.assertContainsRe(timed_string, "^ +[0-9]+ms$")
+ descriptions=0,
+ verbosity=1,
+ )
+ test_case.run(result)
+ timed_string = result._testTimeString(test_case)
+ self.assertContainsRe(timed_string, expected_re)
+
+ def test_test_reporting(self):
+ class ShortDelayTestCase(TestCase):
+ def test_short_delay(self):
+ time.sleep(0.003)
+ def test_short_benchmark(self):
+ self.time(time.sleep, 0.003)
+ self.check_timing(ShortDelayTestCase('test_short_delay'),
+ r"^ +[0-9]+ms$")
# if a benchmark time is given, we want a x of y style result.
- self.time(time.sleep, 0.001)
- result.extractBenchmarkTime(self)
- timed_string = result._testTimeString()
- self.assertContainsRe(
- timed_string, "^ +[0-9]+ms/ +[0-9]+ms$")
- # extracting the time from a non-bzrlib testcase sets to None
- result._recordTestStartTime()
- result.extractBenchmarkTime(
- unittest.FunctionTestCase(self.test_elapsed_time_with_benchmarking))
- timed_string = result._testTimeString()
- self.assertContainsRe(timed_string, "^ +[0-9]+ms$")
- # cheat. Yes, wash thy mouth out with soap.
- self._benchtime = None
+ self.check_timing(ShortDelayTestCase('test_short_benchmark'),
+ r"^ +[0-9]+ms/ +[0-9]+ms$")
+ def test_unittest_reporting_unittest_class(self):
+ # getting the time from a non-bzrlib test works ok
+ class ShortDelayTestCase(unittest.TestCase):
+ def test_short_delay(self):
+ time.sleep(0.003)
+ self.check_timing(ShortDelayTestCase('test_short_delay'),
+ r"^ +[0-9]+ms$")
+
def test_assigned_benchmark_file_stores_date(self):
output = StringIO()
result = bzrlib.tests.TextTestResult(self._log_file,
@@ -740,7 +743,6 @@
bench_history=output
)
output_string = output.getvalue()
-
# if you are wondering about the regexp please read the comment in
# test_bench_history (bzrlib.tests.test_selftest.TestRunner)
# XXX: what comment? -- Andrew Bennetts
@@ -845,7 +847,6 @@
)
test = self.get_passing_test()
result.startTest(test)
- result.extractBenchmarkTime(test)
prefix = len(result_stream.getvalue())
# the err parameter has the shape:
# (class, exception object, traceback)
@@ -871,7 +872,6 @@
test = self.get_passing_test()
# this seeds the state to handle reporting the test.
result.startTest(test)
- result.extractBenchmarkTime(test)
# the err parameter has the shape:
# (class, exception object, traceback)
# KnownFailures dont get their tracebacks shown though, so we
@@ -936,7 +936,6 @@
test = self.get_passing_test()
feature = Feature()
result.startTest(test)
- result.extractBenchmarkTime(test)
prefix = len(result_stream.getvalue())
result.report_unsupported(test, feature)
output = result_stream.getvalue()[prefix:]
@@ -956,7 +955,6 @@
feature = Feature()
# this seeds the state to handle reporting the test.
result.startTest(test)
- result.extractBenchmarkTime(test)
result.report_unsupported(test, feature)
# no output on unsupported features
self.assertEqual(
@@ -995,26 +993,29 @@
def test_strict_with_unsupported_feature(self):
result = bzrlib.tests.TextTestResult(self._log_file, descriptions=0,
- verbosity=1)
+ verbosity=1)
test = self.get_passing_test()
feature = "Unsupported Feature"
result.addNotSupported(test, feature)
self.assertFalse(result.wasStrictlySuccessful())
-
+ self.assertEqual(None, result._extractBenchmarkTime(test))
+
def test_strict_with_known_failure(self):
result = bzrlib.tests.TextTestResult(self._log_file, descriptions=0,
- verbosity=1)
+ verbosity=1)
test = self.get_passing_test()
err = (KnownFailure, KnownFailure('foo'), None)
- result.addKnownFailure(test, err)
+ result._addKnownFailure(test, err)
self.assertFalse(result.wasStrictlySuccessful())
+ self.assertEqual(None, result._extractBenchmarkTime(test))
def test_strict_with_success(self):
result = bzrlib.tests.TextTestResult(self._log_file, descriptions=0,
- verbosity=1)
+ verbosity=1)
test = self.get_passing_test()
result.addSuccess(test)
self.assertTrue(result.wasStrictlySuccessful())
+ self.assertEqual(None, result._extractBenchmarkTime(test))
class TestRunner(TestCase):
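[Editor's note: for reference, the timing strings these tests assert against
look like "  12ms" for a plain test and "  3ms/  15ms" (benchmark time /
wall-clock time) when TestCase.time() recorded a benchmark. A quick
standalone check of the patterns, assuming assertContainsRe behaves like
re.search, which the anchors in the patterns suggest:

    import re
    assert re.search(r"^ +[0-9]+ms$", "   12ms")
    assert re.search(r"^ +[0-9]+ms/ +[0-9]+ms$", "    3ms/   15ms")
]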