Rev 2369: Some minor cleanups of test code, and implement KnownFailure support as in file:///home/robertc/source/baz/test-prereqs/

Robert Collins robertc at robertcollins.net
Thu Mar 22 09:05:47 GMT 2007


At file:///home/robertc/source/baz/test-prereqs/

------------------------------------------------------------
revno: 2369
revision-id: robertc at robertcollins.net-20070322090459-q0gjq21qgrj453hf
parent: robertc at robertcollins.net-20070321042802-1bxr1t97046woojb
committer: Robert Collins <robertc at robertcollins.net>
branch nick: test-prereqs
timestamp: Thu 2007-03-22 20:04:59 +1100
message:
  Some minor cleanups of test code, and implement KnownFailure support as
  per http://bazaar-vcs.org/BzrExtendTestSuite. (Robert Collins)
modified:
  bzrlib/tests/__init__.py       selftest.py-20050531073622-8d0e3c8845c97a64
  bzrlib/tests/test_selftest.py  test_selftest.py-20051202044319-c110a115d8c0456a
=== modified file 'bzrlib/tests/__init__.py'
--- a/bzrlib/tests/__init__.py	2007-03-15 22:35:35 +0000
+++ b/bzrlib/tests/__init__.py	2007-03-22 09:04:59 +0000
@@ -180,6 +180,7 @@
         self.num_tests = num_tests
         self.error_count = 0
         self.failure_count = 0
+        self.known_failure_count = 0
         self.skip_count = 0
         self.count = 0
         self._overall_start_time = time.time()
@@ -221,32 +222,39 @@
         """Record that a test has started."""
         self._start_time = time.time()
 
+    def _cleanupLogFile(self, test):
+        # We can only do this if we have one of our TestCases, not if
+        # we have a doctest.
+        setKeepLogfile = getattr(test, 'setKeepLogfile', None)
+        if setKeepLogfile is not None:
+            setKeepLogfile()
+
     def addError(self, test, err):
         if isinstance(err[1], TestSkipped):
-            return self.addSkipped(test, err)    
+            return self.addSkipped(test, err)
         unittest.TestResult.addError(self, test, err)
-        # We can only do this if we have one of our TestCases, not if
-        # we have a doctest.
-        setKeepLogfile = getattr(test, 'setKeepLogfile', None)
-        if setKeepLogfile is not None:
-            setKeepLogfile()
+        self._cleanupLogFile(test)
         self.extractBenchmarkTime(test)
+        self.error_count += 1
         self.report_error(test, err)
         if self.stop_early:
             self.stop()
 
     def addFailure(self, test, err):
+        self._cleanupLogFile(test)
+        self.extractBenchmarkTime(test)
+        if isinstance(err[1], KnownFailure):
+            return self.addKnownFailure(test, err)
         unittest.TestResult.addFailure(self, test, err)
-        # We can only do this if we have one of our TestCases, not if
-        # we have a doctest.
-        setKeepLogfile = getattr(test, 'setKeepLogfile', None)
-        if setKeepLogfile is not None:
-            setKeepLogfile()
-        self.extractBenchmarkTime(test)
+        self.failure_count += 1
         self.report_failure(test, err)
         if self.stop_early:
             self.stop()
 
+    def addKnownFailure(self, test, err):
+        self.known_failure_count += 1
+        self.report_known_failure(test, err)
+
     def addSuccess(self, test):
         self.extractBenchmarkTime(test)
         if self._bench_history is not None:
@@ -301,9 +309,19 @@
 class TextTestResult(ExtendedTestResult):
     """Displays progress and results of tests in text form"""
 
-    def __init__(self, *args, **kw):
-        ExtendedTestResult.__init__(self, *args, **kw)
-        self.pb = self.ui.nested_progress_bar()
+    def __init__(self, stream, descriptions, verbosity,
+                 bench_history=None,
+                 num_tests=None,
+                 pb=None,
+                 ):
+        ExtendedTestResult.__init__(self, stream, descriptions, verbosity,
+            bench_history, num_tests)
+        if pb is None:
+            self.pb = ui.ui_factory.nested_progress_bar()
+            self._supplied_pb = False
+        else:
+            self.pb = pb
+            self._supplied_pb = True
         self.pb.show_pct = False
         self.pb.show_spinner = False
         self.pb.show_eta = False, 
@@ -322,6 +340,8 @@
             a += ', %d errors' % self.error_count
         if self.failure_count:
             a += ', %d failed' % self.failure_count
+        if self.known_failure_count:
+            a += ', %d known failures' % self.known_failure_count
         if self.skip_count:
             a += ', %d skipped' % self.skip_count
         a += ']'
@@ -342,19 +362,21 @@
             return self._shortened_test_description(test)
 
     def report_error(self, test, err):
-        self.error_count += 1
         self.pb.note('ERROR: %s\n    %s\n', 
             self._test_description(test),
             err[1],
             )
 
     def report_failure(self, test, err):
-        self.failure_count += 1
         self.pb.note('FAIL: %s\n    %s\n', 
             self._test_description(test),
             err[1],
             )
 
+    def report_known_failure(self, test, err):
+        self.pb.note('XFAIL: %s\n%s\n',
+            self._test_description(test), err[1])
+
     def report_skip(self, test, skip_excinfo):
         self.skip_count += 1
         if False:
@@ -375,7 +397,8 @@
         self.pb.update('cleaning up...')
 
     def finished(self):
-        self.pb.finished()
+        if not self._supplied_pb:
+            self.pb.finished()
 
 
 class VerboseTestResult(ExtendedTestResult):
@@ -414,22 +437,27 @@
         return '%s%s' % (indent, err[1])
 
     def report_error(self, test, err):
-        self.error_count += 1
         self.stream.writeln('ERROR %s\n%s'
                 % (self._testTimeString(),
                    self._error_summary(err)))
 
     def report_failure(self, test, err):
-        self.failure_count += 1
         self.stream.writeln(' FAIL %s\n%s'
                 % (self._testTimeString(),
                    self._error_summary(err)))
 
+    def report_known_failure(self, test, err):
+        self.stream.writeln('XFAIL %s\n%s'
+                % (self._testTimeString(),
+                   self._error_summary(err)))
+
     def report_success(self, test):
         self.stream.writeln('   OK %s' % self._testTimeString())
         for bench_called, stats in getattr(test, '_benchcalls', []):
             self.stream.writeln('LSProf output for %s(%s, %s)' % bench_called)
             stats.pprint(file=self.stream)
+        # flush the stream so that we get smooth output. This verbose mode is
+        # used to show the output in PQM.
         self.stream.flush()
 
     def report_skip(self, test, skip_excinfo):
@@ -486,9 +514,17 @@
             if errored:
                 if failed: self.stream.write(", ")
                 self.stream.write("errors=%d" % errored)
+            if result.known_failure_count:
+                if failed or errored: self.stream.write(", ")
+                self.stream.write("known_failure_count=%d" %
+                    result.known_failure_count)
             self.stream.writeln(")")
         else:
-            self.stream.writeln("OK")
+            if result.known_failure_count:
+                self.stream.writeln("OK (known_failures=%d)" %
+                    result.known_failure_count)
+            else:
+                self.stream.writeln("OK")
         if result.skip_count > 0:
             skipped = result.skip_count
             self.stream.writeln('%d test%s skipped' %
@@ -545,6 +581,16 @@
     """Indicates that a test was intentionally skipped, rather than failing."""
 
 
+class KnownFailure(AssertionError):
+    """Indicates that a test failed in a precisely expected manner.
+
+    Such failures don't block the whole test suite from passing because they are
+    indicators of partially completed code or of future work. We have an
+    explicit error for them so that we can ensure that they are always visible:
+    KnownFailures are always shown in the output of bzr selftest.
+    """
+
+
 class CommandFailed(Exception):
     pass
 
@@ -970,6 +1016,10 @@
     def _restoreHooks(self):
         bzrlib.branch.Branch.hooks = self._preserved_hooks
 
+    def knownFailure(self, reason):
+        """This test has failed for some known reason."""
+        raise KnownFailure(reason)
+
     def tearDown(self):
         self._runCleanups()
         unittest.TestCase.tearDown(self)

=== modified file 'bzrlib/tests/test_selftest.py'
--- a/bzrlib/tests/test_selftest.py	2007-03-12 20:55:23 +0000
+++ b/bzrlib/tests/test_selftest.py	2007-03-22 09:04:59 +0000
@@ -38,6 +38,8 @@
 from bzrlib.symbol_versioning import zero_ten, zero_eleven
 from bzrlib.tests import (
                           ChrootedTestCase,
+                          ExtendedTestResult,
+                          KnownFailure,
                           TestCase,
                           TestCaseInTempDir,
                           TestCaseWithMemoryTransport,
@@ -690,6 +692,94 @@
         self.assertContainsRe(output,
             r"LSProf output for <type 'unicode'>\(\('world',\), {'errors': 'replace'}\)\n")
 
+    def test_known_failure(self):
+        """A KnownFailure being raised should trigger several result actions."""
+        class InstrumentedTestResult(ExtendedTestResult):
+
+            def report_test_start(self, test): pass
+            def report_known_failure(self, test, err):
+                self._call = test, err
+        result = InstrumentedTestResult(None, None, None, None)
+        def test_function():
+            raise KnownFailure('failed!')
+        test = unittest.FunctionTestCase(test_function)
+        test.run(result)
+        # it should invoke 'report_known_failure'.
+        self.assertEqual(2, len(result._call))
+        self.assertEqual(test, result._call[0])
+        self.assertEqual(KnownFailure, result._call[1][0])
+        self.assertIsInstance(result._call[1][1], KnownFailure)
+        # we don't introspect the traceback, if the rest is ok, it would be
+        # exceptional for it not to be.
+        # it should update the known_failure_count on the object.
+        self.assertEqual(1, result.known_failure_count)
+        # the result should be successful.
+        self.assertTrue(result.wasSuccessful())
+
+    def test_verbose_report_known_failure(self):
+        # verbose test output formatting
+        result_stream = StringIO()
+        result = bzrlib.tests.VerboseTestResult(
+            unittest._WritelnDecorator(result_stream),
+            descriptions=0,
+            verbosity=2,
+            )
+        test = self.get_passing_test()
+        result.startTest(test)
+        result.extractBenchmarkTime(test)
+        prefix = len(result_stream.getvalue())
+        # the err parameter has the shape:
+        # (class, exception object, traceback)
+        # KnownFailures don't get their tracebacks shown though, so we
+        # can skip that.
+        err = (KnownFailure, KnownFailure('foo'), None)
+        result.report_known_failure(test, err)
+        output = result_stream.getvalue()[prefix:]
+        lines = output.splitlines()
+        self.assertEqual(lines, ['XFAIL                   0ms', '    foo'])
+    
+    def test_text_report_known_failure(self):
+        # text test output formatting
+        pb = MockProgress()
+        result = bzrlib.tests.TextTestResult(
+            None,
+            descriptions=0,
+            verbosity=1,
+            pb=pb,
+            )
+        test = self.get_passing_test()
+        # this seeds the state to handle reporting the test.
+        result.startTest(test)
+        result.extractBenchmarkTime(test)
+        # the err parameter has the shape:
+        # (class, exception object, traceback)
+        # KnownFailures don't get their tracebacks shown though, so we
+        # can skip that.
+        err = (KnownFailure, KnownFailure('foo'), None)
+        result.report_known_failure(test, err)
+        self.assertEqual(
+            [
+            ('update', '[1 in 0s] passing_test', None, None),
+            ('note', 'XFAIL: %s\n%s\n', ('passing_test', err[1]))
+            ],
+            pb.calls)
+        # known_failures should be printed in the summary, so if we run a test
+        # after there are some known failures, the update prefix should match
+        # this.
+        result.known_failure_count = 3
+        test.run(result)
+        self.assertEqual(
+            [
+            ('update', '[2 in 0s, 3 known failures] passing_test', None, None),
+            ],
+            pb.calls[2:])
+
+    def get_passing_test(self):
+        """Return a test object that can't be run usefully."""
+        def passing_test():
+            pass
+        return unittest.FunctionTestCase(passing_test)
+
 
 class TestRunner(TestCase):
 
@@ -712,6 +802,51 @@
         finally:
             TestCaseInTempDir.TEST_ROOT = old_root
 
+    def test_known_failure_failed_run(self):
+        # run a test that generates a known failure which should be printed in
+        # the final output when real failures occur.
+        def known_failure_test():
+            raise KnownFailure('failed')
+        test = unittest.TestSuite()
+        test.addTest(unittest.FunctionTestCase(known_failure_test))
+        def failing_test():
+            raise AssertionError('foo')
+        test.addTest(unittest.FunctionTestCase(failing_test))
+        stream = StringIO()
+        runner = TextTestRunner(stream=stream)
+        result = self.run_test_runner(runner, test)
+        lines = stream.getvalue().splitlines()
+        self.assertEqual([
+            '',
+            '======================================================================',
+            'FAIL: unittest.FunctionTestCase (failing_test)',
+            '----------------------------------------------------------------------',
+            'Traceback (most recent call last):',
+            '    raise AssertionError(\'foo\')',
+            'AssertionError: foo',
+            '',
+            '----------------------------------------------------------------------',
+            'Ran 2 tests in 0.002s',
+            '',
+            'FAILED (failures=1, known_failure_count=1)'],
+            lines[0:5] + lines[6:])
+
+    def test_known_failure_ok_run(self):
+        # run a test that generates a known failure which should be printed in the final output.
+        def known_failure_test():
+            raise KnownFailure('failed')
+        test = unittest.FunctionTestCase(known_failure_test)
+        stream = StringIO()
+        runner = TextTestRunner(stream=stream)
+        result = self.run_test_runner(runner, test)
+        self.assertEqual(
+            '\n'
+            '----------------------------------------------------------------------\n'
+            'Ran 1 test in 0.000s\n'
+            '\n'
+            'OK (known_failures=1)\n',
+            stream.getvalue())
+
     def test_skipped_test(self):
         # run a test that is skipped, and check the suite as a whole still
         # succeeds.
@@ -924,6 +1059,10 @@
         self.assertIsInstance(self._benchcalls[0][1], bzrlib.lsprof.Stats)
         self.assertIsInstance(self._benchcalls[1][1], bzrlib.lsprof.Stats)
 
+    def test_knownFailure(self):
+        """Self.knownFailure() should raise a KnownFailure exception."""
+        self.assertRaises(KnownFailure, self.knownFailure, "A Failure")
+
 
 @symbol_versioning.deprecated_function(zero_eleven)
 def sample_deprecated_function():
@@ -1085,3 +1224,12 @@
         self.assertEquals(['bzr','bzrlib','setup.py',
                            'test9999.tmp','tests'],
                            after)
+
+
+class TestKnownFailure(TestCase):
+
+    def test_known_failure(self):
+        """Check that KnownFailure is defined appropriately."""
+        # a KnownFailure is an assertion error for compatibility with unaware
+        # runners.
+        self.assertIsInstance(KnownFailure(""), AssertionError)



More information about the bazaar-commits mailing list