Rev 2372: Implement reporting of Unsupported tests in the bzr test result and runner in file:///home/robertc/source/baz/test-prereqs/

Robert Collins robertc at robertcollins.net
Thu Mar 22 12:19:36 GMT 2007


At file:///home/robertc/source/baz/test-prereqs/

------------------------------------------------------------
revno: 2372
revision-id: robertc at robertcollins.net-20070322121903-8sqxzfr5eqvsx5yp
parent: robertc at robertcollins.net-20070322105438-gt9qu83u9ml5aubo
committer: Robert Collins <robertc at robertcollins.net>
branch nick: test-prereqs
timestamp: Thu 2007-03-22 23:19:03 +1100
message:
  Implement reporting of Unsupported tests in the bzr test result and runner
  classes. (Robert Collins)
modified:
  bzrlib/tests/__init__.py       selftest.py-20050531073622-8d0e3c8845c97a64
  bzrlib/tests/test_selftest.py  test_selftest.py-20051202044319-c110a115d8c0456a
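
In outline: the result object now keeps a per-feature counter of tests that
could not be run, and the runner prints one summary line per missing feature
at the end. A rough sketch of that behaviour (simplified, not the actual
bzrlib classes; the feature name below is made up):

    class SketchResult(object):
        """Simplified stand-in for bzrlib's ExtendedTestResult."""

        def __init__(self):
            # feature name -> number of tests that could not be run
            self.unsupported = {}

        def addNotSupported(self, test, feature):
            self.unsupported.setdefault(str(feature), 0)
            self.unsupported[str(feature)] += 1

    result = SketchResult()
    result.addNotSupported(None, 'SomeMissingFeature')
    result.addNotSupported(None, 'SomeMissingFeature')
    for feature, count in sorted(result.unsupported.items()):
        print "Missing feature '%s' skipped %d tests." % (feature, count)
    # -> Missing feature 'SomeMissingFeature' skipped 2 tests.
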
=== modified file 'bzrlib/tests/__init__.py'
--- a/bzrlib/tests/__init__.py	2007-03-22 10:54:38 +0000
+++ b/bzrlib/tests/__init__.py	2007-03-22 12:19:03 +0000
@@ -182,6 +182,7 @@
         self.failure_count = 0
         self.known_failure_count = 0
         self.skip_count = 0
+        self.unsupported = {}
         self.count = 0
         self._overall_start_time = time.time()
     
@@ -255,6 +256,11 @@
         self.known_failure_count += 1
         self.report_known_failure(test, err)
 
+    def addNotSupported(self, test, feature):
+        self.unsupported.setdefault(str(feature), 0)
+        self.unsupported[str(feature)] += 1
+        self.report_unsupported(test, feature)
+
     def addSuccess(self, test):
         self.extractBenchmarkTime(test)
         if self._bench_history is not None:
@@ -344,6 +350,8 @@
             a += ', %d known failures' % self.known_failure_count
         if self.skip_count:
             a += ', %d skipped' % self.skip_count
+        if self.unsupported:
+            a += ', %d missing features' % len(self.unsupported)
         a += ']'
         return a
 
@@ -393,6 +401,9 @@
                 # progress bar...
                 self.pb.note('SKIP: %s', skip_excinfo[1])
 
+    def report_unsupported(self, test, feature):
+        """test cannot be run because feature is missing."""
+                  
     def report_cleaning_up(self):
         self.pb.update('cleaning up...')
 
@@ -466,6 +477,12 @@
                 % (self._testTimeString(),
                    self._error_summary(skip_excinfo)))
 
+    def report_unsupported(self, test, feature):
+        """test cannot be run because feature is missing."""
+        self.stream.writeln("NODEP %s\n    The feature '%s' is not available."
+                %(self._testTimeString(), feature))
+                  
+
 
 class TextTestRunner(object):
     stop_on_failure = False
@@ -529,6 +546,10 @@
             skipped = result.skip_count
             self.stream.writeln('%d test%s skipped' %
                                 (skipped, skipped != 1 and "s" or ""))
+        if result.unsupported:
+            for feature, count in sorted(result.unsupported.items()):
+                self.stream.writeln("Missing feature '%s' skipped %d tests." %
+                    (feature, count))
         result.report_cleaning_up()
         # This is still a little bogus, 
         # but only a little. Folk not using our testrunner will

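Taken together, the hooks above give two reporting styles. Roughly
(illustrative output only; the exact timing text comes from
_testTimeString() and the progress bar state):

    # Verbose (-v) runs print one NODEP line per test that cannot run:
    #
    #     NODEP                   0ms
    #         The feature 'Feature' is not available.
    #
    # Non-verbose runs stay quiet per test; the aggregate shows up in the
    # progress summary and in the final runner output instead:
    #
    #     [2 in 0s, 2 missing features] passing_test
    #     Missing feature 'Feature1' skipped 1 tests.
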
=== modified file 'bzrlib/tests/test_selftest.py'
--- a/bzrlib/tests/test_selftest.py	2007-03-22 10:54:38 +0000
+++ b/bzrlib/tests/test_selftest.py	2007-03-22 12:19:03 +0000
@@ -781,6 +781,78 @@
             pass
         return unittest.FunctionTestCase(passing_test)
 
+    def test_add_not_supported(self):
+        """Test the behaviour of invoking addNotSupported."""
+        class InstrumentedTestResult(ExtendedTestResult):
+            def report_test_start(self, test): pass
+            def report_unsupported(self, test, feature):
+                self._call = test, feature
+        result = InstrumentedTestResult(None, None, None, None)
+        test = SampleTestCase('_test_pass')
+        feature = Feature()
+        result.startTest(test)
+        result.addNotSupported(test, feature)
+        # it should invoke 'report_unsupported'.
+        self.assertEqual(2, len(result._call))
+        self.assertEqual(test, result._call[0])
+        self.assertEqual(feature, result._call[1])
+        # the result should be successful.
+        self.assertTrue(result.wasSuccessful())
+        # it should record the test against a count of tests not run due to
+        # this feature.
+        self.assertEqual(1, result.unsupported['Feature'])
+        # and invoking it again should increment that counter
+        result.addNotSupported(test, feature)
+        self.assertEqual(2, result.unsupported['Feature'])
+
+    def test_verbose_report_unsupported(self):
+        # verbose test output formatting
+        result_stream = StringIO()
+        result = bzrlib.tests.VerboseTestResult(
+            unittest._WritelnDecorator(result_stream),
+            descriptions=0,
+            verbosity=2,
+            )
+        test = self.get_passing_test()
+        feature = Feature()
+        result.startTest(test)
+        result.extractBenchmarkTime(test)
+        prefix = len(result_stream.getvalue())
+        result.report_unsupported(test, feature)
+        output = result_stream.getvalue()[prefix:]
+        lines = output.splitlines()
+        self.assertEqual(lines, ['NODEP                   0ms', "    The feature 'Feature' is not available."])
+    
+    def test_text_report_unsupported(self):
+        # text test output formatting
+        pb = MockProgress()
+        result = bzrlib.tests.TextTestResult(
+            None,
+            descriptions=0,
+            verbosity=1,
+            pb=pb,
+            )
+        test = self.get_passing_test()
+        feature = Feature()
+        # this seeds the state to handle reporting the test.
+        result.startTest(test)
+        result.extractBenchmarkTime(test)
+        result.report_unsupported(test, feature)
+        # no output on unsupported features
+        self.assertEqual(
+            [('update', '[1 in 0s] passing_test', None, None)
+            ],
+            pb.calls)
+        # the number of missing features should be printed in the progress
+        # summary, so check for that.
+        result.unsupported = {'foo':0, 'bar':0}
+        test.run(result)
+        self.assertEqual(
+            [
+            ('update', '[2 in 0s, 2 missing features] passing_test', None, None),
+            ],
+            pb.calls[1:])
+    
 
 class TestRunner(TestCase):
 
@@ -827,10 +899,9 @@
             'AssertionError: foo',
             '',
             '----------------------------------------------------------------------',
-            'Ran 2 tests in 0.002s',
             '',
             'FAILED (failures=1, known_failure_count=1)'],
-            lines[0:5] + lines[6:])
+            lines[0:5] + lines[6:10] + lines[11:])
 
     def test_known_failure_ok_run(self):
         # run a test that generates a known failure which should be printed in the final output.
@@ -901,6 +972,35 @@
         # Check if cleanup was called the right number of times.
         self.assertEqual(0, test.counter)
 
+    def test_unsupported_features_listed(self):
+        """When unsupported features are encountered they are detailed."""
+        class Feature1(Feature):
+            def _probe(self): return False
+        class Feature2(Feature):
+            def _probe(self): return False
+        # create sample tests
+        test1 = SampleTestCase('_test_pass')
+        test1._test_needs_features = [Feature1()]
+        test2 = SampleTestCase('_test_pass')
+        test2._test_needs_features = [Feature2()]
+        test = unittest.TestSuite()
+        test.addTest(test1)
+        test.addTest(test2)
+        stream = StringIO()
+        runner = TextTestRunner(stream=stream)
+        result = self.run_test_runner(runner, test)
+        lines = stream.getvalue().splitlines()
+        self.assertEqual([
+            '',
+            '----------------------------------------------------------------------',
+            'Ran 2 tests in 0.000s',
+            '',
+            'OK',
+            "Missing feature 'Feature1' skipped 1 tests.",
+            "Missing feature 'Feature2' skipped 1 tests.",
+            ],
+            lines)
+
     def test_bench_history(self):
         # tests that the running the benchmark produces a history file
         # containing a timestamp and the revision id of the bzrlib source which

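For reference, the pattern the new selftest exercises would look like this in
a real test module. A minimal sketch, assuming the Feature base class and the
_test_needs_features attribute already exist in bzrlib.tests at this revision
(the feature class and test names here are made up):

    from bzrlib.tests import Feature, TestCase

    class _FakeCompressionFeature(Feature):
        """A feature whose probe always fails, for illustration only."""

        def _probe(self):
            return False

    class TestNeedsFakeCompression(TestCase):

        _test_needs_features = [_FakeCompressionFeature()]

        def test_uses_fake_compression(self):
            # Not reached while the feature is missing; the test is counted
            # under str(feature) and the runner prints something like
            # "Missing feature '...' skipped 1 tests."
            pass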

