Rev 4659: (robertc) Detangle core logic in selftest to make it more in file:///home/pqm/archives/thelove/bzr/%2Btrunk/

Canonical.com Patch Queue Manager pqm@pqm.ubuntu.com
Thu Aug 27 14:20:26 BST 2009


At file:///home/pqm/archives/thelove/bzr/%2Btrunk/

------------------------------------------------------------
revno: 4659 [merge]
revision-id: pqm@pqm.ubuntu.com-20090827132024-h13eo7blndo2dfpl
parent: pqm@pqm.ubuntu.com-20090827041938-ledf747rl9hisefy
parent: robertc@robertcollins.net-20090826233048-4yerdwqhvi2dqzi9
committer: Canonical.com Patch Queue Manager <pqm@pqm.ubuntu.com>
branch nick: +trunk
timestamp: Thu 2009-08-27 14:20:24 +0100
message:
  (robertc) Detangle core logic in selftest to make it more
  	understandable and compatible with current python. (Robert Collins)
modified:
  NEWS                           NEWS-20050323055033-4e00b5db738777ff
  bzrlib/tests/__init__.py       selftest.py-20050531073622-8d0e3c8845c97a64
  bzrlib/tests/test_selftest.py  test_selftest.py-20051202044319-c110a115d8c0456a
=== modified file 'NEWS'
--- a/NEWS	2009-08-27 00:53:27 +0000
+++ b/NEWS	2009-08-27 13:20:24 +0000
@@ -90,6 +90,9 @@
   classes changed to manage lock lifetime of the trees they open in a way
   consistent with reader-exclusive locks. (Robert Collins, #305006)
 
+* ``bzrlib.tests`` now uses ``stopTestRun`` for its ``TestResult``
+  subclasses - the same as python's unittest module. (Robert Collins)
+
 Internals
 *********
 

=== modified file 'bzrlib/tests/__init__.py'
--- a/bzrlib/tests/__init__.py	2009-08-26 07:02:45 +0000
+++ b/bzrlib/tests/__init__.py	2009-08-26 23:30:48 +0000
@@ -175,17 +175,47 @@
         self._overall_start_time = time.time()
         self._strict = strict
 
-    def done(self):
-        # nb: called stopTestRun in the version of this that Python merged
-        # upstream, according to lifeless 20090803
+    def stopTestRun(self):
+        run = self.testsRun
+        actionTaken = "Ran"
+        stopTime = time.time()
+        timeTaken = stopTime - self.startTime
+        self.printErrors()
+        self.stream.writeln(self.separator2)
+        self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken,
+                            run, run != 1 and "s" or "", timeTaken))
+        self.stream.writeln()
+        if not self.wasSuccessful():
+            self.stream.write("FAILED (")
+            failed, errored = map(len, (self.failures, self.errors))
+            if failed:
+                self.stream.write("failures=%d" % failed)
+            if errored:
+                if failed: self.stream.write(", ")
+                self.stream.write("errors=%d" % errored)
+            if self.known_failure_count:
+                if failed or errored: self.stream.write(", ")
+                self.stream.write("known_failure_count=%d" %
+                    self.known_failure_count)
+            self.stream.writeln(")")
+        else:
+            if self.known_failure_count:
+                self.stream.writeln("OK (known_failures=%d)" %
+                    self.known_failure_count)
+            else:
+                self.stream.writeln("OK")
+        if self.skip_count > 0:
+            skipped = self.skip_count
+            self.stream.writeln('%d test%s skipped' %
+                                (skipped, skipped != 1 and "s" or ""))
+        if self.unsupported:
+            for feature, count in sorted(self.unsupported.items()):
+                self.stream.writeln("Missing feature '%s' skipped %d tests." %
+                    (feature, count))
         if self._strict:
             ok = self.wasStrictlySuccessful()
         else:
             ok = self.wasSuccessful()
-        if ok:
-            self.stream.write('tests passed\n')
-        else:
-            self.stream.write('tests failed\n')
         if TestCase._first_thread_leaker_id:
             self.stream.write(
                 '%s is leaking threads among %d leaking tests.\n' % (
@@ -383,12 +413,12 @@
         else:
             raise errors.BzrError("Unknown whence %r" % whence)
 
-    def finished(self):
-        pass
-
     def report_cleaning_up(self):
         pass
 
+    def startTestRun(self):
+        self.startTime = time.time()
+
     def report_success(self, test):
         pass
 
@@ -421,15 +451,14 @@
         self.pb.update_latency = 0
         self.pb.show_transport_activity = False
 
-    def done(self):
+    def stopTestRun(self):
         # called when the tests that are going to run have run
         self.pb.clear()
-        super(TextTestResult, self).done()
-
-    def finished(self):
         self.pb.finished()
+        super(TextTestResult, self).stopTestRun()
 
-    def report_starting(self):
+    def startTestRun(self):
+        super(TextTestResult, self).startTestRun()
         self.pb.update('[test 0/%d] Starting' % (self.num_tests))
 
     def printErrors(self):
@@ -514,7 +543,8 @@
             result = a_string
         return result.ljust(final_width)
 
-    def report_starting(self):
+    def startTestRun(self):
+        super(VerboseTestResult, self).startTestRun()
         self.stream.write('running %d tests...\n' % self.num_tests)
 
     def report_test_start(self, test):
@@ -578,7 +608,6 @@
                  descriptions=0,
                  verbosity=1,
                  bench_history=None,
-                 list_only=False,
                  strict=False,
                  result_decorators=None,
                  ):
@@ -593,85 +622,43 @@
         self.descriptions = descriptions
         self.verbosity = verbosity
         self._bench_history = bench_history
-        self.list_only = list_only
         self._strict = strict
         self._result_decorators = result_decorators or []
 
     def run(self, test):
         "Run the given test case or test suite."
-        startTime = time.time()
         if self.verbosity == 1:
             result_class = TextTestResult
         elif self.verbosity >= 2:
             result_class = VerboseTestResult
-        result = result_class(self.stream,
+        original_result = result_class(self.stream,
                               self.descriptions,
                               self.verbosity,
                               bench_history=self._bench_history,
                               strict=self._strict,
                               )
-        run_result = result
+        # Signal to result objects that check the stop_early policy to
+        # stop on the first failure.
+        original_result.stop_early = self.stop_on_failure
+        result = original_result
         for decorator in self._result_decorators:
-            run_result = decorator(run_result)
-        result.stop_early = self.stop_on_failure
-        result.report_starting()
-        if self.list_only:
-            if self.verbosity >= 2:
-                self.stream.writeln("Listing tests only ...\n")
-            run = 0
-            for t in iter_suite_tests(test):
-                self.stream.writeln("%s" % (t.id()))
-                run += 1
-            return None
-        else:
-            try:
-                import testtools
-            except ImportError:
-                test.run(run_result)
-            else:
-                if isinstance(test, testtools.ConcurrentTestSuite):
-                    # We need to catch bzr specific behaviors
-                    test.run(BZRTransformingResult(run_result))
-                else:
-                    test.run(run_result)
-            run = result.testsRun
-            actionTaken = "Ran"
-        stopTime = time.time()
-        timeTaken = stopTime - startTime
-        result.printErrors()
-        self.stream.writeln(result.separator2)
-        self.stream.writeln("%s %d test%s in %.3fs" % (actionTaken,
-                            run, run != 1 and "s" or "", timeTaken))
-        self.stream.writeln()
-        if not result.wasSuccessful():
-            self.stream.write("FAILED (")
-            failed, errored = map(len, (result.failures, result.errors))
-            if failed:
-                self.stream.write("failures=%d" % failed)
-            if errored:
-                if failed: self.stream.write(", ")
-                self.stream.write("errors=%d" % errored)
-            if result.known_failure_count:
-                if failed or errored: self.stream.write(", ")
-                self.stream.write("known_failure_count=%d" %
-                    result.known_failure_count)
-            self.stream.writeln(")")
-        else:
-            if result.known_failure_count:
-                self.stream.writeln("OK (known_failures=%d)" %
-                    result.known_failure_count)
-            else:
-                self.stream.writeln("OK")
-        if result.skip_count > 0:
-            skipped = result.skip_count
-            self.stream.writeln('%d test%s skipped' %
-                                (skipped, skipped != 1 and "s" or ""))
-        if result.unsupported:
-            for feature, count in sorted(result.unsupported.items()):
-                self.stream.writeln("Missing feature '%s' skipped %d tests." %
-                    (feature, count))
-        result.finished()
-        return result
+            result = decorator(result)
+            result.stop_early = self.stop_on_failure
+        try:
+            import testtools
+        except ImportError:
+            pass
+        else:
+            if isinstance(test, testtools.ConcurrentTestSuite):
+                # We need to catch bzr specific behaviors
+                result = BZRTransformingResult(result)
+        result.startTestRun()
+        try:
+            test.run(result)
+        finally:
+            result.stopTestRun()
+        # higher level code uses our extended protocol to determine
+        # what exit code to give.
+        return original_result
 
 
 def iter_suite_tests(suite):
@@ -2807,7 +2794,6 @@
                             descriptions=0,
                             verbosity=verbosity,
                             bench_history=bench_history,
-                            list_only=list_only,
                             strict=strict,
                             result_decorators=result_decorators,
                             )
@@ -2830,10 +2816,15 @@
         decorators.append(CountingDecorator)
     for decorator in decorators:
         suite = decorator(suite)
-    result = runner.run(suite)
     if list_only:
+        # Done after test suite decoration to allow randomisation etc
+        # to take effect, though that is of marginal benefit.
+        if verbosity >= 2:
+            stream.write("Listing tests only ...\n")
+        for t in iter_suite_tests(suite):
+            stream.write("%s\n" % (t.id()))
         return True
-    result.done()
+    result = runner.run(suite)
     if strict:
         return result.wasStrictlySuccessful()
     else:
@@ -3168,6 +3159,12 @@
     def stopTest(self, test):
         self.result.stopTest(test)
 
+    def startTestRun(self):
+        self.result.startTestRun()
+
+    def stopTestRun(self):
+        self.result.stopTestRun()
+
     def addSkip(self, test, reason):
         self.result.addSkip(test, reason)
 

=== modified file 'bzrlib/tests/test_selftest.py'
--- a/bzrlib/tests/test_selftest.py	2009-08-26 06:33:13 +0000
+++ b/bzrlib/tests/test_selftest.py	2009-08-26 23:25:28 +0000
@@ -820,7 +820,7 @@
     def test_known_failure(self):
         """A KnownFailure being raised should trigger several result actions."""
         class InstrumentedTestResult(tests.ExtendedTestResult):
-            def done(self): pass
+            def stopTestRun(self): pass
             def startTests(self): pass
             def report_test_start(self, test): pass
             def report_known_failure(self, test, err):
@@ -874,7 +874,7 @@
     def test_add_not_supported(self):
         """Test the behaviour of invoking addNotSupported."""
         class InstrumentedTestResult(tests.ExtendedTestResult):
-            def done(self): pass
+            def stopTestRun(self): pass
             def startTests(self): pass
             def report_test_start(self, test): pass
             def report_unsupported(self, test, feature):
@@ -918,7 +918,7 @@
     def test_unavailable_exception(self):
         """An UnavailableFeature being raised should invoke addNotSupported."""
         class InstrumentedTestResult(tests.ExtendedTestResult):
-            def done(self): pass
+            def stopTestRun(self): pass
             def startTests(self): pass
             def report_test_start(self, test): pass
             def addNotSupported(self, test, feature):
@@ -1001,11 +1001,14 @@
         because of our use of global state.
         """
         old_root = tests.TestCaseInTempDir.TEST_ROOT
+        old_leak = tests.TestCase._first_thread_leaker_id
         try:
             tests.TestCaseInTempDir.TEST_ROOT = None
+            tests.TestCase._first_thread_leaker_id = None
             return testrunner.run(test)
         finally:
             tests.TestCaseInTempDir.TEST_ROOT = old_root
+            tests.TestCase._first_thread_leaker_id = old_leak
 
     def test_known_failure_failed_run(self):
         # run a test that generates a known failure which should be printed in
@@ -1291,6 +1294,34 @@
         self.assertContainsRe(log, 'this will be kept')
         self.assertEqual(log, test._log_contents)
 
+    def test_startTestRun(self):
+        """run should call result.startTestRun()"""
+        calls = []
+        class LoggingDecorator(tests.ForwardingResult):
+            def startTestRun(self):
+                tests.ForwardingResult.startTestRun(self)
+                calls.append('startTestRun')
+        test = unittest.FunctionTestCase(lambda:None)
+        stream = StringIO()
+        runner = tests.TextTestRunner(stream=stream,
+            result_decorators=[LoggingDecorator])
+        result = self.run_test_runner(runner, test)
+        self.assertLength(1, calls)
+
+    def test_stopTestRun(self):
+        """run should call result.stopTestRun()"""
+        calls = []
+        class LoggingDecorator(tests.ForwardingResult):
+            def stopTestRun(self):
+                tests.ForwardingResult.stopTestRun(self)
+                calls.append('stopTestRun')
+        test = unittest.FunctionTestCase(lambda:None)
+        stream = StringIO()
+        runner = tests.TextTestRunner(stream=stream,
+            result_decorators=[LoggingDecorator])
+        result = self.run_test_runner(runner, test)
+        self.assertLength(1, calls)
+
 
 class SampleTestCase(tests.TestCase):
 
@@ -2934,19 +2965,3 @@
                                                 self.verbosity)
         tests.run_suite(suite, runner_class=MyRunner, stream=StringIO())
         self.assertLength(1, calls)
-
-    def test_done(self):
-        """run_suite should call result.done()"""
-        self.calls = 0
-        def one_more_call(): self.calls += 1
-        def test_function():
-            pass
-        test = unittest.FunctionTestCase(test_function)
-        class InstrumentedTestResult(tests.ExtendedTestResult):
-            def done(self): one_more_call()
-        class MyRunner(tests.TextTestRunner):
-            def run(self, test):
-                return InstrumentedTestResult(self.stream, self.descriptions,
-                                              self.verbosity)
-        tests.run_suite(test, runner_class=MyRunner, stream=StringIO())
-        self.assertEquals(1, self.calls)
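
One more behaviour change worth calling out: test listing moved out of ``TextTestRunner.run()`` into ``run_suite()``, after the suite decorators are applied, so ordering/randomisation decorators now affect what gets listed. A sketch of that path; ``iter_suite_tests`` mirrors the helper defined in ``bzrlib/tests/__init__.py``, while ``list_tests`` is an illustrative name:

import sys
import unittest

def iter_suite_tests(suite):
    """Flatten a possibly nested TestSuite into individual TestCases."""
    for item in suite:
        if isinstance(item, unittest.TestCase):
            yield item
        else:
            for test in iter_suite_tests(item):
                yield test

def list_tests(suite, stream=sys.stdout, verbosity=1):
    # Called after suite decoration, so a randomising decorator has
    # already reordered the suite by the time the ids are printed.
    if verbosity >= 2:
        stream.write("Listing tests only ...\n")
    for t in iter_suite_tests(suite):
        stream.write("%s\n" % (t.id(),))
    # run_suite treats a listing as a successful outcome.
    return True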



