Rev 4645: (robertc) Add --lsprof-tests option to bzr selftest for profiling in file:///home/pqm/archives/thelove/bzr/%2Btrunk/

Canonical.com Patch Queue Manager pqm at pqm.ubuntu.com
Tue Aug 25 06:04:07 BST 2009


At file:///home/pqm/archives/thelove/bzr/%2Btrunk/

------------------------------------------------------------
revno: 4645 [merge]
revision-id: pqm at pqm.ubuntu.com-20090825050405-9g097zqrev5ownl5
parent: pqm at pqm.ubuntu.com-20090825030737-d5xkv49mwu0e8zn6
parent: robertc at robertcollins.net-20090825034815-8iezwaet1s53bbsl
committer: Canonical.com Patch Queue Manager <pqm at pqm.ubuntu.com>
branch nick: +trunk
timestamp: Tue 2009-08-25 06:04:05 +0100
message:
  (robertc) Add --lsprof-tests option to bzr selftest for profiling
  	individual tests. (Robert Collins)
modified:
  NEWS                           NEWS-20050323055033-4e00b5db738777ff
  bzrlib/builtins.py             builtins.py-20050830033751-fc01482b9ca23183
  bzrlib/lsprof.py               lsprof.py-20051208071030-833790916798ceed
  bzrlib/tests/__init__.py       selftest.py-20050531073622-8d0e3c8845c97a64
  bzrlib/tests/blackbox/test_selftest.py test_selftest.py-20060123024542-01c5f1bbcb596d78
  bzrlib/tests/test_lsprof.py    test_lsprof.py-20070606095601-bctdndm8yhc0cqnc-1
  bzrlib/tests/test_selftest.py  test_selftest.py-20051202044319-c110a115d8c0456a
=== modified file 'NEWS'
--- a/NEWS	2009-08-24 18:28:46 +0000
+++ b/NEWS	2009-08-24 22:32:53 +0000
@@ -67,9 +67,16 @@
 Internals
 *********
 
+* The ``bzrlib.lsprof`` module has a new class ``BzrProfiler`` which makes
+  profiling easier in some situations, such as in callbacks and generators.
+  (Robert Collins)
+
 Testing
 *******
 
+* Passing ``--lsprof-tests -v`` to bzr selftest will cause lsprof output to
+  be shown for every test. Note that this is very verbose! (Robert Collins)
+
 bzr 1.18
 ########
 

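As the NEWS entry above says, the new flag is intended to be combined with
-v; a typical invocation would be:

  bzr selftest --lsprof-tests -v

The profile for each successful test is then emitted with the test output,
which is why the NEWS entry warns about verbosity.
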
=== modified file 'bzrlib/builtins.py'
--- a/bzrlib/builtins.py	2009-08-11 18:00:37 +0000
+++ b/bzrlib/builtins.py	2009-08-24 22:32:53 +0000
@@ -3369,6 +3369,8 @@
                      Option('lsprof-timed',
                             help='Generate lsprof output for benchmarked'
                                  ' sections of code.'),
+                     Option('lsprof-tests',
+                            help='Generate lsprof output for each test.'),
                      Option('cache-dir', type=str,
                             help='Cache intermediate benchmark output in this '
                                  'directory.'),
@@ -3415,7 +3417,7 @@
             first=False, list_only=False,
             randomize=None, exclude=None, strict=False,
             load_list=None, debugflag=None, starting_with=None, subunit=False,
-            parallel=None):
+            parallel=None, lsprof_tests=False):
         from bzrlib.tests import selftest
         import bzrlib.benchmarks as benchmarks
         from bzrlib.benchmarks import tree_creator
@@ -3455,6 +3457,7 @@
                               "transport": transport,
                               "test_suite_factory": test_suite_factory,
                               "lsprof_timed": lsprof_timed,
+                              "lsprof_tests": lsprof_tests,
                               "bench_history": benchfile,
                               "matching_tests_first": first,
                               "list_only": list_only,

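The builtins.py change above only plumbs the option through: lsprof_tests
travels in the keyword dictionary handed to bzrlib.tests.selftest. A minimal
sketch of driving the same code path from Python (the pattern value shown is
illustrative):

    from bzrlib import tests

    # Roughly equivalent to 'bzr selftest --lsprof-tests -v',
    # restricted to a single test module.
    tests.selftest(verbose=True, pattern='bzrlib.tests.test_lsprof',
                   lsprof_tests=True)
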
=== modified file 'bzrlib/lsprof.py'
--- a/bzrlib/lsprof.py	2009-03-08 06:18:06 +0000
+++ b/bzrlib/lsprof.py	2009-08-24 21:05:09 +0000
@@ -13,45 +13,74 @@
 
 __all__ = ['profile', 'Stats']
 
-_g_threadmap = {}
-
-
-def _thread_profile(f, *args, **kwds):
-    # we lose the first profile point for a new thread in order to trampoline
-    # a new Profile object into place
-    global _g_threadmap
-    thr = thread.get_ident()
-    _g_threadmap[thr] = p = Profiler()
-    # this overrides our sys.setprofile hook:
-    p.enable(subcalls=True, builtins=True)
-
-
 def profile(f, *args, **kwds):
     """Run a function profile.
 
     Exceptions are not caught: If you need stats even when exceptions are to be
-    raised, passing in a closure that will catch the exceptions and transform
-    them appropriately for your driver function.
+    raised, pass in a closure that will catch the exceptions and transform them
+    appropriately for your driver function.
 
     :return: The functions return value and a stats object.
     """
-    global _g_threadmap
-    p = Profiler()
-    p.enable(subcalls=True)
-    threading.setprofile(_thread_profile)
+    profiler = BzrProfiler()
+    profiler.start()
     try:
         ret = f(*args, **kwds)
     finally:
-        p.disable()
-        for pp in _g_threadmap.values():
+        stats = profiler.stop()
+    return ret, stats
+
+
+class BzrProfiler(object):
+    """Bzr utility wrapper around Profiler.
+    
+    For most uses the module level 'profile()' function will be suitable.
+    However, when the code to be profiled cannot easily be wrapped in a
+    single function, this class may be easier to use.
+
+    To use it, create a BzrProfiler and call start() on it. Some arbitrary
+    time later call stop() to stop profiling and retrieve the statistics
+    from the code executed in the interim.
+    """
+
+    def start(self):
+        """Start profiling.
+        
+        This hooks into threading and will record all calls made until
+        stop() is called.
+        """
+        self._g_threadmap = {}
+        self.p = Profiler()
+        self.p.enable(subcalls=True)
+        threading.setprofile(self._thread_profile)
+
+    def stop(self):
+        """Stop profiling.
+
+        This unhooks from threading and cleans up the profiler, returning
+        the gathered Stats object.
+
+        :return: A bzrlib.lsprof.Stats object.
+        """
+        self.p.disable()
+        for pp in self._g_threadmap.values():
             pp.disable()
         threading.setprofile(None)
+        p = self.p
+        self.p = None
+        threads = {}
+        for tid, pp in self._g_threadmap.items():
+            threads[tid] = Stats(pp.getstats(), {})
+        self._g_threadmap = None
+        return Stats(p.getstats(), threads)
 
-    threads = {}
-    for tid, pp in _g_threadmap.items():
-        threads[tid] = Stats(pp.getstats(), {})
-    _g_threadmap = {}
-    return ret, Stats(p.getstats(), threads)
+    def _thread_profile(self, f, *args, **kwds):
+        # we lose the first profile point for a new thread in order to
+        # trampoline a new Profile object into place
+        thr = thread.get_ident()
+        self._g_threadmap[thr] = p = Profiler()
+        # this overrides our sys.setprofile hook:
+        p.enable(subcalls=True, builtins=True)
 
 
 class Stats(object):

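Taken together, the refactoring above moves the old module-global profiling
state (_g_threadmap and the free functions) into per-instance state on
BzrProfiler. A minimal usage sketch of the new class (workload is a stand-in
for real code; the freeze()/data calls mirror the test added in
test_lsprof.py below):

    from bzrlib import lsprof

    def workload():
        # stand-in for the code being measured
        sum(range(1000))

    profiler = lsprof.BzrProfiler()
    profiler.start()
    try:
        workload()
    finally:
        # stop() unhooks threading.setprofile and returns the Stats
        # object even if the profiled code raised.
        stats = profiler.stop()
    stats.freeze()
    for entry in stats.data:
        print entry

For the common case where the code to profile is available as a single
callable, the module-level helper remains the shortest route:
ret, stats = lsprof.profile(workload).
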
=== modified file 'bzrlib/tests/__init__.py'
--- a/bzrlib/tests/__init__.py	2009-08-24 20:30:18 +0000
+++ b/bzrlib/tests/__init__.py	2009-08-25 05:04:05 +0000
@@ -579,13 +579,22 @@
                  bench_history=None,
                  list_only=False,
                  strict=False,
+                 result_decorators=None,
                  ):
+        """Create a TextTestRunner.
+
+        :param result_decorators: An optional list of decorators to apply
+            to the result object being used by the runner. Decorators are
+            applied left to right - the first element in the list is the 
+            innermost decorator.
+        """
         self.stream = unittest._WritelnDecorator(stream)
         self.descriptions = descriptions
         self.verbosity = verbosity
         self._bench_history = bench_history
         self.list_only = list_only
         self._strict = strict
+        self._result_decorators = result_decorators or []
 
     def run(self, test):
         "Run the given test case or test suite."
@@ -600,6 +609,9 @@
                               bench_history=self._bench_history,
                               strict=self._strict,
                               )
+        run_result = result
+        for decorator in self._result_decorators:
+            run_result = decorator(run_result)
         result.stop_early = self.stop_on_failure
         result.report_starting()
         if self.list_only:
@@ -614,13 +626,13 @@
             try:
                 import testtools
             except ImportError:
-                test.run(result)
+                test.run(run_result)
             else:
                 if isinstance(test, testtools.ConcurrentTestSuite):
                     # We need to catch bzr specific behaviors
-                    test.run(BZRTransformingResult(result))
+                    test.run(BZRTransformingResult(run_result))
                 else:
-                    test.run(result)
+                    test.run(run_result)
             run = result.testsRun
             actionTaken = "Ran"
         stopTime = time.time()
@@ -2762,7 +2774,9 @@
               strict=False,
               runner_class=None,
               suite_decorators=None,
-              stream=None):
+              stream=None,
+              result_decorators=None,
+              ):
     """Run a test suite for bzr selftest.
 
     :param runner_class: The class of runner to use. Must support the
@@ -2785,6 +2799,7 @@
                             bench_history=bench_history,
                             list_only=list_only,
                             strict=strict,
+                            result_decorators=result_decorators,
                             )
     runner.stop_on_failure=stop_on_failure
     # built in decorator factories:
@@ -3131,7 +3146,7 @@
     return result
 
 
-class BZRTransformingResult(unittest.TestResult):
+class ForwardingResult(unittest.TestResult):
 
     def __init__(self, target):
         unittest.TestResult.__init__(self)
@@ -3143,6 +3158,21 @@
     def stopTest(self, test):
         self.result.stopTest(test)
 
+    def addSkip(self, test, reason):
+        self.result.addSkip(test, reason)
+
+    def addSuccess(self, test):
+        self.result.addSuccess(test)
+
+    def addError(self, test, err):
+        self.result.addError(test, err)
+
+    def addFailure(self, test, err):
+        self.result.addFailure(test, err)
+
+
+class BZRTransformingResult(ForwardingResult):
+
     def addError(self, test, err):
         feature = self._error_looks_like('UnavailableFeature: ', err)
         if feature is not None:
@@ -3158,12 +3188,6 @@
         else:
             self.result.addFailure(test, err)
 
-    def addSkip(self, test, reason):
-        self.result.addSkip(test, reason)
-
-    def addSuccess(self, test):
-        self.result.addSuccess(test)
-
     def _error_looks_like(self, prefix, err):
         """Deserialize exception and returns the stringify value."""
         import subunit
@@ -3181,6 +3205,38 @@
         return value
 
 
+class ProfileResult(ForwardingResult):
+    """Generate profiling data for all activity between start and success.
+    
+    The profile data is appended to the test's _benchcalls attribute and can
+    be accessed by the forwarded-to TestResult.
+
+    While it might be cleaner to accumulate this in stopTest, addSuccess is
+    where our existing output support for lsprof is, and this class aims to
+    fit in with that: while it could be moved, it's not necessary to
+    accomplish test profiling, nor would it be dramatically cleaner.
+    """
+
+    def startTest(self, test):
+        self.profiler = bzrlib.lsprof.BzrProfiler()
+        self.profiler.start()
+        ForwardingResult.startTest(self, test)
+
+    def addSuccess(self, test):
+        stats = self.profiler.stop()
+        try:
+            calls = test._benchcalls
+        except AttributeError:
+            test._benchcalls = []
+            calls = test._benchcalls
+        calls.append(((test.id(), "", ""), stats))
+        ForwardingResult.addSuccess(self, test)
+
+    def stopTest(self, test):
+        ForwardingResult.stopTest(self, test)
+        self.profiler = None
+
+
 # Controlled by "bzr selftest -E=..." option
 # Currently supported:
 #   -Eallow_debug           Will no longer clear debug.debug_flags() so it
@@ -3208,6 +3264,7 @@
              runner_class=None,
              suite_decorators=None,
              stream=None,
+             lsprof_tests=False,
              ):
     """Run the whole test suite under the enhanced runner"""
     # XXX: Very ugly way to do this...
@@ -3242,6 +3299,9 @@
         if starting_with:
             # But always filter as requested.
             suite = filter_suite_by_id_startswith(suite, starting_with)
+        result_decorators = []
+        if lsprof_tests:
+            result_decorators.append(ProfileResult)
         return run_suite(suite, 'testbzr', verbose=verbose, pattern=pattern,
                      stop_on_failure=stop_on_failure,
                      transport=transport,
@@ -3255,6 +3315,7 @@
                      runner_class=runner_class,
                      suite_decorators=suite_decorators,
                      stream=stream,
+                     result_decorators=result_decorators,
                      )
     finally:
         default_transport = old_transport

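One detail of the runner change worth spelling out: decorators in
result_decorators are applied left to right, so the first entry wraps the
undecorated result and is the innermost decorator, closest to the real
TestResult. A minimal sketch modelled on the test_result_decorator test
below (the LoggingDecorator name and its print output are illustrative):

    import sys
    from bzrlib import tests

    class LoggingDecorator(tests.ForwardingResult):
        # Illustrative: report each test as it starts, then forward
        # the event to the wrapped result.
        def startTest(self, test):
            print 'starting', test.id()
            tests.ForwardingResult.startTest(self, test)

    runner = tests.TextTestRunner(stream=sys.stdout,
        result_decorators=[LoggingDecorator])
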
=== modified file 'bzrlib/tests/blackbox/test_selftest.py'
--- a/bzrlib/tests/blackbox/test_selftest.py	2009-08-24 05:23:11 +0000
+++ b/bzrlib/tests/blackbox/test_selftest.py	2009-08-24 22:32:53 +0000
@@ -172,3 +172,7 @@
             outputs_nothing(['selftest', '--list-only', '--exclude', 'selftest'])
         finally:
             tests.selftest = original_selftest
+
+    def test_lsprof_tests(self):
+        params = self.get_params_passed_to_core('selftest --lsprof-tests')
+        self.assertEqual(True, params[1]["lsprof_tests"])

=== modified file 'bzrlib/tests/test_lsprof.py'
--- a/bzrlib/tests/test_lsprof.py	2009-03-23 14:59:43 +0000
+++ b/bzrlib/tests/test_lsprof.py	2009-08-24 21:05:09 +0000
@@ -92,3 +92,22 @@
         self.stats.save(f)
         data1 = cPickle.load(open(f))
         self.assertEqual(type(data1), bzrlib.lsprof.Stats)
+
+
+class TestBzrProfiler(tests.TestCase):
+
+    _test_needs_features = [LSProfFeature]
+
+    def test_start_call_stuff_stop(self):
+        profiler = bzrlib.lsprof.BzrProfiler()
+        profiler.start()
+        try:
+            def a_function():
+                pass
+            a_function()
+        finally:
+            stats = profiler.stop()
+        stats.freeze()
+        lines = [str(data) for data in stats.data]
+        lines = [line for line in lines if 'a_function' in line]
+        self.assertLength(1, lines)

=== modified file 'bzrlib/tests/test_selftest.py'
--- a/bzrlib/tests/test_selftest.py	2009-08-24 05:35:28 +0000
+++ b/bzrlib/tests/test_selftest.py	2009-08-25 03:48:15 +0000
@@ -687,6 +687,26 @@
         self.assertEqual(url, t.clone('..').base)
 
 
+class TestProfileResult(tests.TestCase):
+
+    def test_profiles_tests(self):
+        self.requireFeature(test_lsprof.LSProfFeature)
+        terminal = unittest.TestResult()
+        result = tests.ProfileResult(terminal)
+        class Sample(tests.TestCase):
+            def a(self):
+                self.sample_function()
+            def sample_function(self):
+                pass
+        test = Sample("a")
+        test.attrs_to_keep = test.attrs_to_keep + ('_benchcalls',)
+        test.run(result)
+        self.assertLength(1, test._benchcalls)
+        # We must be able to unpack it as the test reporting code wants
+        (_, _, _), stats = test._benchcalls[0]
+        self.assertTrue(callable(stats.pprint))
+
+
 class TestTestResult(tests.TestCase):
 
     def check_timing(self, test_case, expected_re):
@@ -1031,6 +1051,20 @@
             '\n'
             'OK \\(known_failures=1\\)\n')
 
+    def test_result_decorator(self):
+        # decorate results
+        calls = []
+        class LoggingDecorator(tests.ForwardingResult):
+            def startTest(self, test):
+                tests.ForwardingResult.startTest(self, test)
+                calls.append('start')
+        test = unittest.FunctionTestCase(lambda:None)
+        stream = StringIO()
+        runner = tests.TextTestRunner(stream=stream,
+            result_decorators=[LoggingDecorator])
+        result = self.run_test_runner(runner, test)
+        self.assertLength(1, calls)
+
     def test_skipped_test(self):
         # run a test that is skipped, and check the suite as a whole still
         # succeeds.
@@ -1103,10 +1137,6 @@
         self.assertContainsRe(out.getvalue(),
                 r'(?m)^    this test never runs')
 
-    def test_not_applicable_demo(self):
-        # just so you can see it in the test output
-        raise tests.TestNotApplicable('this test is just a demonstation')
-
     def test_unsupported_features_listed(self):
         """When unsupported features are encountered they are detailed."""
         class Feature1(tests.Feature):
@@ -1480,6 +1510,7 @@
         self.assertEqual((time.sleep, (0.003,), {}), self._benchcalls[1][0])
         self.assertIsInstance(self._benchcalls[0][1], bzrlib.lsprof.Stats)
         self.assertIsInstance(self._benchcalls[1][1], bzrlib.lsprof.Stats)
+        del self._benchcalls[:]
 
     def test_knownFailure(self):
         """Self.knownFailure() should raise a KnownFailure exception."""
@@ -1817,6 +1848,20 @@
         self.assertNotContainsRe("Test.b", output.getvalue())
         self.assertLength(2, output.readlines())
 
+    def test_lsprof_tests(self):
+        self.requireFeature(test_lsprof.LSProfFeature)
+        calls = []
+        class Test(object):
+            def __call__(test, result):
+                test.run(result)
+            def run(test, result):
+                self.assertIsInstance(result, tests.ForwardingResult)
+                calls.append("called")
+            def countTestCases(self):
+                return 1
+        self.run_selftest(test_suite_factory=Test, lsprof_tests=True)
+        self.assertLength(1, calls)
+
     def test_random(self):
         # test randomising by listing a number of tests.
         output_123 = self.run_selftest(test_suite_factory=self.factory,
