Rev 5296: Merge propagate-exceptions into http-leaks in file:///home/vila/src/bzr/experimental/leaking-tests/
Vincent Ladeuil
v.ladeuil+lp at free.fr
Mon Jul 5 09:10:11 BST 2010
At file:///home/vila/src/bzr/experimental/leaking-tests/
------------------------------------------------------------
revno: 5296 [merge]
revision-id: v.ladeuil+lp at free.fr-20100705081010-zzsgovcbinj4cyrb
parent: v.ladeuil+lp at free.fr-20100701064014-p3zyz3eevfrkl8i4
parent: v.ladeuil+lp at free.fr-20100705081008-ribxqzzxlkdbq42p
committer: Vincent Ladeuil <v.ladeuil+lp at free.fr>
branch nick: http-leaks
timestamp: Mon 2010-07-05 10:10:10 +0200
message:
Merge propagate-exceptions into http-leaks
modified:
NEWS NEWS-20050323055033-4e00b5db738777ff
bzrlib/__init__.py __init__.py-20050309040759-33e65acf91bbcd5d
bzrlib/branch.py branch.py-20050309040759-e4baf4e0d046576e
bzrlib/builtins.py builtins.py-20050830033751-fc01482b9ca23183
bzrlib/lsprof.py lsprof.py-20051208071030-833790916798ceed
bzrlib/osutils.py osutils.py-20050309040759-eeaff12fbf77ac86
bzrlib/tests/__init__.py selftest.py-20050531073622-8d0e3c8845c97a64
bzrlib/tests/per_branch/test_push.py test_push.py-20070130153159-fhfap8uoifevg30j-1
bzrlib/tests/per_branch/test_stacking.py test_stacking.py-20080214020755-msjlkb7urobwly0f-1
bzrlib/tests/test_lsprof.py test_lsprof.py-20070606095601-bctdndm8yhc0cqnc-1
doc/developers/ppa.txt ppa.txt-20080722055539-606u7t2z32t3ae4w-1
setup.py setup.py-20050314065409-02f8a0a6e3f9bc70
-------------- next part --------------
=== modified file 'NEWS'
--- a/NEWS 2010-06-30 16:24:01 +0000
+++ b/NEWS 2010-07-05 08:10:10 +0000
@@ -62,6 +62,9 @@
or pull location in locations.conf or branch.conf.
(Gordon Tyler, #534787)
+* ``bzr reconfigure --unstacked`` now works with branches accessed via a
+ smart server. (Andrew Bennetts, #551525)
+
* ``BzrDir.find_branches`` should ignore branches with missing repositories.
(Marius Kruger, Robert Collins)
@@ -171,6 +174,11 @@
InterBranch objects that work with multiple permutations to be
comprehensively tested. (Robert Collins)
+* ``bzrlib.lsprof.profile`` will no longer silently generate bad threaded
+ profiles when concurrent profile requests are made. Instead the profile
+ requests will be serialised. Reentrant requests will now deadlock.
+ (Robert Collins)
+
* ``bzrlib.knit.KnitSequenceMatcher``, which has been deprecated since
2007, has been deleted. Use ``PatienceSequenceMatcher`` from
``bzrlib.patiencediff`` instead. (Andrew Bennetts)
=== modified file 'bzrlib/__init__.py'
--- a/bzrlib/__init__.py 2010-06-26 02:15:26 +0000
+++ b/bzrlib/__init__.py 2010-07-02 22:44:00 +0000
@@ -38,13 +38,6 @@
_start_time = time.time()
import sys
-if getattr(sys, '_bzr_lazy_regex', False):
- # The 'bzr' executable sets _bzr_lazy_regex. We install the lazy regex
- # hack as soon as possible so that as much of the standard library can
- # benefit, including the 'string' module.
- del sys._bzr_lazy_regex
- import bzrlib.lazy_regex
- bzrlib.lazy_regex.install_lazy_compile()
IGNORE_FILENAME = ".bzrignore"
@@ -126,6 +119,18 @@
return main_version + sub_string
+# lazy_regex import must be done after _format_version_tuple definition
+# to avoid "no attribute '_format_version_tuple'" error when using
+# deprecated_function in the lazy_regex module.
+if getattr(sys, '_bzr_lazy_regex', False):
+ # The 'bzr' executable sets _bzr_lazy_regex. We install the lazy regex
+ # hack as soon as possible so that as much of the standard library can
+ # benefit, including the 'string' module.
+ del sys._bzr_lazy_regex
+ import bzrlib.lazy_regex
+ bzrlib.lazy_regex.install_lazy_compile()
+
+
__version__ = _format_version_tuple(version_info)
version_string = __version__
@@ -163,7 +168,7 @@
otherwise stopping use of bzrlib. Advanced callers can use
BzrLibraryState directly.
"""
- import bzrlib.library_state
+ from bzrlib import library_state, trace
if setup_ui:
import bzrlib.ui
stdin = stdin or sys.stdin
@@ -172,8 +177,8 @@
ui_factory = bzrlib.ui.make_ui_for_terminal(stdin, stdout, stderr)
else:
ui_factory = None
- tracer = bzrlib.trace.DefaultConfig()
- return bzrlib.library_state.BzrLibraryState(ui=ui_factory, trace=tracer)
+ tracer = trace.DefaultConfig()
+ return library_state.BzrLibraryState(ui=ui_factory, trace=tracer)
def test_suite():
=== modified file 'bzrlib/branch.py'
--- a/bzrlib/branch.py 2010-06-20 21:14:49 +0000
+++ b/bzrlib/branch.py 2010-06-30 08:34:11 +0000
@@ -801,28 +801,56 @@
if len(old_repository._fallback_repositories) != 1:
raise AssertionError("can't cope with fallback repositories "
"of %r" % (self.repository,))
- # unlock it, including unlocking the fallback
+ # Open the new repository object.
+ # Repositories don't offer an interface to remove fallback
+ # repositories today; take the conceptually simpler option and just
+ # reopen it. We reopen it starting from the URL so that we
+ # get a separate connection for RemoteRepositories and can
+ # stream from one of them to the other. This does mean doing
+ # separate SSH connection setup, but unstacking is not a
+ # common operation so it's tolerable.
+ new_bzrdir = bzrdir.BzrDir.open(self.bzrdir.root_transport.base)
+ new_repository = new_bzrdir.find_repository()
+ if new_repository._fallback_repositories:
+ raise AssertionError("didn't expect %r to have "
+ "fallback_repositories"
+ % (self.repository,))
+ # Replace self.repository with the new repository.
+ # Do our best to transfer the lock state (i.e. lock-tokens and
+ # lock count) of self.repository to the new repository.
+ lock_token = old_repository.lock_write().repository_token
+ self.repository = new_repository
+ if isinstance(self, remote.RemoteBranch):
+ # Remote branches can have a second reference to the old
+ # repository that needs to be replaced.
+ if self._real_branch is not None:
+ self._real_branch.repository = new_repository
+ self.repository.lock_write(token=lock_token)
+ if lock_token is not None:
+ old_repository.leave_lock_in_place()
old_repository.unlock()
+ if lock_token is not None:
+ # XXX: self.repository.leave_lock_in_place() before this
+ # function will not be preserved. Fortunately that doesn't
+ # affect the current default format (2a), and would be a
+ # corner-case anyway.
+ # - Andrew Bennetts, 2010/06/30
+ self.repository.dont_leave_lock_in_place()
+ old_lock_count = 0
+ while True:
+ try:
+ old_repository.unlock()
+ except errors.LockNotHeld:
+ break
+ old_lock_count += 1
+ if old_lock_count == 0:
+ raise AssertionError(
+ 'old_repository should have been locked at least once.')
+ for i in range(old_lock_count-1):
+ self.repository.lock_write()
+ # Fetch from the old repository into the new.
old_repository.lock_read()
try:
- # Repositories don't offer an interface to remove fallback
- # repositories today; take the conceptually simpler option and just
- # reopen it. We reopen it starting from the URL so that we
- # get a separate connection for RemoteRepositories and can
- # stream from one of them to the other. This does mean doing
- # separate SSH connection setup, but unstacking is not a
- # common operation so it's tolerable.
- new_bzrdir = bzrdir.BzrDir.open(self.bzrdir.root_transport.base)
- new_repository = new_bzrdir.find_repository()
- self.repository = new_repository
- if self.repository._fallback_repositories:
- raise AssertionError("didn't expect %r to have "
- "fallback_repositories"
- % (self.repository,))
- # this is not paired with an unlock because it's just restoring
- # the previous state; the lock's released when set_stacked_on_url
- # returns
- self.repository.lock_write()
# XXX: If you unstack a branch while it has a working tree
# with a pending merge, the pending-merged revisions will no
# longer be present. You can (probably) revert and remerge.
=== modified file 'bzrlib/builtins.py'
--- a/bzrlib/builtins.py 2010-06-23 08:19:32 +0000
+++ b/bzrlib/builtins.py 2010-07-05 08:10:06 +0000
@@ -2721,14 +2721,13 @@
ignores.tree_ignores_add_patterns(tree, name_pattern_list)
ignored = globbing.Globster(name_pattern_list)
matches = []
- tree.lock_read()
+ self.add_cleanup(tree.lock_read().unlock)
for entry in tree.list_files():
id = entry[3]
if id is not None:
filename = entry[0]
if ignored.match(filename):
matches.append(filename)
- tree.unlock()
if len(matches) > 0:
self.outf.write("Warning: the following files are version controlled and"
" match your ignore pattern:\n%s"
=== modified file 'bzrlib/lsprof.py'
--- a/bzrlib/lsprof.py 2009-08-24 21:05:09 +0000
+++ b/bzrlib/lsprof.py 2010-07-04 06:22:11 +0000
@@ -10,6 +10,7 @@
import threading
from _lsprof import Profiler, profiler_entry
+from bzrlib import errors
__all__ = ['profile', 'Stats']
@@ -20,6 +21,9 @@
raised, pass in a closure that will catch the exceptions and transform them
appropriately for your driver function.
+ Important caveat: only one profile can execute at a time. See BzrProfiler
+ for details.
+
:return: The functions return value and a stats object.
"""
profiler = BzrProfiler()
@@ -41,8 +45,21 @@
To use it, create a BzrProfiler and call start() on it. Some arbitrary
time later call stop() to stop profiling and retrieve the statistics
from the code executed in the interim.
+
+ Note that profiling involves a threading.Lock around the actual profiling.
+ This is needed because profiling involves global manipulation of the python
+ interpreter state. As such you cannot perform multiple profiles at once.
+ Trying to do so will lock out the second profiler unless the global
+ bzrlib.lsprof.BzrProfiler.profiler_block is set to 0. Setting it to 0 will
+ cause profiling to fail rather than blocking.
"""
+ profiler_block = 1
+ """Serialise rather than failing to profile concurrent profile requests."""
+
+ profiler_lock = threading.Lock()
+ """Global lock used to serialise profiles."""
+
def start(self):
"""Start profiling.
@@ -51,8 +68,16 @@
"""
self._g_threadmap = {}
self.p = Profiler()
- self.p.enable(subcalls=True)
- threading.setprofile(self._thread_profile)
+ permitted = self.__class__.profiler_lock.acquire(
+ self.__class__.profiler_block)
+ if not permitted:
+ raise errors.InternalBzrError(msg="Already profiling something")
+ try:
+ self.p.enable(subcalls=True)
+ threading.setprofile(self._thread_profile)
+ except:
+ self.__class__.profiler_lock.release()
+ raise
def stop(self):
"""Stop profiling.
@@ -62,17 +87,20 @@
:return: A bzrlib.lsprof.Stats object.
"""
- self.p.disable()
- for pp in self._g_threadmap.values():
- pp.disable()
- threading.setprofile(None)
- p = self.p
- self.p = None
- threads = {}
- for tid, pp in self._g_threadmap.items():
- threads[tid] = Stats(pp.getstats(), {})
- self._g_threadmap = None
- return Stats(p.getstats(), threads)
+ try:
+ self.p.disable()
+ for pp in self._g_threadmap.values():
+ pp.disable()
+ threading.setprofile(None)
+ p = self.p
+ self.p = None
+ threads = {}
+ for tid, pp in self._g_threadmap.items():
+ threads[tid] = Stats(pp.getstats(), {})
+ self._g_threadmap = None
+ return Stats(p.getstats(), threads)
+ finally:
+ self.__class__.profiler_lock.release()
def _thread_profile(self, f, *args, **kwds):
# we lose the first profile point for a new thread in order to
=== modified file 'bzrlib/osutils.py'
--- a/bzrlib/osutils.py 2010-06-30 15:19:36 +0000
+++ b/bzrlib/osutils.py 2010-07-05 08:10:08 +0000
@@ -352,7 +352,7 @@
def _win32_mkdtemp(*args, **kwargs):
- return _win32_fixdrive(mkdtemp(*args, **kwargs).replace('\\', '/'))
+ return _win32_fixdrive(tempfile.mkdtemp(*args, **kwargs).replace('\\', '/'))
def _win32_rename(old, new):
=== modified file 'bzrlib/tests/__init__.py'
--- a/bzrlib/tests/__init__.py 2010-06-29 17:13:54 +0000
+++ b/bzrlib/tests/__init__.py 2010-07-05 08:10:08 +0000
@@ -3375,6 +3375,9 @@
def startTest(self, test):
self.profiler = bzrlib.lsprof.BzrProfiler()
+ # Prevent deadlocks in tests that use lsprof: those tests will
+ # unavoidably fail.
+ bzrlib.lsprof.BzrProfiler.profiler_block = 0
self.profiler.start()
ForwardingResult.startTest(self, test)
=== modified file 'bzrlib/tests/per_branch/test_push.py'
--- a/bzrlib/tests/per_branch/test_push.py 2010-02-23 07:43:11 +0000
+++ b/bzrlib/tests/per_branch/test_push.py 2010-07-04 22:39:45 +0000
@@ -231,59 +231,6 @@
trunk.push(remote_branch)
check.check_dwim(remote_branch.base, False, True, True)
- def test_no_get_parent_map_after_insert_stream(self):
- # Effort test for bug 331823
- self.setup_smart_server_with_call_log()
- # Make a local branch with four revisions. Four revisions because:
- # one to push, one there for _walk_to_common_revisions to find, one we
- # don't want to access, one for luck :)
- if isinstance(self.branch_format, branch.BranchReferenceFormat):
- # This test could in principle apply to BranchReferenceFormat, but
- # make_branch_builder doesn't support it.
- raise tests.TestSkipped(
- "BranchBuilder can't make reference branches.")
- try:
- builder = self.make_branch_builder('local')
- except (errors.TransportNotPossible, errors.UninitializableFormat):
- raise tests.TestNotApplicable('format not directly constructable')
- builder.start_series()
- builder.build_snapshot('first', None, [
- ('add', ('', 'root-id', 'directory', ''))])
- builder.build_snapshot('second', ['first'], [])
- builder.build_snapshot('third', ['second'], [])
- builder.build_snapshot('fourth', ['third'], [])
- builder.finish_series()
- local = builder.get_branch()
- local = branch.Branch.open(self.get_vfs_only_url('local'))
- # Initial push of three revisions
- remote_bzrdir = local.bzrdir.sprout(
- self.get_url('remote'), revision_id='third')
- remote = remote_bzrdir.open_branch()
- # Push fourth revision
- self.reset_smart_call_log()
- self.disableOptimisticGetParentMap()
- self.assertFalse(local.is_locked())
- local.push(remote)
- hpss_call_names = [item.call.method for item in self.hpss_calls]
- self.assertTrue('Repository.insert_stream_1.19' in hpss_call_names)
- insert_stream_idx = hpss_call_names.index(
- 'Repository.insert_stream_1.19')
- calls_after_insert_stream = hpss_call_names[insert_stream_idx:]
- # After inserting the stream the client has no reason to query the
- # remote graph any further.
- self.assertEqual(
- ['Repository.insert_stream_1.19', 'Repository.insert_stream_1.19',
- 'get', 'Branch.set_last_revision_info', 'Branch.unlock'],
- calls_after_insert_stream)
-
- def disableOptimisticGetParentMap(self):
- # Tweak some class variables to stop remote get_parent_map calls asking
- # for or receiving more data than the caller asked for.
- self.overrideAttr(repository.InterRepository,
- '_walk_to_common_revisions_batch_size', 1)
- self.overrideAttr(_mod_smart_repo.SmartServerRepositoryGetParentMap,
- 'no_extra_results', True)
-
class TestPushHook(per_branch.TestCaseWithBranch):
=== modified file 'bzrlib/tests/per_branch/test_stacking.py'
--- a/bzrlib/tests/per_branch/test_stacking.py 2010-06-20 11:18:38 +0000
+++ b/bzrlib/tests/per_branch/test_stacking.py 2010-07-01 03:41:20 +0000
@@ -23,8 +23,7 @@
errors,
)
from bzrlib.revision import NULL_REVISION
-from bzrlib.smart import server
-from bzrlib.tests import TestNotApplicable, KnownFailure, transport_util
+from bzrlib.tests import TestNotApplicable, transport_util
from bzrlib.tests.per_branch import TestCaseWithBranch
@@ -206,6 +205,40 @@
self.assertRaises(errors.NotStacked,
new_branch.get_stacked_on_url)
+ def test_unstack_already_locked(self):
+ """Removing the stacked-on branch with an already write-locked branch
+ works.
+
+ This was bug 551525.
+ """
+ try:
+ stacked_bzrdir = self.make_stacked_bzrdir()
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ stacked_branch = stacked_bzrdir.open_branch()
+ stacked_branch.lock_write()
+ stacked_branch.set_stacked_on_url(None)
+ stacked_branch.unlock()
+
+ def test_unstack_already_multiple_locked(self):
+ """Unstacking a branch preserves the lock count (even though it
+ replaces the br.repository object).
+
+ This is a more extreme variation of test_unstack_already_locked.
+ """
+ try:
+ stacked_bzrdir = self.make_stacked_bzrdir()
+ except unstackable_format_errors, e:
+ raise TestNotApplicable(e)
+ stacked_branch = stacked_bzrdir.open_branch()
+ stacked_branch.lock_write()
+ stacked_branch.lock_write()
+ stacked_branch.lock_write()
+ stacked_branch.set_stacked_on_url(None)
+ stacked_branch.unlock()
+ stacked_branch.unlock()
+ stacked_branch.unlock()
+
def make_stacked_bzrdir(self, in_directory=None):
"""Create a stacked branch and return its bzrdir.
@@ -221,7 +254,8 @@
tree = self.make_branch_and_tree(prefix + 'stacked-on')
tree.commit('Added foo')
stacked_bzrdir = tree.branch.bzrdir.sprout(
- prefix + 'stacked', tree.branch.last_revision(), stacked=True)
+ self.get_url(prefix + 'stacked'), tree.branch.last_revision(),
+ stacked=True)
return stacked_bzrdir
def test_clone_from_stacked_branch_preserve_stacking(self):
@@ -249,7 +283,8 @@
except unstackable_format_errors, e:
raise TestNotApplicable(e)
stacked_bzrdir.open_branch().set_stacked_on_url('../stacked-on')
- cloned_bzrdir = stacked_bzrdir.clone('cloned', preserve_stacking=True)
+ cloned_bzrdir = stacked_bzrdir.clone(
+ self.get_url('cloned'), preserve_stacking=True)
self.assertEqual(
'../dir/stacked-on',
cloned_bzrdir.open_branch().get_stacked_on_url())
=== modified file 'bzrlib/tests/test_lsprof.py'
--- a/bzrlib/tests/test_lsprof.py 2009-08-24 21:05:09 +0000
+++ b/bzrlib/tests/test_lsprof.py 2010-07-05 08:10:03 +0000
@@ -1,4 +1,4 @@
-# Copyright (C) 2005, 2006 Canonical Ltd
+# Copyright (C) 2007, 2009, 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -19,9 +19,10 @@
import cPickle
import os
+import threading
import bzrlib
-from bzrlib import tests
+from bzrlib import errors, tests
class _LSProfFeature(tests.Feature):
@@ -111,3 +112,40 @@
lines = [str(data) for data in stats.data]
lines = [line for line in lines if 'a_function' in line]
self.assertLength(1, lines)
+
+ def test_block_0(self):
+ # When profiler_block is 0, reentrant profile requests fail.
+ self.overrideAttr(bzrlib.lsprof.BzrProfiler, 'profiler_block', 0)
+ inner_calls = []
+ def inner():
+ profiler = bzrlib.lsprof.BzrProfiler()
+ self.assertRaises(errors.BzrError, profiler.start)
+ inner_calls.append(True)
+ bzrlib.lsprof.profile(inner)
+ self.assertLength(1, inner_calls)
+
+ def test_block_1(self):
+ # When profiler_block is 1, concurrent profiles serialise.
+ # This is tested by manually acquiring the profiler lock, then
+ # starting a thread that tries to profile, and releasing the lock.
+ # We know due to test_block_0 that two profiles at once hit the lock,
+ # so while this isn't perfect (we'd want a callback on the lock being
+ # entered to allow lockstep evaluation of the actions), it's good enough
+ # to be confident regressions would be caught. Alternatively, if this
+ # is flaky, a fake Lock object can be used to trace the calls made.
+ calls = []
+ def profiled():
+ calls.append('profiled')
+ def do_profile():
+ bzrlib.lsprof.profile(profiled)
+ calls.append('after_profiled')
+ thread = threading.Thread(target=do_profile)
+ bzrlib.lsprof.BzrProfiler.profiler_lock.acquire()
+ try:
+ try:
+ thread.start()
+ finally:
+ bzrlib.lsprof.BzrProfiler.profiler_lock.release()
+ finally:
+ thread.join()
+ self.assertLength(2, calls)
=== modified file 'doc/developers/ppa.txt'
--- a/doc/developers/ppa.txt 2009-12-02 20:34:07 +0000
+++ b/doc/developers/ppa.txt 2010-06-30 04:11:36 +0000
@@ -13,13 +13,13 @@
__ https://help.launchpad.net/PPAQuickStart
-As of June 2008, there are three PPAs:
+As of June 2010, there are three PPAs:
<https://launchpad.net/~bzr/+archive>
- Final released versions.
+ Final released versions and updates.
<https://launchpad.net/~bzr-beta-ppa/+archive>
- Releases and release candidates.
+ Beta releases.
<https://launchpad.net/~bzr-nightly-ppa/+archive>
Automatic nightly builds from trunk.
@@ -27,13 +27,16 @@
We build packages for every supported Ubuntu release
<https://wiki.ubuntu.com/Releases>. Packages need no longer be updated
when the release passes end-of-life because all users should
-have upgraded by then. (As of May 2008, Edgy Eft is no longer supported.)
+have upgraded by then.
-We build a distinct package for each distrorelease. As of bzr 1.5, Dapper
-uses ``python-support`` and later distributions use ``python-central``.
+We build a distinct package for each distrorelease.
If you upload a release-specific version, you should add a suffix to the
package version, e.g. ``bzr.1.3-1~bazaar1~dapper1``.
+Dapper uses the ``python-support`` framework and later distributions use
+``python-central``. This has little effect on everyday packaging but does
+mean that some of the control files are quite different.
+
Every package is first uploaded into the beta ppa. For final release
versions it is also copied to the main PPA.
=== modified file 'setup.py'
--- a/setup.py 2010-06-22 04:15:25 +0000
+++ b/setup.py 2010-07-04 07:09:09 +0000
@@ -538,6 +538,14 @@
# time before living with docstring stripping
optimize = 1
compile_names = [f for f in self.outfiles if f.endswith('.py')]
+ # Round mtime to nearest even second so that installing on a FAT
+ # filesystem bytecode internal and script timestamps will match
+ for f in compile_names:
+ mtime = os.stat(f).st_mtime
+ remainder = mtime % 2
+ if remainder:
+ mtime -= remainder
+ os.utime(f, (mtime, mtime))
byte_compile(compile_names,
optimize=optimize,
force=self.force, prefix=self.install_dir,
More information about the bazaar-commits
mailing list