Rev 4676: Merge 2.0. in http://people.canonical.com/~robertc/baz2.0/integration
Robert Collins
robertc at robertcollins.net
Mon Sep 7 04:35:24 BST 2009
At http://people.canonical.com/~robertc/baz2.0/integration
------------------------------------------------------------
revno: 4676 [merge]
revision-id: robertc at robertcollins.net-20090907033506-2voaobfg5n4yxyjr
parent: pqm at pqm.ubuntu.com-20090905184318-tw9odqqk4fh93qrv
parent: pqm at pqm.ubuntu.com-20090907030023-8ixzna0yzpap1vnq
committer: Robert Collins <robertc at robertcollins.net>
branch nick: integration
timestamp: Mon 2009-09-07 13:35:06 +1000
message:
Merge 2.0.
modified:
NEWS NEWS-20050323055033-4e00b5db738777ff
bzrlib/groupcompress.py groupcompress.py-20080705181503-ccbxd6xuy1bdnrpu-8
bzrlib/hooks.py hooks.py-20070325015548-ix4np2q0kd8452au-1
bzrlib/knit.py knit.py-20051212171256-f056ac8f0fbe1bd9
bzrlib/repofmt/groupcompress_repo.py repofmt.py-20080715094215-wp1qfvoo7093c8qr-1
bzrlib/repofmt/pack_repo.py pack_repo.py-20070813041115-gjv5ma7ktfqwsjgn-1
bzrlib/repository.py rev_storage.py-20051111201905-119e9401e46257e3
bzrlib/revision.py revision.py-20050309040759-e77802c08f3999d5
bzrlib/tests/per_pack_repository.py test_pack_repository-20080801043947-eaw0e6h2gu75kwmy-1
bzrlib/tests/per_repository/test_write_group.py test_write_group.py-20070716105516-89n34xtogq5frn0m-1
bzrlib/tests/test_hooks.py test_hooks.py-20070628030849-89rtsbe5dmer5npz-1
bzrlib/tests/test_repository.py test_repository.py-20060131075918-65c555b881612f4d
bzrlib/tests/test_trace.py testtrace.py-20051110225523-a21117fc7a07eeff
bzrlib/trace.py trace.py-20050309040759-c8ed824bdcd4748a
=== modified file 'NEWS'
--- a/NEWS 2009-09-05 18:43:18 +0000
+++ b/NEWS 2009-09-07 03:35:06 +0000
@@ -34,6 +34,20 @@
longer report incorrect errors about ``Missing inventory ('TREE_ROOT', ...)``
(Robert Collins, #416732)
+* ``bzr info -v`` on a 2a format still claimed that it was a "Development
+ format" (John Arbash Meinel, #424392)
+
+* Clearer message when Bazaar runs out of memory, instead of a ``MemoryError``
+ traceback. (Martin Pool, #109115)
+
+* Conversion to 2a will create a single pack for all the new revisions (as
+ long as it runs without interruption). This improves both ``bzr upgrade``
+ and ``bzr pull`` or ``bzr merge`` from local branches in older formats.
+ The autopack logic that occurs every 100 revisions during local
+ conversions was not returning that pack's identifier, which resulted in
+ the partial packs created during the conversion not being consolidated
+ at the end of the conversion process. (Robert Collins, #423818)
+
* Don't restrict the command name used to run the test suite.
(Vincent Ladeuil, #419950)
@@ -47,12 +61,20 @@
* Network streams now decode adjacent records of the same type into a
single stream, reducing layering churn. (Robert Collins)
+* Prevent some kinds of incomplete data from being committed to a 2a
+ repository, such as revisions without inventories or inventories without
+ chk_bytes root records.
+ (Andrew Bennetts, #423506)
+
Improvements
************
Documentation
*************
+* Help on hooks no longer says 'Not deprecated' for hooks that are
+ currently supported. (Ian Clatworthy, #422415)
+
API Changes
***********
@@ -100,10 +122,31 @@
longer report incorrect errors about ``Missing inventory ('TREE_ROOT', ...)``
(Robert Collins, #416732)
+* ``bzr info -v`` on a 2a format still claimed that it was a "Development
+ format" (John Arbash Meinel, #424392)
+
* ``bzr log stacked-branch`` shows the full log including
revisions that are in the fallback repository. (Regressed in 2.0rc1).
(John Arbash Meinel, #419241)
+* Clearer message when Bazaar runs out of memory, instead of a ``MemoryError``
+ traceback. (Martin Pool, #109115)
+
+* Conversion to 2a will create a single pack for all the new revisions (as
+ long as it runs without interruption). This improves both ``bzr upgrade``
+ and ``bzr pull`` or ``bzr merge`` from local branches in older formats.
+ The autopack logic that occurs every 100 revisions during local
+ conversions was not returning that pack's identifier, which resulted in
+ the partial packs created during the conversion not being consolidated
+ at the end of the conversion process. (Robert Collins, #423818)
+
+* Fetches from 2a to 2a are now again requested in 'groupcompress' order.
+ Groups that are seen as 'underutilized' will be repacked on-the-fly.
+ This means that when the source is fully packed, there is minimal
+ overhead during the fetch, but if the source is poorly packed the result
+ is a fairly well packed repository (not as good as 'bzr pack' but
+ good enough). (Robert Collins, John Arbash Meinel, #402652)
+
* Fix a potential segmentation fault when doing 'log' of a branch that had
ghosts in its mainline. (Evaluating None as a tuple is bad.)
(John Arbash Meinel, #419241)
@@ -119,9 +162,17 @@
* Network streams now decode adjacent records of the same type into a
single stream, reducing layering churn. (Robert Collins)
+* Prevent some kinds of incomplete data from being committed to a 2a
+ repository, such as revisions without inventories or inventories without
+ chk_bytes root records.
+ (Andrew Bennetts, #423506)
+
Documentation
*************
+* Help on hooks no longer says 'Not deprecated' for hooks that are
+ currently supported. (Ian Clatworthy, #422415)
+
* The main table of contents now provides links to the new Migration Docs
and Plugins Guide. (Ian Clatworthy)
=== modified file 'bzrlib/groupcompress.py'
--- a/bzrlib/groupcompress.py 2009-09-04 01:44:31 +0000
+++ b/bzrlib/groupcompress.py 2009-09-07 03:35:06 +0000
@@ -1811,7 +1811,7 @@
def __init__(self, graph_index, is_locked, parents=True,
add_callback=None, track_external_parent_refs=False,
- inconsistency_fatal=True):
+ inconsistency_fatal=True, track_new_keys=False):
"""Construct a _GCGraphIndex on a graph_index.
:param graph_index: An implementation of bzrlib.index.GraphIndex.
@@ -1837,7 +1837,8 @@
self._is_locked = is_locked
self._inconsistency_fatal = inconsistency_fatal
if track_external_parent_refs:
- self._key_dependencies = knit._KeyRefs()
+ self._key_dependencies = knit._KeyRefs(
+ track_new_keys=track_new_keys)
else:
self._key_dependencies = None
@@ -1897,10 +1898,14 @@
result.append((key, value))
records = result
key_dependencies = self._key_dependencies
- if key_dependencies is not None and self._parents:
- for key, value, refs in records:
- parents = refs[0]
- key_dependencies.add_references(key, parents)
+ if key_dependencies is not None:
+ if self._parents:
+ for key, value, refs in records:
+ parents = refs[0]
+ key_dependencies.add_references(key, parents)
+ else:
+ for key, value, refs in records:
+ key_dependencies.add_key(key)
self._add_callback(records)
def _check_read(self):
@@ -1963,7 +1968,7 @@
"""Return the keys of missing parents."""
# Copied from _KnitGraphIndex.get_missing_parents
# We may have false positives, so filter those out.
- self._key_dependencies.add_keys(
+ self._key_dependencies.satisfy_refs_for_keys(
self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
return frozenset(self._key_dependencies.get_unsatisfied_refs())
@@ -2023,17 +2028,17 @@
This allows this _GCGraphIndex to keep track of any missing
compression parents we may want to have filled in to make those
- indices valid.
+ indices valid. It also allows _GCGraphIndex to track any new keys.
:param graph_index: A GraphIndex
"""
- if self._key_dependencies is not None:
- # Add parent refs from graph_index (and discard parent refs that
- # the graph_index has).
- add_refs = self._key_dependencies.add_references
- for node in graph_index.iter_all_entries():
- add_refs(node[1], node[3][0])
-
+ key_dependencies = self._key_dependencies
+ if key_dependencies is None:
+ return
+ for node in graph_index.iter_all_entries():
+ # Add parent refs from graph_index (and discard parent refs
+ # that the graph_index has).
+ key_dependencies.add_references(node[1], node[3][0])
from bzrlib._groupcompress_py import (
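
The reworked add_records logic above dispatches on whether the index stores
parent references: with parents available, each record's references feed the
dependency tracker; without them, the inserted keys can still be remembered
as new. A minimal standalone sketch of that dispatch (illustrative names,
not bzrlib API):

    def feed_key_tracking(records, key_dependencies, has_parents):
        # records are (key, value, refs) tuples as in add_records; refs[0]
        # holds the parent keys when the index tracks parents.
        if key_dependencies is None:
            return
        if has_parents:
            for key, value, refs in records:
                key_dependencies.add_references(key, refs[0])
        else:
            for key, value, refs in records:
                key_dependencies.add_key(key)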
=== modified file 'bzrlib/hooks.py'
--- a/bzrlib/hooks.py 2009-06-10 03:31:01 +0000
+++ b/bzrlib/hooks.py 2009-09-01 12:29:54 +0000
@@ -219,9 +219,7 @@
strings.append('Introduced in: %s' % introduced_string)
if self.deprecated:
deprecated_string = _format_version_tuple(self.deprecated)
- else:
- deprecated_string = 'Not deprecated'
- strings.append('Deprecated in: %s' % deprecated_string)
+ strings.append('Deprecated in: %s' % deprecated_string)
strings.append('')
strings.extend(textwrap.wrap(self.__doc__,
break_long_words=False))
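
The hooks.py change above means the "Deprecated in:" line is only emitted
for hooks that actually are deprecated, instead of printing "Not deprecated"
for every current hook. A minimal sketch of the resulting header logic
(hypothetical helper, not the bzrlib implementation):

    def hook_doc_header(name, introduced=None, deprecated=None):
        strings = [name, '~' * len(name), '']
        if introduced:
            strings.append('Introduced in: %s' % (introduced,))
        if deprecated:
            strings.append('Deprecated in: %s' % (deprecated,))
        strings.append('')
        return strings

    assert 'Deprecated in: 1.6' in hook_doc_header('post_change', '1.4', '1.6')
    assert not any(s.startswith('Deprecated')
                   for s in hook_doc_header('post_change', '1.4'))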
=== modified file 'bzrlib/knit.py'
--- a/bzrlib/knit.py 2009-08-30 21:34:42 +0000
+++ b/bzrlib/knit.py 2009-09-07 03:35:06 +0000
@@ -2777,9 +2777,20 @@
class _KeyRefs(object):
- def __init__(self):
+ def __init__(self, track_new_keys=False):
# dict mapping 'key' to 'set of keys referring to that key'
self.refs = {}
+ if track_new_keys:
+ # set remembering all new keys
+ self.new_keys = set()
+ else:
+ self.new_keys = None
+
+ def clear(self):
+ if self.refs:
+ self.refs.clear()
+ if self.new_keys:
+ self.new_keys.clear()
def add_references(self, key, refs):
# Record the new references
@@ -2792,19 +2803,28 @@
# Discard references satisfied by the new key
self.add_key(key)
+ def get_new_keys(self):
+ return self.new_keys
+
def get_unsatisfied_refs(self):
return self.refs.iterkeys()
- def add_key(self, key):
+ def _satisfy_refs_for_key(self, key):
try:
del self.refs[key]
except KeyError:
# No keys depended on this key. That's ok.
pass
- def add_keys(self, keys):
+ def add_key(self, key):
+ # satisfy refs for key, and remember that we've seen this key.
+ self._satisfy_refs_for_key(key)
+ if self.new_keys is not None:
+ self.new_keys.add(key)
+
+ def satisfy_refs_for_keys(self, keys):
for key in keys:
- self.add_key(key)
+ self._satisfy_refs_for_key(key)
def get_referrers(self):
result = set()
@@ -2972,7 +2992,7 @@
# If updating this, you should also update
# groupcompress._GCGraphIndex.get_missing_parents
# We may have false positives, so filter those out.
- self._key_dependencies.add_keys(
+ self._key_dependencies.satisfy_refs_for_keys(
self.get_parent_map(self._key_dependencies.get_unsatisfied_refs()))
return frozenset(self._key_dependencies.get_unsatisfied_refs())
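
Under the reworked _KeyRefs API above, add_key() both satisfies any pending
references to a key and (when track_new_keys is enabled) remembers it as
new, while the renamed satisfy_refs_for_keys() only clears references. A
usage sketch, assuming the class behaves exactly as defined in this diff:

    refs = _KeyRefs(track_new_keys=True)
    refs.add_references(('rev-2',), [('rev-1',)])
    # ('rev-1',) is now an unsatisfied reference; add_references also calls
    # add_key() for ('rev-2',), so it is remembered as a new key.
    refs.add_key(('rev-1',))
    # The reference is satisfied, and ('rev-1',) is remembered as new too.
    assert refs.get_new_keys() == set([('rev-1',), ('rev-2',)])
    assert list(refs.get_unsatisfied_refs()) == []
    refs.clear()   # resets both the refs dict and the new-keys set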
=== modified file 'bzrlib/repofmt/groupcompress_repo.py'
--- a/bzrlib/repofmt/groupcompress_repo.py 2009-09-01 06:10:24 +0000
+++ b/bzrlib/repofmt/groupcompress_repo.py 2009-09-07 03:35:06 +0000
@@ -584,6 +584,53 @@
pack_factory = GCPack
resumed_pack_factory = ResumedGCPack
+ def _check_new_inventories(self):
+ """Detect missing inventories or chk root entries for the new revisions
+ in this write group.
+
+ :returns: set of missing keys. Note that not every missing key is
+ guaranteed to be reported.
+ """
+ if getattr(self.repo, 'chk_bytes', None) is None:
+ return set()
+ # Ensure that all revisions added in this write group have:
+ # - corresponding inventories,
+ # - chk root entries for those inventories,
+ # - and any present parent inventories have their chk root
+ # entries too.
+ # And all this should be independent of any fallback repository.
+ key_deps = self.repo.revisions._index._key_dependencies
+ new_revisions_keys = key_deps.get_new_keys()
+ no_fallback_inv_index = self.repo.inventories._index
+ no_fallback_chk_bytes_index = self.repo.chk_bytes._index
+ inv_parent_map = no_fallback_inv_index.get_parent_map(
+ new_revisions_keys)
+ # Are any inventories corresponding to the new revisions missing?
+ corresponding_invs = set(inv_parent_map)
+ missing_corresponding = set(new_revisions_keys)
+ missing_corresponding.difference_update(corresponding_invs)
+ if missing_corresponding:
+ return [('inventories', key) for key in missing_corresponding]
+ # Are any chk root entries missing for any inventories? This includes
+ # any present parent inventories, which may be used when calculating
+ # deltas for streaming.
+ all_inv_keys = set(corresponding_invs)
+ for parent_inv_keys in inv_parent_map.itervalues():
+ all_inv_keys.update(parent_inv_keys)
+ # Filter out ghost parents.
+ all_inv_keys.intersection_update(
+ no_fallback_inv_index.get_parent_map(all_inv_keys))
+ all_missing = set()
+ inv_ids = [key[-1] for key in all_inv_keys]
+ for inv in self.repo.iter_inventories(inv_ids, 'unordered'):
+ root_keys = set([inv.id_to_entry.key()])
+ if inv.parent_id_basename_to_file_id is not None:
+ root_keys.add(inv.parent_id_basename_to_file_id.key())
+ present = no_fallback_chk_bytes_index.get_parent_map(root_keys)
+ missing = root_keys.difference(present)
+ all_missing.update([('chk_bytes',) + key for key in missing])
+ return all_missing
+
def _execute_pack_operations(self, pack_operations,
_packer_class=GCCHKPacker,
reload_func=None):
@@ -617,10 +664,11 @@
self._remove_pack_from_memory(pack)
# record the newly available packs and stop advertising the old
# packs
- self._save_pack_names(clear_obsolete_packs=True)
+ result = self._save_pack_names(clear_obsolete_packs=True)
# Move the old packs out of the way now they are no longer referenced.
for revision_count, packs in pack_operations:
self._obsolete_packs(packs)
+ return result
class CHKInventoryRepository(KnitPackRepository):
@@ -651,7 +699,7 @@
_GCGraphIndex(self._pack_collection.revision_index.combined_index,
add_callback=self._pack_collection.revision_index.add_callback,
parents=True, is_locked=self.is_locked,
- track_external_parent_refs=True),
+ track_external_parent_refs=True, track_new_keys=True),
access=self._pack_collection.revision_index.data_access,
delta=False)
self.signatures = GroupCompressVersionedFiles(
@@ -1145,3 +1193,8 @@
def get_format_string(self):
return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')
+
+ def get_format_description(self):
+ """See RepositoryFormat.get_format_description()."""
+ return ("Repository format 2a - rich roots, group compression"
+ " and chk inventories")
=== modified file 'bzrlib/repofmt/pack_repo.py'
--- a/bzrlib/repofmt/pack_repo.py 2009-08-14 11:11:29 +0000
+++ b/bzrlib/repofmt/pack_repo.py 2009-09-07 03:00:23 +0000
@@ -2063,6 +2063,16 @@
self._remove_pack_indices(resumed_pack)
del self._resumed_packs[:]
+ def _check_new_inventories(self):
+ """Detect missing inventories in this write group.
+
+ :returns: set of missing keys. Note that not every missing key is
+ guaranteed to be reported.
+ """
+ # The base implementation does no checks. GCRepositoryPackCollection
+ # overrides this.
+ return set()
+
def _commit_write_group(self):
all_missing = set()
for prefix, versioned_file in (
@@ -2077,14 +2087,19 @@
raise errors.BzrCheckError(
"Repository %s has missing compression parent(s) %r "
% (self.repo, sorted(all_missing)))
+ all_missing = self._check_new_inventories()
+ if all_missing:
+ raise errors.BzrCheckError(
+ "Repository %s missing keys for new revisions %r "
+ % (self.repo, sorted(all_missing)))
self._remove_pack_indices(self._new_pack)
- should_autopack = False
+ any_new_content = False
if self._new_pack.data_inserted():
# get all the data to disk and read to use
self._new_pack.finish()
self.allocate(self._new_pack)
self._new_pack = None
- should_autopack = True
+ any_new_content = True
else:
self._new_pack.abort()
self._new_pack = None
@@ -2095,13 +2110,15 @@
self._remove_pack_from_memory(resumed_pack)
resumed_pack.finish()
self.allocate(resumed_pack)
- should_autopack = True
+ any_new_content = True
del self._resumed_packs[:]
- if should_autopack:
- if not self.autopack():
+ if any_new_content:
+ result = self.autopack()
+ if not result:
# when autopack takes no steps, the names list is still
# unsaved.
return self._save_pack_names()
+ return result
return []
def _suspend_write_group(self):
@@ -2222,7 +2239,7 @@
% (self._format, self.bzrdir.transport.base))
def _abort_write_group(self):
- self.revisions._index._key_dependencies.refs.clear()
+ self.revisions._index._key_dependencies.clear()
self._pack_collection._abort_write_group()
def _get_source(self, to_format):
@@ -2242,13 +2259,14 @@
self._pack_collection._start_write_group()
def _commit_write_group(self):
- self.revisions._index._key_dependencies.refs.clear()
- return self._pack_collection._commit_write_group()
+ hint = self._pack_collection._commit_write_group()
+ self.revisions._index._key_dependencies.clear()
+ return hint
def suspend_write_group(self):
# XXX check self._write_group is self.get_transaction()?
tokens = self._pack_collection._suspend_write_group()
- self.revisions._index._key_dependencies.refs.clear()
+ self.revisions._index._key_dependencies.clear()
self._write_group = None
return tokens
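
The heart of the #423818 fix in pack_repo.py is return-value plumbing:
_commit_write_group previously discarded the result of autopack(), so
callers never learned the names of packs created during the write group. A
minimal sketch of the corrected flow (illustrative names, not bzrlib API):

    def commit_write_group_hint(collection):
        any_new_content = collection.finish_pending_packs()
        if any_new_content:
            result = collection.autopack()
            if not result:
                # Autopack took no steps, so the names list is still
                # unsaved and must be written out here.
                return collection.save_pack_names()
            return result   # previously dropped, causing bug #423818
        return []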
=== modified file 'bzrlib/repository.py'
--- a/bzrlib/repository.py 2009-09-04 00:49:55 +0000
+++ b/bzrlib/repository.py 2009-09-07 03:35:06 +0000
@@ -1604,7 +1604,7 @@
# but at the moment we're only checking for texts referenced by
# inventories at the graph's edge.
key_deps = self.revisions._index._key_dependencies
- key_deps.add_keys(present_inventories)
+ key_deps.satisfy_refs_for_keys(present_inventories)
referrers = frozenset(r[0] for r in key_deps.get_referrers())
file_ids = self.fileids_altered_by_revision_ids(referrers)
missing_texts = set()
=== modified file 'bzrlib/revision.py'
--- a/bzrlib/revision.py 2009-06-30 16:16:55 +0000
+++ b/bzrlib/revision.py 2009-09-01 12:39:21 +0000
@@ -88,7 +88,7 @@
raise ValueError("invalid property name %r" % name)
if not isinstance(value, basestring):
raise ValueError("invalid property value %r for %r" %
- (name, value))
+ (value, name))
def get_history(self, repository):
"""Return the canonical line-of-history for this revision.
=== modified file 'bzrlib/tests/per_pack_repository.py'
--- a/bzrlib/tests/per_pack_repository.py 2009-08-27 22:17:35 +0000
+++ b/bzrlib/tests/per_pack_repository.py 2009-09-07 03:35:06 +0000
@@ -239,31 +239,38 @@
self.assertTrue(large_pack_name in pack_names)
def test_commit_write_group_returns_new_pack_names(self):
+ # This test doesn't need real disk.
+ self.vfs_transport_factory = tests.MemoryServer
format = self.get_format()
- tree = self.make_branch_and_tree('foo', format=format)
- tree.commit('first post')
- repo = tree.branch.repository
+ repo = self.make_repository('foo', format=format)
repo.lock_write()
try:
- repo.start_write_group()
- try:
- inv = inventory.Inventory(revision_id="A")
- inv.root.revision = "A"
- repo.texts.add_lines((inv.root.file_id, "A"), [], [])
- rev = _mod_revision.Revision(timestamp=0, timezone=None,
- committer="Foo Bar <foo at example.com>", message="Message",
- revision_id="A")
- rev.parent_ids = ()
- repo.add_revision("A", rev, inv=inv)
- except:
- repo.abort_write_group()
- raise
- else:
- old_names = repo._pack_collection._names.keys()
- result = repo.commit_write_group()
- cur_names = repo._pack_collection._names.keys()
- new_names = list(set(cur_names) - set(old_names))
- self.assertEqual(new_names, result)
+ # All current pack repository styles autopack at 10 revisions; both
+ # autopack and a regular commit write group need to return the new
+ # pack names. Looping is a little ugly, but we don't have a clean
+ # way to test both the autopack logic and the normal code path
+ # without doing this loop.
+ for pos in range(10):
+ revid = str(pos)
+ repo.start_write_group()
+ try:
+ inv = inventory.Inventory(revision_id=revid)
+ inv.root.revision = revid
+ repo.texts.add_lines((inv.root.file_id, revid), [], [])
+ rev = _mod_revision.Revision(timestamp=0, timezone=None,
+ committer="Foo Bar <foo at example.com>", message="Message",
+ revision_id=revid)
+ rev.parent_ids = ()
+ repo.add_revision(revid, rev, inv=inv)
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ old_names = repo._pack_collection._names.keys()
+ result = repo.commit_write_group()
+ cur_names = repo._pack_collection._names.keys()
+ new_names = list(set(cur_names) - set(old_names))
+ self.assertEqual(new_names, result)
finally:
repo.unlock()
@@ -964,7 +971,7 @@
('add', ('', 'root-id', 'directory', None))])
builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
builder.finish_series()
- repo = self.make_repository('target')
+ repo = self.make_repository('target', format=self.get_format())
b = builder.get_branch()
b.lock_read()
self.addCleanup(b.unlock)
@@ -1002,8 +1009,19 @@
source_repo, target_repo = self.create_source_and_target()
target_repo.start_write_group()
try:
- stream = source_repo.revisions.get_record_stream([('B-id',)],
- 'unordered', True)
+ # Copy all texts, inventories, and chks so that nothing is missing
+ # for revision B-id.
+ for vf_name in ['texts', 'chk_bytes', 'inventories']:
+ source_vf = getattr(source_repo, vf_name, None)
+ if source_vf is None:
+ continue
+ target_vf = getattr(target_repo, vf_name)
+ stream = source_vf.get_record_stream(
+ source_vf.keys(), 'unordered', True)
+ target_vf.insert_record_stream(stream)
+ # Copy just revision B-id
+ stream = source_repo.revisions.get_record_stream(
+ [('B-id',)], 'unordered', True)
target_repo.revisions.insert_record_stream(stream)
key_refs = target_repo.revisions._index._key_dependencies
self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
=== modified file 'bzrlib/tests/per_repository/test_write_group.py'
--- a/bzrlib/tests/per_repository/test_write_group.py 2009-08-17 04:18:57 +0000
+++ b/bzrlib/tests/per_repository/test_write_group.py 2009-09-02 03:07:23 +0000
@@ -361,6 +361,143 @@
sink.insert_stream((), repo._format, tokens)
self.assertEqual([True], call_log)
+ def test_missing_chk_root_for_inventory(self):
+ """commit_write_group fails with BzrCheckError when the chk root record
+ for a new inventory is missing.
+ """
+ builder = self.make_branch_builder('simple-branch')
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ b = builder.get_branch()
+ if not b.repository._format.supports_chks:
+ raise TestNotApplicable('requires repository with chk_bytes')
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ repo = self.make_repository('damaged-repo')
+ repo.lock_write()
+ repo.start_write_group()
+ # Now, add the objects manually
+ text_keys = [('file-id', 'A-id'), ('root-id', 'A-id')]
+ # Directly add the texts, inventory, and revision object for 'A-id' --
+ # but don't add the chk_bytes.
+ src_repo = b.repository
+ repo.texts.insert_record_stream(src_repo.texts.get_record_stream(
+ text_keys, 'unordered', True))
+ repo.inventories.insert_record_stream(
+ src_repo.inventories.get_record_stream(
+ [('A-id',)], 'unordered', True))
+ repo.revisions.insert_record_stream(
+ src_repo.revisions.get_record_stream(
+ [('A-id',)], 'unordered', True))
+ # Make sure the presence of the missing data in a fallback does not
+ # prevent the error.
+ repo.add_fallback_repository(b.repository)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertRaises(
+ errors.BzrCheckError, reopened_repo.commit_write_group)
+ reopened_repo.abort_write_group()
+
+ def test_missing_chk_root_for_unchanged_inventory(self):
+ """commit_write_group fails with BzrCheckError when the chk root record
+ for a new inventory is missing, even if the parent inventory is present
+ and has identical content (i.e. the same chk root).
+
+ A stacked repository containing only a revision with an identical
+ inventory to its parent will still have the chk root records for those
+ inventories.
+
+ (In principle the chk records are unnecessary in this case, but in
+ practice bzr 2.0rc1 (at least) expects to find them.)
+ """
+ # Make a branch where the last two revisions have identical
+ # inventories.
+ builder = self.make_branch_builder('simple-branch')
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', None, [])
+ builder.build_snapshot('C-id', None, [])
+ b = builder.get_branch()
+ if not b.repository._format.supports_chks:
+ raise TestNotApplicable('requires repository with chk_bytes')
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ # check our setup: B-id and C-id should have identical chk root keys.
+ inv_b = b.repository.get_inventory('B-id')
+ inv_c = b.repository.get_inventory('C-id')
+ self.assertEqual(inv_b.id_to_entry.key(), inv_c.id_to_entry.key())
+ # Now, manually insert objects for a stacked repo with only revision
+ # C-id:
+ # We need ('revisions', 'C-id'), ('inventories', 'C-id'),
+ # ('inventories', 'B-id'), and the corresponding chk roots for those
+ # inventories.
+ repo = self.make_repository('damaged-repo')
+ repo.lock_write()
+ repo.start_write_group()
+ src_repo = b.repository
+ repo.inventories.insert_record_stream(
+ src_repo.inventories.get_record_stream(
+ [('B-id',), ('C-id',)], 'unordered', True))
+ repo.revisions.insert_record_stream(
+ src_repo.revisions.get_record_stream(
+ [('C-id',)], 'unordered', True))
+ # Make sure the presence of the missing data in a fallback does not
+ # prevent the error.
+ repo.add_fallback_repository(b.repository)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertRaises(
+ errors.BzrCheckError, reopened_repo.commit_write_group)
+ reopened_repo.abort_write_group()
+
+ def test_missing_chk_root_for_parent_inventory(self):
+ """commit_write_group fails with BzrCheckError when the chk root record
+ for a parent inventory of a new revision is missing.
+ """
+ builder = self.make_branch_builder('simple-branch')
+ builder.build_snapshot('A-id', None, [
+ ('add', ('', 'root-id', 'directory', None)),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('B-id', None, [])
+ builder.build_snapshot('C-id', None, [
+ ('modify', ('file-id', 'new-content'))])
+ b = builder.get_branch()
+ if not b.repository._format.supports_chks:
+ raise TestNotApplicable('requires repository with chk_bytes')
+ b.lock_read()
+ self.addCleanup(b.unlock)
+ # Now, manually insert objects for a stacked repo with only revision
+ # C-id, *except* the chk root entry for the parent inventory.
+ # We need ('revisions', 'C-id'), ('inventories', 'C-id'),
+ # ('inventories', 'B-id'), and the corresponding chk roots for those
+ # inventories.
+ inv_c = b.repository.get_inventory('C-id')
+ chk_keys_for_c_only = [
+ inv_c.id_to_entry.key(), inv_c.parent_id_basename_to_file_id.key()]
+ repo = self.make_repository('damaged-repo')
+ repo.lock_write()
+ repo.start_write_group()
+ src_repo = b.repository
+ repo.chk_bytes.insert_record_stream(
+ src_repo.chk_bytes.get_record_stream(
+ chk_keys_for_c_only, 'unordered', True))
+ repo.inventories.insert_record_stream(
+ src_repo.inventories.get_record_stream(
+ [('B-id',), ('C-id',)], 'unordered', True))
+ repo.revisions.insert_record_stream(
+ src_repo.revisions.get_record_stream(
+ [('C-id',)], 'unordered', True))
+ # Make sure the presence of the missing data in a fallback does not
+ # prevent the error.
+ repo.add_fallback_repository(b.repository)
+ self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
+ reopened_repo = self.reopen_repo_and_resume_write_group(repo)
+ self.assertRaises(
+ errors.BzrCheckError, reopened_repo.commit_write_group)
+ reopened_repo.abort_write_group()
+
class TestResumeableWriteGroup(TestCaseWithRepository):
=== modified file 'bzrlib/tests/test_hooks.py'
--- a/bzrlib/tests/test_hooks.py 2009-04-06 22:31:35 +0000
+++ b/bzrlib/tests/test_hooks.py 2009-09-01 12:29:54 +0000
@@ -80,7 +80,6 @@
"~~~~~~~~~~~~~~~\n"
"\n"
"Introduced in: 1.4\n"
- "Deprecated in: Not deprecated\n"
"\n"
"Invoked after the tip of a branch changes. Called with a\n"
"ChangeBranchTipParams object.\n"
@@ -89,7 +88,6 @@
"~~~~~~~~~~~~~~\n"
"\n"
"Introduced in: 1.6\n"
- "Deprecated in: Not deprecated\n"
"\n"
"Invoked before the tip of a branch changes. Called with a\n"
"ChangeBranchTipParams object. Hooks should raise TipChangeRejected to\n"
@@ -133,7 +131,6 @@
"~~~~~~~~~~~~~~~\n"
"\n"
"Introduced in: 0.15\n"
- "Deprecated in: Not deprecated\n"
"\n"
"Invoked after changing the tip of a branch object. Called with a\n"
"bzrlib.branch.PostChangeBranchTipParams object\n", hook.docs())
=== modified file 'bzrlib/tests/test_repository.py'
--- a/bzrlib/tests/test_repository.py 2009-09-04 00:49:55 +0000
+++ b/bzrlib/tests/test_repository.py 2009-09-07 03:35:06 +0000
@@ -705,6 +705,28 @@
# versions of the file.
self.assertEqual(file_1_details[0][:3], file_2_details[0][:3])
+ def test_fetch_combines_groups(self):
+ builder = self.make_branch_builder('source', format='2a')
+ builder.start_series()
+ builder.build_snapshot('1', None, [
+ ('add', ('', 'root-id', 'directory', '')),
+ ('add', ('file', 'file-id', 'file', 'content\n'))])
+ builder.build_snapshot('2', ['1'], [
+ ('modify', ('file-id', 'content-2\n'))])
+ builder.finish_series()
+ source = builder.get_branch()
+ target = self.make_repository('target', format='2a')
+ target.fetch(source.repository)
+ target.lock_read()
+ self.addCleanup(target.unlock)
+ details = target.texts._index.get_build_details(
+ [('file-id', '1',), ('file-id', '2',)])
+ file_1_details = details[('file-id', '1')]
+ file_2_details = details[('file-id', '2')]
+ # The index, and what to read off disk, should be the same for both
+ # versions of the file.
+ self.assertEqual(file_1_details[0][:3], file_2_details[0][:3])
+
def test_format_pack_compresses_True(self):
repo = self.make_repository('repo', format='2a')
self.assertTrue(repo._format.pack_compresses)
=== modified file 'bzrlib/tests/test_trace.py'
--- a/bzrlib/tests/test_trace.py 2009-08-20 04:30:16 +0000
+++ b/bzrlib/tests/test_trace.py 2009-09-03 02:59:56 +0000
@@ -72,6 +72,15 @@
self.assertTrue(len(msg) > 0)
self.assertEqualDiff(msg, 'bzr: interrupted\n')
+ def test_format_memory_error(self):
+ try:
+ raise MemoryError()
+ except MemoryError:
+ pass
+ msg = _format_exception()
+ self.assertEquals(msg,
+ "bzr: out of memory\n")
+
def test_format_os_error(self):
try:
os.rmdir('nosuchfile22222')
=== modified file 'bzrlib/trace.py'
--- a/bzrlib/trace.py 2009-08-20 05:02:45 +0000
+++ b/bzrlib/trace.py 2009-09-03 02:59:56 +0000
@@ -432,6 +432,9 @@
elif isinstance(exc_object, KeyboardInterrupt):
err_file.write("bzr: interrupted\n")
return errors.EXIT_ERROR
+ elif isinstance(exc_object, MemoryError):
+ err_file.write("bzr: out of memory\n")
+ return errors.EXIT_ERROR
elif isinstance(exc_object, ImportError) \
and str(exc_object).startswith("No module named "):
report_user_error(exc_info, err_file,
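
The new MemoryError branch follows the existing dispatch pattern in
bzrlib.trace: well-understood exception classes get a short one-line
message on stderr instead of a full traceback. A minimal standalone sketch
of that pattern (illustrative function name, not the bzrlib original):

    import sys

    def report_exception_brief(exc_object, err_file):
        if isinstance(exc_object, KeyboardInterrupt):
            err_file.write("bzr: interrupted\n")
            return True
        elif isinstance(exc_object, MemoryError):
            err_file.write("bzr: out of memory\n")
            return True
        return False   # caller falls through to the full traceback report

    try:
        raise MemoryError()
    except MemoryError:
        report_exception_brief(sys.exc_info()[1], sys.stderr)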