Rev 4656: Properly pack 2a repositories during conversion operations. (Robert Collins, #423818) in http://bazaar.launchpad.net/~lifeless/bzr/2.0
Robert Collins
robertc at robertcollins.net
Mon Sep 7 02:51:13 BST 2009
At http://bazaar.launchpad.net/~lifeless/bzr/2.0
------------------------------------------------------------
revno: 4656
revision-id: robertc at robertcollins.net-20090907015105-8dgub2pyjmyh5aga
parent: pqm at pqm.ubuntu.com-20090904090318-kl6h1ig825lpc8g4
committer: Robert Collins <robertc at robertcollins.net>
branch nick: 2.0
timestamp: Mon 2009-09-07 11:51:05 +1000
message:
Properly pack 2a repositories during conversion operations. (Robert Collins, #423818)
=== modified file 'NEWS'
--- a/NEWS 2009-09-04 07:47:47 +0000
+++ b/NEWS 2009-09-07 01:51:05 +0000
@@ -31,6 +31,14 @@
* Clearer message when Bazaar runs out of memory, instead of a ``MemoryError``
traceback. (Martin Pool, #109115)
+* Conversion to 2a will create a single pack for all the new revisions (as
+ long as it ran without interruption). This improves both ``bzr upgrade``
+ and ``bzr pull`` or ``bzr merge`` from local branches in older formats.
+ The autopack logic that occurs every 100 revisions during local
+ conversions was not returning that pack's identifier, which resulted in
+ the partial packs created during the conversion not being consolidated
+ at the end of the conversion process. (Robert Collins, #423818)
+
* Fetches from 2a to 2a are now again requested in 'groupcompress' order.
Groups that are seen as 'underutilized' will be repacked on-the-fly.
This means that when the source is fully packed, there is minimal
=== modified file 'bzrlib/repofmt/groupcompress_repo.py'
--- a/bzrlib/repofmt/groupcompress_repo.py 2009-09-04 03:58:41 +0000
+++ b/bzrlib/repofmt/groupcompress_repo.py 2009-09-07 01:51:05 +0000
@@ -617,10 +617,11 @@
self._remove_pack_from_memory(pack)
# record the newly available packs and stop advertising the old
# packs
- self._save_pack_names(clear_obsolete_packs=True)
+ result = self._save_pack_names(clear_obsolete_packs=True)
# Move the old packs out of the way now they are no longer referenced.
for revision_count, packs in pack_operations:
self._obsolete_packs(packs)
+ return result
class CHKInventoryRepository(KnitPackRepository):
=== modified file 'bzrlib/repofmt/pack_repo.py'
--- a/bzrlib/repofmt/pack_repo.py 2009-08-14 11:11:29 +0000
+++ b/bzrlib/repofmt/pack_repo.py 2009-09-07 01:51:05 +0000
@@ -2078,13 +2078,13 @@
"Repository %s has missing compression parent(s) %r "
% (self.repo, sorted(all_missing)))
self._remove_pack_indices(self._new_pack)
- should_autopack = False
+ any_new_content = False
if self._new_pack.data_inserted():
# get all the data to disk and read to use
self._new_pack.finish()
self.allocate(self._new_pack)
self._new_pack = None
- should_autopack = True
+ any_new_content = True
else:
self._new_pack.abort()
self._new_pack = None
@@ -2095,13 +2095,15 @@
self._remove_pack_from_memory(resumed_pack)
resumed_pack.finish()
self.allocate(resumed_pack)
- should_autopack = True
+ any_new_content = True
del self._resumed_packs[:]
- if should_autopack:
- if not self.autopack():
+ if any_new_content:
+ result = self.autopack()
+ if not result:
# when autopack takes no steps, the names list is still
# unsaved.
return self._save_pack_names()
+ return result
return []
def _suspend_write_group(self):
=== modified file 'bzrlib/tests/per_pack_repository.py'
--- a/bzrlib/tests/per_pack_repository.py 2009-08-14 00:55:42 +0000
+++ b/bzrlib/tests/per_pack_repository.py 2009-09-07 01:51:05 +0000
@@ -239,31 +239,38 @@
self.assertTrue(large_pack_name in pack_names)
def test_commit_write_group_returns_new_pack_names(self):
+ # This test doesn't need real disk.
+ self.vfs_transport_factory = tests.MemoryServer
format = self.get_format()
- tree = self.make_branch_and_tree('foo', format=format)
- tree.commit('first post')
- repo = tree.branch.repository
+ repo = self.make_repository('foo', format=format)
repo.lock_write()
try:
- repo.start_write_group()
- try:
- inv = inventory.Inventory(revision_id="A")
- inv.root.revision = "A"
- repo.texts.add_lines((inv.root.file_id, "A"), [], [])
- rev = _mod_revision.Revision(timestamp=0, timezone=None,
- committer="Foo Bar <foo at example.com>", message="Message",
- revision_id="A")
- rev.parent_ids = ()
- repo.add_revision("A", rev, inv=inv)
- except:
- repo.abort_write_group()
- raise
- else:
- old_names = repo._pack_collection._names.keys()
- result = repo.commit_write_group()
- cur_names = repo._pack_collection._names.keys()
- new_names = list(set(cur_names) - set(old_names))
- self.assertEqual(new_names, result)
+ # All current pack repository styles autopack at 10 revisions; and
+ # autopack as well as regular commit write group needs to return
+ # the new pack name. Looping is a little ugly, but we don't have a
+ # clean way to test both the autopack logic and the normal code
+ # path without doing this loop.
+ for pos in range(10):
+ revid = str(pos)
+ repo.start_write_group()
+ try:
+ inv = inventory.Inventory(revision_id=revid)
+ inv.root.revision = revid
+ repo.texts.add_lines((inv.root.file_id, revid), [], [])
+ rev = _mod_revision.Revision(timestamp=0, timezone=None,
+ committer="Foo Bar <foo at example.com>", message="Message",
+ revision_id=revid)
+ rev.parent_ids = ()
+ repo.add_revision(revid, rev, inv=inv)
+ except:
+ repo.abort_write_group()
+ raise
+ else:
+ old_names = repo._pack_collection._names.keys()
+ result = repo.commit_write_group()
+ cur_names = repo._pack_collection._names.keys()
+ new_names = list(set(cur_names) - set(old_names))
+ self.assertEqual(new_names, result)
finally:
repo.unlock()
More information about the bazaar-commits
mailing list