Rev 3808: The first function for KnitVersionedFiles can now retry on request. in http://bzr.arbash-meinel.com/branches/bzr/1.9-dev/pack_retry_153786
John Arbash Meinel
john at arbash-meinel.com
Sat Oct 25 02:42:57 BST 2008
At http://bzr.arbash-meinel.com/branches/bzr/1.9-dev/pack_retry_153786
------------------------------------------------------------
revno: 3808
revision-id: john at arbash-meinel.com-20081025014248-zhy0bg5nf238vc29
parent: john at arbash-meinel.com-20081025003853-3orjg3p78750qp4r
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: pack_retry_153786
timestamp: Fri 2008-10-24 20:42:48 -0500
message:
The first function for KnitVersionedFiles can now retry on request.
_get_record_map() now includes logic to retry operations if they fail due to a
missing .pack file.
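In outline, the retry added to _get_record_map() follows the pattern sketched below. This is a minimal, self-contained illustration, not the real code: read_once and reload_packs are hypothetical stand-ins for the knit read and the pack-collection reload, and the real loop catches errors.RetryWithNewPacks and lets self._access.reload_or_raise() decide whether to retry or re-raise.

def read_with_retry(read_once, reload_packs):
    # Hypothetical stand-ins: read_once() raises IOError when a .pack file
    # has vanished; reload_packs() refreshes the pack list and returns True
    # if it found something new.
    reloaded = False
    while True:
        try:
            return read_once()
        except IOError:
            if reloaded:
                # We already reloaded once and still failed: give up.
                raise
            if not reload_packs():
                # Nothing changed on disk, so retrying cannot help.
                raise
            reloaded = True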
-------------- next part --------------
=== modified file 'bzrlib/errors.py'
--- a/bzrlib/errors.py 2008-10-24 20:13:11 +0000
+++ b/bzrlib/errors.py 2008-10-25 01:42:48 +0000
@@ -1493,7 +1493,7 @@
 
     internal_error = True
 
-    _fmt = ("Pack files have changed, reload and retry.")
+    _fmt = ("Pack files have changed, reload and retry. %(orig_error)s")
 
     def __init__(self, reload_occurred, exc_info):
         """create a new RestartWithNewPacks error.
@@ -1509,6 +1509,7 @@
         BzrError.__init__(self)
         self.reload_occurred = reload_occurred
         self.exc_info = exc_info
+        self.orig_error = exc_info[1]
         # TODO: The global error handler should probably treat this by
         #       raising/printing the original exception with a bit about
         #       RetryWithNewPacks also not being caught
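The new %(orig_error)s placeholder works because BzrError substitutes its format string against the instance's attributes, so storing exc_info[1] as self.orig_error is enough to get the original error text into the message. A simplified stand-in (FakeBzrError is illustrative only, not the real BzrError):

import sys

class FakeBzrError(Exception):
    # Only the %-interpolation against instance attributes is modelled here.
    _fmt = "Pack files have changed, reload and retry. %(orig_error)s"

    def __str__(self):
        return self._fmt % self.__dict__

try:
    raise IOError("missing .pack file")
except IOError:
    exc_info = sys.exc_info()

err = FakeBzrError()
err.orig_error = exc_info[1]
print str(err)  # Pack files have changed, reload and retry. missing .pack file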
=== modified file 'bzrlib/knit.py'
--- a/bzrlib/knit.py 2008-10-24 22:07:49 +0000
+++ b/bzrlib/knit.py 2008-10-25 01:42:48 +0000
@@ -1116,17 +1116,21 @@
         # TODO: We want to build in retrying, because we only hold the
         #       'records' for the duration of this function, outside of this
         #       function we deal in 'keys'.
-        position_map = self._get_components_positions(keys,
-            allow_missing=allow_missing)
-        # key = component_id, r = record_details, i_m = index_memo, n = next
-        records = [(key, i_m) for key, (r, i_m, n)
-                   in position_map.iteritems()]
-        record_map = {}
-        for key, record, digest in \
-                self._read_records_iter(records):
-            (record_details, index_memo, next) = position_map[key]
-            record_map[key] = record, record_details, digest, next
-        return record_map
+        while True:
+            try:
+                position_map = self._get_components_positions(keys,
+                    allow_missing=allow_missing)
+                # key = component_id, r = record_details, i_m = index_memo, n = next
+                records = [(key, i_m) for key, (r, i_m, n)
+                           in position_map.iteritems()]
+                record_map = {}
+                for key, record, digest in \
+                        self._read_records_iter(records):
+                    (record_details, index_memo, next) = position_map[key]
+                    record_map[key] = record, record_details, digest, next
+                return record_map
+            except errors.RetryWithNewPacks, e:
+                self._access.reload_or_raise(e)
 
     def _split_by_prefix(self, keys):
         """For the given keys, split them up based on their prefix.
=== modified file 'bzrlib/tests/test_knit.py'
--- a/bzrlib/tests/test_knit.py 2008-10-24 22:09:02 +0000
+++ b/bzrlib/tests/test_knit.py 2008-10-25 01:42:48 +0000
@@ -48,6 +48,7 @@
     _KnitKeyAccess,
     make_file_factory,
     )
+from bzrlib.repofmt import pack_repo
 from bzrlib.tests import (
     Feature,
     KnownFailure,
@@ -353,6 +354,61 @@
         writer.end()
         return memos
 
+    def make_packs_for_retrying(self):
+        """Create 3 packs and a reload function.
+
+        Originally, 2 pack files will have the data, but one will be missing.
+        The third will then be used in place of the first two if reload()
+        is called.
+
+        :return: (versioned_file, reload_counter)
+            versioned_file  a KnitVersionedFiles using the packs for access
+        """
+        tree = self.make_branch_and_memory_tree('tree')
+        tree.lock_write()
+        try:
+            tree.add([''], ['root-id'])
+            tree.commit('one', rev_id='rev-1')
+            tree.commit('two', rev_id='rev-2')
+            # Pack these two revisions into another pack file, but don't remove
+            # the originals
+            repo = tree.branch.repository
+            collection = repo._pack_collection
+            collection.ensure_loaded()
+            orig_packs = collection.packs
+            packer = pack_repo.Packer(collection, orig_packs, '.testpack')
+            new_pack = packer.pack()
+
+            vf = tree.branch.repository.revisions
+        finally:
+            tree.unlock()
+        tree.branch.repository.lock_read()
+        self.addCleanup(tree.branch.repository.unlock)
+        del tree
+        # Set up a reload() function that switches to using the new pack file
+        new_index = new_pack.revision_index
+        access_tuple = new_pack.access_tuple()
+        reload_counter = [0, 0, 0]
+        def reload():
+            reload_counter[0] += 1
+            if reload_counter[1] > 0:
+                # We already reloaded, nothing more to do
+                reload_counter[2] += 1
+                return False
+            reload_counter[1] += 1
+            vf._index._graph_index._indices[:] = [new_index]
+            vf._access._indices.clear()
+            vf._access._indices[new_index] = access_tuple
+            return True
+        # Delete the second original pack file, so that we are forced to reload
+        # when we go to access the data
+        trans, name = orig_packs[1].access_tuple()
+        trans.delete(name)
+        # We don't have the index trigger reloading because we want to test
+        # that we reload when the .pack disappears
+        vf._access._reload_func = reload
+        return vf, reload_counter
+
     def make_reload_func(self, return_val=True):
         reload_called = [0]
         def reload():
@@ -527,9 +583,11 @@
         access.reload_or_raise(retry_exc)
         self.assertEqual([2], reload_called)
 
-    # TODO: Test that KnitVersionedFiles handles RetryWithNewPacks exceptions
-    #       and calls reload_or_raise appropriately
-
+    def test__get_record_map_retries(self):
+        vf, reload_counter = self.make_packs_for_retrying()
+        keys = [('rev-1',), ('rev-2',)]
+        records = vf._get_record_map(keys)
+        self.assertEqual(keys, sorted(records.keys()))
 
 
 class LowLevelKnitDataTests(TestCase):
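For reference, the reload_counter returned by make_packs_for_retrying() counts [calls to reload(), reloads actually performed, calls made after a reload had already happened]. A companion test could pin down that the retry really went through the reload path, along these lines (hypothetical, not part of this revision):

    def test__get_record_map_retries_and_reloads(self):
        # Hypothetical companion test: the read succeeds and reload() swapped
        # in the new pack exactly once.
        vf, reload_counter = self.make_packs_for_retrying()
        keys = [('rev-1',), ('rev-2',)]
        records = vf._get_record_map(keys)
        self.assertEqual(keys, sorted(records.keys()))
        self.assertEqual([1, 1, 0], reload_counter)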