Rev 3809: KnitVersionedFile.get_record_stream now retries *and* fails correctly. in http://bzr.arbash-meinel.com/branches/bzr/1.9-dev/pack_retry_153786
John Arbash Meinel
john at arbash-meinel.com
Sat Oct 25 02:58:54 BST 2008
At http://bzr.arbash-meinel.com/branches/bzr/1.9-dev/pack_retry_153786
------------------------------------------------------------
revno: 3809
revision-id: john at arbash-meinel.com-20081025015845-7424d9znf6v36kuv
parent: john at arbash-meinel.com-20081025014248-zhy0bg5nf238vc29
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: pack_retry_153786
timestamp: Fri 2008-10-24 20:58:45 -0500
message:
KnitVersionedFile.get_record_stream now retries *and* fails correctly.
=== modified file 'bzrlib/knit.py'
--- a/bzrlib/knit.py 2008-10-25 01:42:48 +0000
+++ b/bzrlib/knit.py 2008-10-25 01:58:45 +0000
@@ -1113,19 +1113,23 @@
         :param allow_missing: If some records are missing, rather than
             error, just return the data that could be generated.
         """
-        # TODO: We want to build in retrying, because we only hold the
-        #       'records' for the duration of this function, outside of this
-        #       function we deal in 'keys'.
+        # This retries the whole request if anything fails. Potentially we
+        # could be a bit more selective. We could track the keys whose records
+        # we have successfully found, and then only request the new records
+        # from there. However, _get_components_positions grabs the whole build
+        # chain, which means we'll likely try to grab the same records again
+        # anyway. Also, can the build chains change as part of a pack
+        # operation? We wouldn't want to end up with a broken chain.
         while True:
             try:
                 position_map = self._get_components_positions(keys,
                     allow_missing=allow_missing)
-                # key = component_id, r = record_details, i_m = index_memo, n = next
+                # key = component_id, r = record_details, i_m = index_memo,
+                #       n = next
                 records = [(key, i_m) for key, (r, i_m, n)
-                        in position_map.iteritems()]
+                                       in position_map.iteritems()]
                 record_map = {}
-                for key, record, digest in \
-                        self._read_records_iter(records):
+                for key, record, digest in self._read_records_iter(records):
                     (record_details, index_memo, next) = position_map[key]
                     record_map[key] = record, record_details, digest, next
                 return record_map
@@ -1182,8 +1186,7 @@
                     yield content_factory
                 return
             except errors.RetryWithNewPacks, e:
-                # reload_func
-                raise
+                self._access.reload_or_raise(e)
 
     def _get_remaining_record_stream(self, keys, ordering,
                                      include_delta_closure):
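
For readers following along, the control flow that the new comment in
_get_record_map describes boils down to a retry loop around the whole
request. A minimal sketch of that shape, with stand-in classes rather
than the real bzrlib ones:

    # Minimal sketch of the retry shape described in the comment above.
    # RetryWithNewPacks and the access object are stand-ins, not bzrlib's.

    class RetryWithNewPacks(Exception):
        """A pack file went missing mid-read; a reload may fix it."""

        def __init__(self, reload_occurred):
            Exception.__init__(self)
            self.reload_occurred = reload_occurred


    def get_record_map(access, keys):
        # Retry the *whole* request: positions are recomputed from scratch
        # each time, so a repack that rewrote the build chains cannot leave
        # us holding stale component references.
        while True:
            try:
                positions = access.get_positions(keys)
                return dict(access.read_records(positions))
            except RetryWithNewPacks, e:
                # Either this reloads the pack list and we loop again, or
                # it raises the underlying error and the caller sees a real
                # failure -- never an infinite retry.
                access.reload_or_raise(e)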
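
The "fails correctly" half of the commit lives in reload_or_raise, which
the second hunk switches get_record_stream over to. A sketch of the
decision it has to make; the attribute names here ('orig_error' in
particular) are illustrative assumptions, not copied from bzrlib:

    def reload_or_raise(self, retry_exc):
        # If this returns, the caller's while-loop runs the request again;
        # otherwise the error that triggered the retry is re-raised.
        if self._reload_func is None:
            # No way to refresh the pack list, so retrying cannot help.
            raise retry_exc.orig_error
        if not self._reload_func() and not retry_exc.reload_occurred:
            # Reloading changed nothing and no reload happened while the
            # request was in flight: the data really is missing, so fail
            # rather than loop forever.
            raise retry_exc.orig_error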
=== modified file 'bzrlib/tests/test_knit.py'
--- a/bzrlib/tests/test_knit.py 2008-10-25 01:42:48 +0000
+++ b/bzrlib/tests/test_knit.py 2008-10-25 01:58:45 +0000
@@ -354,7 +354,7 @@
         writer.end()
         return memos
 
-    def make_packs_for_retrying(self):
+    def make_vf_for_retrying(self):
         """Create 3 packs and a reload function.
 
         Originally, 2 pack files will have the data, but one will be missing.
@@ -400,8 +400,8 @@
             vf._access._indices.clear()
             vf._access._indices[new_index] = access_tuple
             return True
-        # Delete the second original pack file, so that we are forced to reload
-        # when we go to access the data
+        # Delete one of the pack files so the data will need to be reloaded. We
+        # will delete the file with 'rev-1' in it
         trans, name = orig_packs[1].access_tuple()
         trans.delete(name)
         # We don't have the index trigger reloading because we want to test
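
The tests below unpack make_vf_for_retrying as (vf, reload_counter), and
the assertions ([1, 1, 0], then [2, 1, 1]) read naturally as three slots:
times reload() was called, reloads that recovered, and reloads that could
not. A hedged sketch of bookkeeping with those semantics -- rebuild_index
and access_tuple are hypothetical names, and the real helper builds three
actual pack files on a transport:

    class NoSuchFile(Exception):
        """Stand-in for bzrlib.errors.NoSuchFile."""


    def make_reload(vf, rebuild_index, access_tuple):
        # reload_counter[0]: how often reload() was invoked
        # reload_counter[1]: reloads that swapped in a usable index
        # reload_counter[2]: reloads that found the replacement gone too
        reload_counter = [0, 0, 0]
        def reload():
            reload_counter[0] += 1
            try:
                new_index = rebuild_index()   # reads the replacement pack
            except NoSuchFile:
                reload_counter[2] += 1
                raise
            reload_counter[1] += 1
            vf._access._indices.clear()
            vf._access._indices[new_index] = access_tuple
            return True
        return reload, reload_counter

With that shape, deleting every pack file makes the second reload fail,
which is consistent with the NoSuchFile and the [2, 1, 1] counter the
tests below expect.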
@@ -584,10 +584,34 @@
         self.assertEqual([2], reload_called)
 
     def test__get_record_map_retries(self):
-        vf, reload_counter = self.make_packs_for_retrying()
+        vf, reload_counter = self.make_vf_for_retrying()
         keys = [('rev-1',), ('rev-2',)]
         records = vf._get_record_map(keys)
         self.assertEqual(keys, sorted(records.keys()))
+        self.assertEqual([1, 1, 0], reload_counter)
+        # Now delete the packs-in-use, which should trigger another reload, but
+        # this time we just raise an exception because we can't recover
+        for trans, name in vf._access._indices.itervalues():
+            trans.delete(name)
+        self.assertRaises(errors.NoSuchFile, vf._get_record_map, keys)
+        self.assertEqual([2, 1, 1], reload_counter)
+
+    def test_get_record_stream_retries(self):
+        vf, reload_counter = self.make_vf_for_retrying()
+        keys = [('rev-1',), ('rev-2',)]
+        record_stream = vf.get_record_stream(keys, 'topological', False)
+        record = record_stream.next()
+        self.assertEqual(('rev-1',), record.key)
+        self.assertEqual([0, 0, 0], reload_counter)
+        record = record_stream.next()
+        self.assertEqual(('rev-2',), record.key)
+        self.assertEqual([1, 1, 0], reload_counter)
+        # Now delete all pack files, and see that we raise the right error
+        for trans, name in vf._access._indices.itervalues():
+            trans.delete(name)
+        self.assertListRaises(errors.NoSuchFile,
+                              vf.get_record_stream, keys, 'topological', False)
+
 
 class LowLevelKnitDataTests(TestCase):
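
One detail worth noting in the stream test above: get_record_stream
returns a generator, so errors only surface while the stream is being
consumed. That is why the final check uses assertListRaises rather than
assertRaises. An illustrative sketch of such a helper (not a verbatim
copy of bzrlib's TestCase method):

    def assertListRaises(self, excClass, func, *args, **kwargs):
        # Generators raise lazily: calling func() alone succeeds, and the
        # error only appears during iteration. list() forces the whole
        # stream to run so the expected exception can actually surface.
        try:
            list(func(*args, **kwargs))
        except excClass, e:
            return e
        raise self.failureException('%s not raised' % excClass.__name__)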