Rev 3915: An attempt at pre-loading all blocks that you are going to need. In http://bzr.arbash-meinel.com/branches/bzr/brisbane/multi_blocks
John Arbash Meinel
john at arbash-meinel.com
Thu Mar 26 22:07:36 GMT 2009
At http://bzr.arbash-meinel.com/branches/bzr/brisbane/multi_blocks
------------------------------------------------------------
revno: 3915
revision-id: john at arbash-meinel.com-20090326220725-08a6mj8h3u2ojpan
parent: john at arbash-meinel.com-20090326201840-ddb2uqof335ysvnu
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: multi_blocks
timestamp: Thu 2009-03-26 17:07:25 -0500
message:
An attempt at pre-loading all blocks that you are going to need.
-------------- next part --------------
=== modified file 'bzrlib/groupcompress.py'
--- a/bzrlib/groupcompress.py 2009-03-26 20:18:40 +0000
+++ b/bzrlib/groupcompress.py 2009-03-26 22:07:25 +0000
@@ -1329,6 +1329,25 @@
# bytes-on-the-wire for fetch.
for source, keys in source_keys:
if source is self:
+ # These are the blocks that we are going to need to read for
+ # the next set of data
+ needed_blocks = []
+ block_last_byte = {}
+ for key in keys:
+ index_memo = locations[key][0]
+ read_memo = index_memo[0:3]
+ if read_memo not in block_last_byte:
+ block_last_byte[read_memo] = index_memo[4]
+ needed_blocks.append(read_memo)
+ else:
+ last = block_last_byte[read_memo]
+ if index_memo[4] > last:
+ block_last_byte[read_memo] = index_memo[4]
+ # Pre-fill the cache with all blocks
+ for read_memo, zdata in izip(needed_blocks,
+ self._access.get_raw_records(needed_blocks)):
+ block = GroupCompressBlock.from_bytes(zdata)
+ self._group_cache[read_memo] = block
for key in keys:
if key in self._unadded_refs:
if manager is not None:
More information about the bazaar-commits
mailing list