Rev 3916: Only preload ones we don't already have. in http://bzr.arbash-meinel.com/branches/bzr/brisbane/multi_blocks

John Arbash Meinel john at arbash-meinel.com
Thu Mar 26 22:45:05 GMT 2009


At http://bzr.arbash-meinel.com/branches/bzr/brisbane/multi_blocks

------------------------------------------------------------
revno: 3916
revision-id: john at arbash-meinel.com-20090326223530-2urpfrzuhpyictcy
parent: john at arbash-meinel.com-20090326220725-08a6mj8h3u2ojpan
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: multi_blocks
timestamp: Thu 2009-03-26 17:35:30 -0500
message:
  Only preload ones we don't already have.
-------------- next part --------------
=== modified file 'bzrlib/groupcompress.py'
--- a/bzrlib/groupcompress.py	2009-03-26 22:07:25 +0000
+++ b/bzrlib/groupcompress.py	2009-03-26 22:35:30 +0000
@@ -1327,15 +1327,15 @@
         #       one-at-a-time.) This could be done at insert_record_stream()
         #       time, but it probably would decrease the number of
         #       bytes-on-the-wire for fetch.
-        for source, keys in source_keys:
-            if source is self:
-                # These are the blocks that we are going to need to read for
-                # the next set of data
-                needed_blocks = []
-                block_last_byte = {}
-                for key in keys:
-                    index_memo = locations[key][0]
-                    read_memo = index_memo[0:3]
+        def preload_missing_blocks(keys):
+            # These are the blocks that we are going to need to read for
+            # the next set of data
+            needed_blocks = []
+            block_last_byte = {}
+            for key in keys:
+                index_memo = locations[key][0]
+                read_memo = index_memo[0:3]
+                if read_memo not in self._group_cache:
                     if read_memo not in block_last_byte:
                         block_last_byte[read_memo] = index_memo[4]
                         needed_blocks.append(read_memo)
@@ -1343,11 +1343,14 @@
                         last = block_last_byte[read_memo]
                         if index_memo[4] > last:
                             block_last_byte[read_memo] = index_memo[4]
-                # Pre-fill the cache will all blocks
-                for read_memo, zdata in izip(needed_blocks,
-                    self._access.get_raw_records(needed_blocks)):
-                    block = GroupCompressBlock.from_bytes(zdata)
-                    self._group_cache[read_memo] = block
+            # Pre-fill the cache with all blocks
+            for read_memo, zdata in izip(needed_blocks,
+                self._access.get_raw_records(needed_blocks)):
+                block = GroupCompressBlock.from_bytes(zdata)
+                self._group_cache[read_memo] = block
+        for source, keys in source_keys:
+            if source is self:
+                preload_missing_blocks(keys)
                 for key in keys:
                     if key in self._unadded_refs:
                         if manager is not None:



More information about the bazaar-commits mailing list