Rev 3769: Fix the logic a bit, and add a few more tweaking opportunities in http://bzr.arbash-meinel.com/branches/bzr/1.8-dev/btree_buffer
John Arbash Meinel
john at arbash-meinel.com
Thu Oct 9 21:55:21 BST 2008
At http://bzr.arbash-meinel.com/branches/bzr/1.8-dev/btree_buffer
------------------------------------------------------------
revno: 3769
revision-id: john at arbash-meinel.com-20081009205450-z4k6rnvj5j1tqcmu
parent: john at arbash-meinel.com-20081007214112-yl35u4yehfee3cam
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: btree_buffer
timestamp: Thu 2008-10-09 15:54:50 -0500
message:
Fix the logic a bit, and add a few more tweaking opportunities
-------------- next part --------------
=== modified file 'bzrlib/btree_index.py'
--- a/bzrlib/btree_index.py 2008-10-04 14:10:13 +0000
+++ b/bzrlib/btree_index.py 2008-10-09 20:54:50 +0000
@@ -687,9 +687,11 @@
# return node_indexes
# TODO: This could be cached
- recommended_read = self._transport.recommended_page_size() / 4
+ recommended_read = self._transport.recommended_page_size()
recommended_pages = int(math.ceil(recommended_read /
float(_PAGE_SIZE)))
+ # Disable the algorithm
+ # recommended_pages = 1
if len(node_indexes) >= recommended_pages:
# Don't add more, we are already requesting more than enough
return node_indexes
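For context: the expansion budget is now the transport's full recommended page size rather than a quarter of it. A rough sketch of the arithmetic, using illustrative figures (64kB is a typical remote-transport preference and 4kB the btree page size; neither number is asserted by this patch):

    import math

    _PAGE_SIZE = 4096                 # size of one btree index page
    recommended_read = 64 * 1024      # e.g. a remote transport's preferred read size
    recommended_pages = int(math.ceil(recommended_read / float(_PAGE_SIZE)))
    # recommended_pages == 16; the removed '/ 4' would have capped this at 4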
@@ -705,6 +707,9 @@
if self._root_node is not None:
cached_nodes.add(0)
+ # if len(cached_nodes) < 2: # We haven't read enough to justify expansion
+ # return node_indexes
+
# If reading recommended_pages would read the rest of the index, just
# do so.
if max_page - len(cached_nodes) <= recommended_pages:
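The check this hunk leads into short-circuits expansion when it would cover (nearly) the whole index anyway. A minimal sketch of that shortcut as a standalone function; only the guard condition is quoted from the hunk, the body and the function name are assumptions for illustration:

    def remaining_pages(max_page, cached_nodes, recommended_pages):
        """Sketch of the 'just read the rest' shortcut (illustrative names)."""
        if max_page - len(cached_nodes) <= recommended_pages:
            # Few enough pages remain that requesting them all is cheaper
            # than being clever; skip anything already cached.
            return [idx for idx in range(max_page) if idx not in cached_nodes]
        return None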
@@ -736,21 +741,28 @@
# if len(final_nodes) >= recommended_pages:
# break
else:
- while len(final_nodes) < recommended_pages:
- for pos in sorted(final_nodes):
+ new_tips = set(final_nodes)
+ while len(final_nodes) < recommended_pages and new_tips:
+ next_tips = set()
+ for pos in sorted(new_tips):
if pos > 0:
previous = pos - 1
- if previous not in cached_nodes:
+ if (previous not in cached_nodes
+ and previous not in final_nodes):
final_nodes.add(previous)
- if pos < max_page:
- after = pos + 1
- if after not in cached_nodes:
+ next_tips.add(previous)
+ after = pos + 1
+ if after < max_page:
+ if (after not in cached_nodes
+ and after not in final_nodes):
final_nodes.add(after)
+ next_tips.add(after)
# if len(final_nodes) > recommended_pages:
# break
+ new_tips = next_tips
final_nodes = sorted(final_nodes)
- trace.note('\t\t\t\t%s', final_nodes)
+ trace.note('\t\t%s', final_nodes)
return final_nodes
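The reworked loop grows the request breadth-first: each pass only examines the tips added by the previous pass and looks one page to either side, instead of re-walking everything already in final_nodes. A standalone sketch of the idea with the index-specific details stripped out (the names and signature here are illustrative, not part of the patch):

    def expand(start_pages, cached, max_page, budget):
        """Breadth-first expansion of a page request to adjacent pages."""
        final = set(start_pages)
        new_tips = set(final)
        while len(final) < budget and new_tips:
            next_tips = set()
            for pos in new_tips:
                for neighbour in (pos - 1, pos + 1):
                    if (0 <= neighbour < max_page
                        and neighbour not in cached
                        and neighbour not in final):
                        final.add(neighbour)
                        next_tips.add(neighbour)
            new_tips = next_tips
        return sorted(final)

    # expand([10], cached=set(), max_page=100, budget=16) walks outward
    # symmetrically from page 10 and returns pages 2 through 18.

As in the patch, a pass may overshoot the budget slightly; the commented-out break above would prevent that, at the cost of only expanding the earliest tips.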
def _get_nodes(self, cache, node_indexes):
@@ -765,6 +777,7 @@
except KeyError:
needed.append(idx)
if not needed:
+ # trace.note('cached: %s\tnodes: %s', self._name[-14:], node_indexes)
return found
needed = self._expand_nodes(needed)
found.update(self._cache_nodes(needed, cache))
@@ -1182,6 +1195,10 @@
self._size = len(start)
size = min(_PAGE_SIZE, self._size)
else:
+ if offset > self._size:
+ raise AssertionError('tried to read past the end'
+ ' of the file %s > %s'
+ % (offset, self._size))
size = min(size, self._size - offset)
ranges.append((offset, size))
if not ranges:
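The new assertion protects the offset/size arithmetic for non-root pages: the final page of an index is usually shorter than _PAGE_SIZE, and an offset beyond the end of the file indicates a caller bug rather than a short read. A hedged sketch of that arithmetic (the loop shape and the total_size and page_numbers names are assumptions; the real method works on self._size):

    ranges = []
    for index in page_numbers:          # hypothetical iterable of page indexes
        offset = index * _PAGE_SIZE
        if offset > total_size:
            raise AssertionError('tried to read past the end of the file'
                                 ' %s > %s' % (offset, total_size))
        # The last page may be shorter than a full _PAGE_SIZE.
        size = min(_PAGE_SIZE, total_size - offset)
        ranges.append((offset, size))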