Rev 11: Make PAGE_SIZE be a constant, rather than having it hard-coded everywhere. in http://bzr.arbash-meinel.com/plugins/index2
John Arbash Meinel
john at arbash-meinel.com
Tue Jul 1 20:28:10 BST 2008
At http://bzr.arbash-meinel.com/plugins/index2
------------------------------------------------------------
revno: 11
revision-id: john at arbash-meinel.com-20080701192734-9mbeqa4zszat3z1f
parent: john at arbash-meinel.com-20080701184228-dhck7vdmkh47zspa
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: index2
timestamp: Tue 2008-07-01 14:27:34 -0500
message:
Make PAGE_SIZE be a constant, rather than having it hard-coded everywhere.
-------------- next part --------------
=== modified file 'btree_index.py'
--- a/btree_index.py 2008-07-01 11:37:43 +0000
+++ b/btree_index.py 2008-07-01 19:27:34 +0000
@@ -33,6 +33,8 @@
_INTERNAL_FLAG = "type=internal\n"
_INTERNAL_OFFSET = "offset="
+_PAGE_SIZE = 4096
+
class _BuilderRow(object):
"""The stored state accumulated while writing out a row in the index.
@@ -103,7 +105,8 @@
# padded note:
row.spool.write("\x00" * 100)
row.spool.writelines(byte_lines)
- if row.spool.tell() % 4096 != 0:
+ if row.spool.tell() % _PAGE_SIZE != 0:
+ import pdb; pdb.set_trace()
raise AssertionError("incorrect node length")
row.nodes += 1
row.writer = None
@@ -120,7 +123,7 @@
# flesh out any internal nodes that are needed to
# preserve the height of the tree
if internal_row.writer is None:
- length = 4096
+ length = _PAGE_SIZE
if internal_row.nodes == 0:
length -= 100 # padded
internal_row.writer = chunk_writer.ChunkWriter(
@@ -129,7 +132,7 @@
internal_row.writer.write(_INTERNAL_OFFSET +
str(self.rows[pos + 1].nodes) + "\n")
# add a new leaf
- length = 4096
+ length = _PAGE_SIZE
if self.rows[-1].nodes == 0:
length -= 100 # padded
self.rows[-1].writer = chunk_writer.ChunkWriter(length)
@@ -155,7 +158,8 @@
# We need a new row
new_row = _BuilderRow()
self.rows.insert(0, new_row)
- new_row.writer = chunk_writer.ChunkWriter(3996)
+ # This will be padded, hence the -100
+ new_row.writer = chunk_writer.ChunkWriter(_PAGE_SIZE - 100)
new_row.writer.write(_INTERNAL_FLAG)
new_row.writer.write(_INTERNAL_OFFSET +
str(self.rows[1].nodes - 1) + "\n")
@@ -194,12 +198,12 @@
row.spool.seek(0)
# copy nodes to the finalised file.
# Special case the first node as it may be prefixed
- node = row.spool.read(4096)
+ node = row.spool.read(_PAGE_SIZE)
result.write(node[reserved:])
result.write("\x00" * (reserved - position))
position = 0 # Only the root row actually has an offset
copied_len = osutils.pumpfile(row.spool, result)
- if copied_len != (row.nodes - 1) * 4096:
+ if copied_len != (row.nodes - 1) * _PAGE_SIZE:
import pdb;pdb.set_trace()
raise AssertionError("Not enough data copied")
result.flush()
@@ -488,18 +492,18 @@
"""
ranges = []
for index in nodes:
- offset = index * 4096
- size = 4096
+ offset = index * _PAGE_SIZE
+ size = _PAGE_SIZE
if index == 0:
# Root node - special case
if self._size:
- size = min(4096, self._size)
+ size = min(_PAGE_SIZE, self._size)
else:
stream = self._transport.get(self._name)
- start = stream.read(4096)
+ start = stream.read(_PAGE_SIZE)
# Avoid doing this again
self._size = len(start)
- size = min(4096, self._size)
+ size = min(_PAGE_SIZE, self._size)
ranges.append((offset, size))
for offset, data in self._transport.readv(self._name, ranges):
if offset == 0:
@@ -514,7 +518,7 @@
node = _InternalNode(bytes)
else:
raise AssertionError("Unknown node type for %r" % bytes)
- yield offset / 4096, node
+ yield offset / _PAGE_SIZE, node
def _signature(self):
"""The file signature for this index type."""
More information about the bazaar-commits
mailing list