Rev 3680: Fix up the test suite, now that we pack more efficiently. in http://bzr.arbash-meinel.com/branches/bzr/1.7-dev/btree
John Arbash Meinel
john at arbash-meinel.com
Fri Aug 22 22:05:17 BST 2008
At http://bzr.arbash-meinel.com/branches/bzr/1.7-dev/btree
------------------------------------------------------------
revno: 3680
revision-id: john at arbash-meinel.com-20080822210515-f8qwnjnpqk560gly
parent: john at arbash-meinel.com-20080822205059-xrra00puh3onekbi
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: btree
timestamp: Fri 2008-08-22 16:05:15 -0500
message:
Fix up the test suite, now that we pack more efficiently.
Unfortunately, that means upping the node count for the iter_all and
three_level tests.
modified:
bzrlib/tests/test_btree_index.py test_index.py-20080624222253-p0x5f92uyh5hw734-13
-------------- next part --------------
=== modified file 'bzrlib/tests/test_btree_index.py'
--- a/bzrlib/tests/test_btree_index.py 2008-08-22 02:18:27 +0000
+++ b/bzrlib/tests/test_btree_index.py 2008-08-22 21:05:15 +0000
@@ -203,7 +203,7 @@
temp_file = builder.finish()
content = temp_file.read()
del temp_file
- self.assertEqual(10646, len(content))
+ self.assertEqual(9602, len(content))
self.assertEqual(
"B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=800\n"
"row_lengths=1,2\n",
@@ -215,19 +215,18 @@
expected_root = (
"type=internal\n"
"offset=0\n"
- "503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503503\n"
- )
+ ) + ("635" * 40) + "\n"
self.assertEqual(expected_root, root_bytes)
# We already know serialisation works for leaves, check key selection:
leaf1_bytes = zlib.decompress(leaf1)
sorted_node_keys = sorted(node[0] for node in nodes)
node = btree_index._LeafNode(leaf1_bytes, 1, 0)
- self.assertEqual(448, len(node.keys))
- self.assertEqual(sorted_node_keys[:448], sorted(node.keys))
+ self.assertEqual(594, len(node.keys))
+ self.assertEqual(sorted_node_keys[:594], sorted(node.keys))
leaf2_bytes = zlib.decompress(leaf2)
node = btree_index._LeafNode(leaf2_bytes, 1, 0)
- self.assertEqual(800 - 448, len(node.keys))
- self.assertEqual(sorted_node_keys[448:], sorted(node.keys))
+ self.assertEqual(800 - 594, len(node.keys))
+ self.assertEqual(sorted_node_keys[594:], sorted(node.keys))
def test_last_page_rounded_1_layer(self):
builder = btree_index.BTreeBuilder(key_elements=1, reference_lists=0)
@@ -260,7 +259,7 @@
temp_file = builder.finish()
content = temp_file.read()
del temp_file
- self.assertEqual(10646, len(content))
+ self.assertEqual(9602, len(content))
self.assertEqual(
"B+Tree Graph Index 2\nnode_ref_lists=0\nkey_elements=1\nlen=800\n"
"row_lengths=1,2\n",
@@ -269,22 +268,22 @@
leaf2 = content[8192:]
leaf2_bytes = zlib.decompress(leaf2)
node = btree_index._LeafNode(leaf2_bytes, 1, 0)
- self.assertEqual(800 - 448, len(node.keys))
+ self.assertEqual(800 - 594, len(node.keys))
sorted_node_keys = sorted(node[0] for node in nodes)
- self.assertEqual(sorted_node_keys[448:], sorted(node.keys))
+ self.assertEqual(sorted_node_keys[594:], sorted(node.keys))
def test_three_level_tree_details(self):
# The left most pointer in the second internal node in a row should
# point to the second node that the internal node is for, _not_
# the first, otherwise the first node overlaps with the last node of
# the prior internal node on that row.
- # We will be adding 100,000 nodes, so spill at 100,001 to prevent
+ # We will be adding 140,000 nodes, so spill at 200,001 to prevent
# having to flush anything out to disk.
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2,
- spill_at=100001)
- # 100K nodes is *just* enough to create a two internal nodes on the
+ spill_at=200001)
+ # 140K nodes is *just* enough to create two internal nodes on the
# second level
- nodes = self.make_nodes(50000, 2, 2)
+ nodes = self.make_nodes(70000, 2, 2)
for node in nodes:
builder.add_node(*node)
@@ -321,7 +320,7 @@
temp_file = builder.finish()
content = temp_file.read()
del temp_file
- self.assertEqual(10574, len(content))
+ self.assertEqual(10639, len(content))
self.assertEqual(
"B+Tree Graph Index 2\nnode_ref_lists=2\nkey_elements=2\nlen=400\n"
"row_lengths=1,2\n",
@@ -334,9 +333,7 @@
"type=internal\n"
"offset=0\n"
"1111111111111111111111111111111111111111\x00"
- "126126126126126126126126126126126126126126126126126126126"
- "126126126126126126126126126126126126126126126126126126126126126\n"
- )
+ ) + ("151" * 40) + "\n"
self.assertEqual(expected_root, root_bytes)
# We assume the other leaf nodes have been written correctly - layering
# FTW.
@@ -594,7 +591,7 @@
# The entire index should have been read, as it is one page long.
self.assertEqual([('readv', 'index', [(0, size)], False, None)],
transport._activity)
- self.assertEqual(1593, size)
+ self.assertEqual(1199, size)
def test_2_levels_key_count_2_2(self):
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
@@ -603,7 +600,7 @@
builder.add_node(*node)
transport = get_transport('trace+' + self.get_url(''))
size = transport.put_file('index', builder.finish())
- self.assertEqual(10242, size)
+ self.assertEqual(9152, size)
index = btree_index.BTreeGraphIndex(transport, 'index', size)
del transport._activity[:]
self.assertEqual([], transport._activity)
@@ -626,7 +623,7 @@
# The entire index should have been read linearly.
self.assertEqual([('readv', 'index', [(0, size)], False, None)],
transport._activity)
- self.assertEqual(3846, size)
+ self.assertEqual(2768, size)
def test_validate_two_pages(self):
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2)
@@ -636,14 +633,14 @@
transport = get_transport('trace+' + self.get_url(''))
size = transport.put_file('index', builder.finish())
# Root page, 2 leaf pages
- self.assertEqual(10242, size)
+ self.assertEqual(9152, size)
index = btree_index.BTreeGraphIndex(transport, 'index', size)
del transport._activity[:]
self.assertEqual([], transport._activity)
index.validate()
# The entire index should have been read linearly.
self.assertEqual([('readv', 'index', [(0, 4096)], False, None),
- ('readv', 'index', [(4096, 4096), (8192, 2050)], False, None)],
+ ('readv', 'index', [(4096, 4096), (8192, 960)], False, None)],
transport._activity)
# XXX: TODO: write some badly-ordered nodes, and some pointers-to-wrong
# node and make validate find them.
@@ -687,21 +684,21 @@
# iterating all entries reads the header, then does a linear
# read.
builder = btree_index.BTreeBuilder(key_elements=2, reference_lists=2,
- spill_at=100001)
- # 100k nodes is enough to create a three-level index, which shows that
+ spill_at=200001)
+ # 140k nodes is enough to create a three-level index, which shows that
# we skip the internal nodes and just read the leaf nodes.
- nodes = self.make_nodes(50000, 2, 2)
+ nodes = self.make_nodes(70000, 2, 2)
for node in nodes:
builder.add_node(*node)
transport = get_transport('trace+' + self.get_url(''))
size = transport.put_file('index', builder.finish())
- self.assertEqual(2026722, size, 'number of expected bytes in the'
+ self.assertEqual(2624681, size, 'number of expected bytes in the'
' output changed')
del builder
index = btree_index.BTreeGraphIndex(transport, 'index', size)
del transport._activity[:]
self.assertEqual([], transport._activity)
- found_nodes = list(index.iter_all_entries())
+ found_nodes = self.time(list, index.iter_all_entries())
bare_nodes = []
for node in found_nodes:
self.assertTrue(node[0] is index)
@@ -709,7 +706,7 @@
self.assertEqual(3, len(index._row_lengths),
"Not enough rows: %r" % index._row_lengths)
# Should be as long as the nodes we supplied
- self.assertEqual(100000, len(found_nodes))
+ self.assertEqual(140000, len(found_nodes))
# Should have the same content
self.assertEqual(set(nodes), set(bare_nodes))
# Should have done linear scan IO up the index, ignoring
@@ -717,14 +714,14 @@
# The entire index should have been read
total_pages = sum(index._row_lengths)
self.assertEqual(total_pages, index._row_offsets[-1])
- self.assertEqual(2026722, size)
+ self.assertEqual(2624681, size)
# The start of the leaves
first_byte = index._row_offsets[-2] * btree_index._PAGE_SIZE
readv_request = []
for offset in range(first_byte, size, 4096):
readv_request.append((offset, 4096))
# The last page is truncated
- readv_request[-1] = (readv_request[-1][0], 3298)
+ readv_request[-1] = (readv_request[-1][0], 2624681 % 4096)
expected = [('readv', 'index', [(0, 4096)], False, None),
('readv', 'index', readv_request, False, None)]
if expected != transport._activity:
More information about the bazaar-commits
mailing list