Rev 3877: Don't repack backing indices, set a name for the backing indices, set random_id=True when repacking in http://bzr.arbash-meinel.com/branches/bzr/brisbane/hack3
John Arbash Meinel
john at arbash-meinel.com
Thu Mar 19 19:47:29 GMT 2009
At http://bzr.arbash-meinel.com/branches/bzr/brisbane/hack3
------------------------------------------------------------
revno: 3877
revision-id: john at arbash-meinel.com-20090319194720-4esxj7gnrmfaykww
parent: john at arbash-meinel.com-20090319183945-2gia2u0k0lhzcu7n
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: hack3
timestamp: Thu 2009-03-19 14:47:20 -0500
message:
Don't repack backing indices, set a name for the backing indices, set random_id=True when repacking
Don't force optimize on chk indexes.
Saves at least 1 minute on 'bzr pack launchpad'.
=== modified file 'bzrlib/btree_index.py'
--- a/bzrlib/btree_index.py 2009-03-19 18:39:45 +0000
+++ b/bzrlib/btree_index.py 2009-03-19 19:47:20 +0000
@@ -180,16 +180,8 @@
         combine mem with the first and second indexes, creating a new one of
         size 4x. On the fifth create a single new one, etc.
         """
-        iterators_to_combine = [self._iter_mem_nodes()]
-        pos = -1
-        for pos, backing in enumerate(self._backing_indices):
-            if backing is None:
-                pos -= 1
-                break
-            iterators_to_combine.append(backing.iter_all_entries())
-        backing_pos = pos + 1
         new_backing_file, size = \
-            self._write_nodes(self._iter_smallest(iterators_to_combine),
+            self._write_nodes(self._iter_mem_nodes(),
                               allow_optimize=False)
         dir_path, base_name = osutils.split(new_backing_file.name)
         # Note: The transport here isn't strictly needed, because we will use
@@ -198,11 +190,7 @@
                                       base_name, size)
         # GC will clean up the file
         new_backing._file = new_backing_file
-        if len(self._backing_indices) == backing_pos:
-            self._backing_indices.append(None)
-        self._backing_indices[backing_pos] = new_backing
-        for pos in range(backing_pos):
-            self._backing_indices[pos] = None
+        self._backing_indices.append(new_backing)
         self._keys = set()
         self._nodes = {}
         self._nodes_by_key = None
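
These two hunks drop the old power-of-two combining scheme: previously each
spill merged the in-memory nodes with some of the existing backing indices
through _iter_smallest(), rewriting data that had already been spilled; now
each spill writes only the in-memory nodes and appends the result to
self._backing_indices. A minimal standalone sketch of the difference (plain
Python, not the bzrlib API; 'indices' stands in for self._backing_indices):

    import heapq

    def spill_combining(indices, mem_keys):
        # Old behaviour (simplified; the real code chose which indices to
        # combine by a power-of-two scheme): merge the in-memory keys with
        # existing backing indices into one larger index, re-reading and
        # re-writing previously spilled data.
        merged = list(heapq.merge(sorted(mem_keys), *indices))
        return [merged]

    def spill_appending(indices, mem_keys):
        # New behaviour: write only the in-memory keys to a fresh index
        # and append it, leaving earlier backing indices untouched.
        return indices + [sorted(mem_keys)]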
@@ -368,7 +356,7 @@
         for row in reversed(rows):
             pad = (type(row) != _LeafBuilderRow)
             row.finish_node(pad=pad)
-        result = tempfile.NamedTemporaryFile()
+        result = tempfile.NamedTemporaryFile(prefix='bzr-index-')
         lines = [_BTSIGNATURE]
         lines.append(_OPTION_NODE_REFS + str(self.reference_lists) + '\n')
         lines.append(_OPTION_KEY_ELEMENTS + str(self._key_length) + '\n')
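
The prefix only changes the generated file name, so spilled index files are
attributable when they pile up in $TMPDIR. A quick check of the behaviour,
using nothing beyond the standard library:

    import tempfile

    # With prefix=, the spill file is named e.g. /tmp/bzr-index-XXXXXX
    # rather than the anonymous default /tmp/tmpXXXXXX.
    f = tempfile.NamedTemporaryFile(prefix='bzr-index-')
    assert 'bzr-index-' in f.name
    f.close()  # NamedTemporaryFile deletes the file on close by default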
=== modified file 'bzrlib/repofmt/groupcompress_repo.py'
--- a/bzrlib/repofmt/groupcompress_repo.py 2009-03-17 20:33:54 +0000
+++ b/bzrlib/repofmt/groupcompress_repo.py 2009-03-19 19:47:20 +0000
@@ -345,7 +345,8 @@
             assert self.new_pack is not None
             index = getattr(self.new_pack, index_name)
             index_to_pack[index] = self.new_pack.access_tuple()
-            index.set_optimize(for_size=True)
+            if index_name != 'chk_index':
+                index.set_optimize(for_size=True)
             access.set_writer(self.new_pack._writer, index,
                               self.new_pack.access_tuple())
             add_callback = index.add_nodes
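
set_optimize(for_size=True) asks the B-tree builder to spend extra effort
packing each page for a smaller index; the guard skips that for the chk
index, typically the largest one in a repack, trading a little index size
for speed. The guard in isolation, as a hedged sketch ('new_pack' and the
hard-coded index names are stand-ins for the surrounding pack code):

    def configure_indices(new_pack):
        for index_name in ('revision_index', 'inventory_index',
                           'text_index', 'signature_index', 'chk_index'):
            index = getattr(new_pack, index_name)
            if index_name != 'chk_index':
                # The smaller indices still get the tighter, slower packing.
                index.set_optimize(for_size=True)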
@@ -382,6 +383,7 @@
         try:
             stream = vf_to_stream(source_vf, keys, message, child_pb)
             for _ in target_vf._insert_record_stream(stream,
+                                                     random_id=True,
                                                      reuse_blocks=False):
                 pass
         finally:
@@ -415,6 +417,7 @@
             for stream in self._get_chk_streams(source_vf, total_keys,
                                                 pb=child_pb):
                 for _ in target_vf._insert_record_stream(stream,
+                                                         random_id=True,
                                                          reuse_blocks=False):
                     pass
         finally:
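
random_id=True is the caller's promise that no key in the stream is already
present in the target, which lets the insert path skip its per-record
presence checks; when repacking into a brand-new pack, every key is new by
construction. A minimal sketch of the idea (a plain dict stands in for the
target index; this is not the bzrlib implementation):

    def insert_records(index, records, random_id=False):
        for key, value in records:
            if not random_id:
                # Without the promise, every record costs an index lookup
                # just to confirm the key is absent.
                if key in index:
                    raise ValueError('duplicate key: %r' % (key,))
            index[key] = value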