Rev 4676: Remove the trim test, remove the _check_rebuild_block call in http://bazaar.launchpad.net/~jameinel/bzr/2.1b1-pack-on-the-fly

John Arbash Meinel john at arbash-meinel.com
Thu Sep 3 16:25:45 BST 2009


At http://bazaar.launchpad.net/~jameinel/bzr/2.1b1-pack-on-the-fly

------------------------------------------------------------
revno: 4676
revision-id: john at arbash-meinel.com-20090903152536-guqk7hltitdra91w
parent: john at arbash-meinel.com-20090903152346-wysd9b9xork5qxs5
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: 2.1b1-pack-on-the-fly
timestamp: Thu 2009-09-03 10:25:36 -0500
message:
  Remove the trim test, and remove the _check_rebuild_block call,
  since its effect is now fully subsumed by the earlier 'check_is_well_utilized()'.
  If that returns True, we aren't going to truncate or rebuild the block.
  If it returns False, we are going to rebuild the block 'from scratch' anyway.
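
For context, the decision described above reduces to a single boolean. Here is a
minimal sketch in Python of that flow; should_reuse_block() is an illustrative
wrapper, not bzrlib's actual API -- only check_is_well_utilized() is real:

    def should_reuse_block(insert_manager):
        # If most of the incoming groupcompress block is actually wanted,
        # reuse its bytes as-is: no truncation, no rebuild.
        if insert_manager.check_is_well_utilized():
            return True
        # Otherwise the block gets rebuilt 'from scratch', so a separate
        # _check_rebuild_block() pass would be redundant -- hence its removal.
        return False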
-------------- next part --------------
=== modified file 'bzrlib/groupcompress.py'
--- a/bzrlib/groupcompress.py	2009-09-03 15:23:46 +0000
+++ b/bzrlib/groupcompress.py	2009-09-03 15:25:36 +0000
@@ -1668,7 +1668,6 @@
                 if record.storage_kind == 'groupcompress-block':
                     # Check to see if we really want to re-use this block
                     insert_manager = record._manager
-                    import pdb; pdb.set_trace()
                     reuse_this_block = insert_manager.check_is_well_utilized()
             else:
                 reuse_this_block = False
@@ -1677,7 +1676,6 @@
                 if record.storage_kind == 'groupcompress-block':
                     # Insert the raw block into the target repo
                     insert_manager = record._manager
-                    insert_manager._check_rebuild_block()
                     bytes = record._manager._block.to_bytes()
                     _, start, length = self._access.add_raw_records(
                         [(None, len(bytes))], bytes)[0]

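Restating the surviving fast path from the hunk above as a self-contained
sketch; _block.to_bytes() and add_raw_records() come straight from the diff,
while the function wrapper and explicit arguments are illustrative:

    def insert_reused_block(access, insert_manager):
        # Serialize the already-compressed block and store it verbatim,
        # skipping the _check_rebuild_block() call this revision removes.
        data = insert_manager._block.to_bytes()
        # Per the diff, add_raw_records() takes (key, length) pairs plus the
        # byte string, and each returned element is a 3-tuple whose second
        # and third entries are the start offset and length of the record.
        _, start, length = access.add_raw_records([(None, len(data))], data)[0]
        return start, length
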
=== modified file 'bzrlib/tests/test_groupcompress.py'
--- a/bzrlib/tests/test_groupcompress.py	2009-09-03 15:23:46 +0000
+++ b/bzrlib/tests/test_groupcompress.py	2009-09-03 15:25:36 +0000
@@ -680,62 +680,6 @@
             else:
                 self.assertIs(block, record._manager._block)
 
-    def test_insert_record_stream_truncates_partial_blocks(self):
-        vf = self.make_test_vf(True, dir='source')
-        def grouped_stream(revision_ids, first_parents=()):
-            parents = first_parents
-            for revision_id in revision_ids:
-                key = (revision_id,)
-                record = versionedfile.FulltextContentFactory(
-                    key, parents, None,
-                    'some content that is\n'
-                    'identical except for\n'
-                    'revision_id:%s\n' % (revision_id,))
-                yield record
-                parents = (key,)
-        # One group, a-l
-        vf.insert_record_stream(grouped_stream('abcdefghijkl'))
-        vf.writer.end()
-        block = manager = None
-        raw_block_bytes = None
-        raw_block_z_bytes = None
-        record_order = []
-        # Everything should fit in a single block
-        for record in vf.get_record_stream([(r,) for r in 'abcdefghijkl'],
-                                           'unordered', False):
-            record_order.append(record.key)
-            if block is None:
-                block = record._manager._block
-                manager = record._manager
-                raw_block_z_bytes = block._z_content
-                block._ensure_content(block._content_length)
-                raw_block_bytes = block._content
-            else:
-                self.assertIs(block, record._manager._block)
-                self.assertIs(manager, record._manager)
-        # 'unordered' fetching will put that in the same order it was inserted
-        self.assertEqual([(r,) for r in 'abcdefghijkl'], record_order)
-        # If we fetch enough of the block, but not everything, then it
-        # should simply decompress, truncate, and recompress
-        vf2 = self.make_test_vf(True, dir='target')
-        def small_stream():
-            for record in vf.get_record_stream([(r,) for r in 'acf'],
-                                               'unordered', False):
-                record._manager._full_enough_block_size = 50
-                record._manager._max_cut_fraction = 0.3
-                yield record
-        vf2.insert_record_stream(small_stream())
-            
-        vf2.writer.end()
-        record = vf2.get_record_stream([('a',)], 'unordered', False).next()
-        new_block = record._manager._block
-        self.assertIsNot(None, new_block._z_content)
-        self.assertNotEqual(raw_block_z_bytes, new_block._z_content)
-        new_block._ensure_content(new_block._content_length)
-        # The new content is simply the truncation of the old content
-        self.assertStartsWith(raw_block_bytes, new_block._content)
-        self.assertTrue(len(new_block._content) < len(raw_block_bytes))
-
     def test_add_missing_noncompression_parent_unvalidated_index(self):
         unvalidated = self.make_g_index_missing_parent()
         combined = _mod_index.CombinedGraphIndex([unvalidated])

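The deleted test drove the old truncate-and-recompress path by lowering two
per-manager knobs visible in the removed lines, _full_enough_block_size and
_max_cut_fraction. A hedged sketch of the kind of heuristic those knobs feed;
the logic is simplified, and the threshold values are the test's overrides,
not bzrlib's defaults:

    def is_well_utilized(bytes_wanted, block_total_bytes,
                         full_enough_block_size=50, max_cut_fraction=0.3):
        # Illustrative only: treat a block as reusable when the requested
        # bytes meet a minimum size and a hypothetical trim would discard
        # no more than max_cut_fraction of the block's content.
        if bytes_wanted < full_enough_block_size:
            return False
        cut_fraction = 1.0 - float(bytes_wanted) / block_total_bytes
        return cut_fraction <= max_cut_fraction

With this revision, a block that fails the check is simply rebuilt from
scratch rather than truncated, which is why the truncation test no longer has
a code path to exercise.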

