Rev 3779: Change the name to ChunkWriter.set_optimize() in http://bzr.arbash-meinel.com/branches/bzr/1.9-dev/btree_optimize

John Arbash Meinel john at arbash-meinel.com
Wed Oct 15 22:34:34 BST 2008


At http://bzr.arbash-meinel.com/branches/bzr/1.9-dev/btree_optimize

------------------------------------------------------------
revno: 3779
revision-id: john at arbash-meinel.com-20081015213410-g19sy2rpgxcl2sew
parent: john at arbash-meinel.com-20081015212739-ap2uunpg6rjkypc1
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: btree_optimize
timestamp: Wed 2008-10-15 16:34:10 -0500
message:
  Change the name to ChunkWriter.set_optimize()
  
  Also allow it to be passed to __init__, and pass it in from
  BTreeBuilder.
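
  To illustrate the new calling conventions, here is a quick usage
  sketch. It assumes a bzrlib tree containing this branch and uses only
  the signatures visible in the diff below; it is not part of the patch.

      from bzrlib import chunk_writer

      # Default construction still optimizes for speed:
      #   (_max_repack, _max_zsync) == _repack_opts_for_speed == (0, 8)
      writer = chunk_writer.ChunkWriter(4096)

      # The old ChunkWriter.optimize() call is now spelled set_optimize().
      writer.set_optimize(for_size=True)   # -> _repack_opts_for_size == (20, 0)

      # New in this revision: the same choice can be made at construction
      # time, which is how BTreeBuilder passes its _optimize_for_size flag
      # through to the writers it creates.
      writer = chunk_writer.ChunkWriter(4096, optimize_for_size=True)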
-------------- next part --------------
=== modified file 'bzrlib/btree_index.py'
--- a/bzrlib/btree_index.py	2008-09-26 07:09:50 +0000
+++ b/bzrlib/btree_index.py	2008-10-15 21:34:10 +0000
@@ -139,6 +139,7 @@
         self._nodes = {}
         # Indicate it hasn't been built yet
         self._nodes_by_key = None
+        self._optimize_for_size = False
 
     def add_node(self, key, value, references=()):
         """Add a node to the index.
@@ -276,7 +277,8 @@
                     length = _PAGE_SIZE
                     if internal_row.nodes == 0:
                         length -= _RESERVED_HEADER_BYTES # padded
-                    internal_row.writer = chunk_writer.ChunkWriter(length, 0)
+                    internal_row.writer = chunk_writer.ChunkWriter(length, 0,
+                        optimize_for_size=self._optimize_for_size)
                     internal_row.writer.write(_INTERNAL_FLAG)
                     internal_row.writer.write(_INTERNAL_OFFSET +
                         str(rows[pos + 1].nodes) + "\n")
@@ -284,7 +286,8 @@
             length = _PAGE_SIZE
             if rows[-1].nodes == 0:
                 length -= _RESERVED_HEADER_BYTES # padded
-            rows[-1].writer = chunk_writer.ChunkWriter(length)
+            rows[-1].writer = chunk_writer.ChunkWriter(length,
+                optimize_for_size=self._optimize_for_size)
             rows[-1].writer.write(_LEAF_FLAG)
         if rows[-1].writer.write(line):
             # this key did not fit in the node:
@@ -313,7 +316,8 @@
                 # This will be padded, hence the -100
                 new_row.writer = chunk_writer.ChunkWriter(
                     _PAGE_SIZE - _RESERVED_HEADER_BYTES,
-                    reserved_bytes)
+                    reserved_bytes,
+                    optimize_for_size=self._optimize_for_size)
                 new_row.writer.write(_INTERNAL_FLAG)
                 new_row.writer.write(_INTERNAL_OFFSET +
                     str(rows[1].nodes - 1) + "\n")

=== modified file 'bzrlib/chunk_writer.py'
--- a/bzrlib/chunk_writer.py	2008-10-15 21:27:39 +0000
+++ b/bzrlib/chunk_writer.py	2008-10-15 21:34:10 +0000
@@ -93,7 +93,7 @@
     _repack_opts_for_speed = (0, 8)
     _repack_opts_for_size = (20, 0)
 
-    def __init__(self, chunk_size, reserved=0):
+    def __init__(self, chunk_size, reserved=0, optimize_for_size=False):
         """Create a ChunkWriter to write chunk_size chunks.
 
         :param chunk_size: The total byte count to emit at the end of the
@@ -113,20 +113,7 @@
         self.unused_bytes = None
         self.reserved_size = reserved
         # Default is to make building fast rather than compact
-        self._max_repack, self._max_zsync = ChunkWriter._repack_opts_for_speed
-
-    def optimize(self, for_size=True):
-        """Change how we optimize our writes.
-
-        :param for_size: If True, optimize for minimum space usage, otherwise
-            optimize for fastest writing speed.
-        :return: None
-        """
-        if for_size:
-            opts = ChunkWriter._repack_opts_for_size
-        else:
-            opts = ChunkWriter._repack_opts_for_speed
-        self._max_repack, self._max_zsync = opts
+        self.set_optimize(for_size=optimize_for_size)
 
     def finish(self):
         """Finish the chunk.
@@ -158,6 +145,19 @@
             self.bytes_list.append("\x00" * nulls_needed)
         return self.bytes_list, self.unused_bytes, nulls_needed
 
+    def set_optimize(self, for_size=True):
+        """Change how we optimize our writes.
+
+        :param for_size: If True, optimize for minimum space usage, otherwise
+            optimize for fastest writing speed.
+        :return: None
+        """
+        if for_size:
+            opts = ChunkWriter._repack_opts_for_size
+        else:
+            opts = ChunkWriter._repack_opts_for_speed
+        self._max_repack, self._max_zsync = opts
+
     def _recompress_all_bytes_in(self, extra_bytes=None):
         """Recompress the current bytes_in, and optionally more.
 

=== modified file 'bzrlib/tests/test_chunk_writer.py'
--- a/bzrlib/tests/test_chunk_writer.py	2008-10-15 21:27:39 +0000
+++ b/bzrlib/tests/test_chunk_writer.py	2008-10-15 21:34:10 +0000
@@ -41,13 +41,19 @@
 
     def test_optimize_for_speed(self):
         writer = chunk_writer.ChunkWriter(4096)
-        writer.optimize(for_size=False)
+        writer.set_optimize(for_size=False)
+        self.assertEqual(chunk_writer.ChunkWriter._repack_opts_for_speed,
+                         (writer._max_repack, writer._max_zsync))
+        writer = chunk_writer.ChunkWriter(4096, optimize_for_size=False)
         self.assertEqual(chunk_writer.ChunkWriter._repack_opts_for_speed,
                          (writer._max_repack, writer._max_zsync))
 
     def test_optimize_for_size(self):
         writer = chunk_writer.ChunkWriter(4096)
-        writer.optimize(for_size=True)
+        writer.set_optimize(for_size=True)
+        self.assertEqual(chunk_writer.ChunkWriter._repack_opts_for_size,
+                         (writer._max_repack, writer._max_zsync))
+        writer = chunk_writer.ChunkWriter(4096, optimize_for_size=True)
         self.assertEqual(chunk_writer.ChunkWriter._repack_opts_for_size,
                          (writer._max_repack, writer._max_zsync))
 


