Rev 2904: Parse more than one segment of data from a single readv response if needed. in http://people.ubuntu.com/~robertc/baz2.0/index

Robert Collins robertc at robertcollins.net
Thu Oct 11 03:26:54 BST 2007


At http://people.ubuntu.com/~robertc/baz2.0/index

------------------------------------------------------------
revno: 2904
revision-id: robertc at robertcollins.net-20071011022646-fxw9pt0ohs7662sf
parent: robertc at robertcollins.net-20071011021626-p917pq7ytv8o7woz
committer: Robert Collins <robertc at robertcollins.net>
branch nick: index
timestamp: Thu 2007-10-11 12:26:46 +1000
message:
  Parse more than one segment of data from a single readv response if needed.
modified:
  bzrlib/index.py                index.py-20070712131115-lolkarso50vjr64s-1
  bzrlib/tests/test_index.py     test_index.py-20070712131115-lolkarso50vjr64s-2
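
The substance of the change is that _parse_region now loops, handing the same
readv block back to a per-segment parser until the block is exhausted. A
minimal standalone sketch of that looping pattern follows (illustration only,
not the bzrlib code; parse_region, parsed_ranges and parse_segment here are
hypothetical stand-ins for _parse_region, _parsed_byte_map and the new
_parse_segment method):

import bisect

def parse_region(parsed_ranges, offset, data, parse_segment):
    """Parse every still-unparsed segment inside a single readv block.

    parsed_ranges: sorted list of (start, end) byte ranges already parsed;
        assumed non-empty because the file header is parsed before any
        bisection lookups happen.
    parse_segment: stand-in for _parse_segment; it is expected to extend
        parsed_ranges as a side effect and to return True once it has
        consumed the last segment that can be taken from this block.
    """
    end = offset + len(data)
    while True:
        # find the parsed range starting at or below offset (the role
        # _parsed_byte_index plays in the real index code)
        index = bisect.bisect_right([start for start, _ in parsed_ranges],
                                    offset) - 1
        if end < parsed_ranges[index][1]:
            # the whole block now lies inside a parsed range: nothing left
            return
        if parse_segment(offset, data, end, index):
            # the segment just parsed was the last one in this block
            return

Before this revision the body ran only once, so a readv block that spanned an
already-parsed region could leave its tail unparsed; looping until
_parse_segment reports the last segment addresses that.
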
=== modified file 'bzrlib/index.py'
--- a/bzrlib/index.py	2007-10-08 04:51:31 +0000
+++ b/bzrlib/index.py	2007-10-11 02:26:46 +0000
@@ -725,12 +725,27 @@
         # trim the data.
         # end first:
         end = offset + len(data)
-        index = self._parsed_byte_index(offset)
+        while True:
+            index = self._parsed_byte_index(offset)
+            # Trivial test - if the end of the data is within the
+            # low-matching parsed range, we're done.
+            if end < self._parsed_byte_map[index][1]:
+                return
+            if self._parse_segment(offset, data, end, index):
+                return
+
+    def _parse_segment(self, offset, data, end, index):
+        """Parse one segment of data.
+
+        :param offset: Where 'data' begins in the file.
+        :param data: Some data to parse a segment of.
+        :param end: Where 'data' ends in the file.
+        :param index: The current index into the parsed bytes map.
+        :return: True if the parsed segment is the last possible one in the
+            range of data.
+        """
         # default is to use all data
         trim_end = None
-        # trivial check for entirely parsed data:
-        if end < self._parsed_byte_map[index][1]:
-            return
         # accommodate overlap with data before this.
         if offset < self._parsed_byte_map[index][1]:
             # overlaps the lower parsed region
@@ -756,30 +771,35 @@
             trim_end = None
             # do not strip to the last \n
             end_adjacent = True
+            last_segment = True
         elif index + 1 == len(self._parsed_byte_map):
             # at the end of the parsed data
             # use it all
             trim_end = None
             # but strip to the last \n
             end_adjacent = False
+            last_segment = True
         elif end == self._parsed_byte_map[index + 1][0]:
             # butts up against the next parsed region
             # use it all
             trim_end = None
             # do not strip to the last \n
             end_adjacent = True
+            last_segment = True
         elif end > self._parsed_byte_map[index + 1][0]:
             # overlaps into the next parsed region
             # only consider the unparsed data
             trim_end = self._parsed_byte_map[index + 1][0] - offset
             # do not strip to the last \n as we know it's an entire record
             end_adjacent = True
+            last_segment = end < self._parsed_byte_map[index + 1][1]
         else:
             # does not overlap into the next region
             # use it all
             trim_end = None
             # but strip to the last \n
             end_adjacent = False
+            last_segment = True
         # now find bytes to discard if needed
         if not start_adjacent:
             # work around python bug in rfind
@@ -839,6 +859,7 @@
             self._bisect_nodes[key] = node_value
             # print "parsed ", key
         self._parsed_bytes(offset, first_key, offset + len(trimmed_data), key)
+        return last_segment
 
     def _parsed_bytes(self, start, start_key, end, end_key):
         """Mark the bytes from start to end as parsed.
@@ -889,8 +910,6 @@
             # new entry
             self._parsed_byte_map.insert(index + 1, new_value)
             self._parsed_key_map.insert(index + 1, new_key)
-        assert sorted(self._parsed_byte_map) == self._parsed_byte_map
-        assert sorted(self._parsed_key_map) == self._parsed_key_map
 
     def _read_and_parse(self, readv_ranges):
         """Read the the ranges and parse the resulting data.
@@ -906,8 +925,8 @@
                     # this must be the start
                     assert offset == 0
                     offset, data = self._parse_header_from_bytes(data)
+                # print readv_ranges, "[%d:%d]" % (offset, offset + len(data))
                 self._parse_region(offset, data)
-                # print offset, len(data), data
 
     def _signature(self):
         """The file signature for this index type."""

=== modified file 'bzrlib/tests/test_index.py'
--- a/bzrlib/tests/test_index.py	2007-10-07 23:37:29 +0000
+++ b/bzrlib/tests/test_index.py	2007-10-11 02:26:46 +0000
@@ -441,6 +441,44 @@
         self.assertEqual([(None, make_key(26)), (make_key(31), make_key(48))],
             index._parsed_key_map)
 
+    def test_parsing_data_handles_parsed_contained_regions(self):
+        # the following pattern creates a parsed region that is wholly within a
+        # single result from the readv layer:
+        # .... single-read (readv-minimum-size) ...
+        # which then trims the start and end so the parsed size is < readv
+        # minimum.
+        # then a dual lookup (or a reference lookup for that matter) which
+        # abuts or overlaps the parsed region on both sides will need to
+        # discard the data in the middle, but parse the end as well.
+        #
+        # we test this by doing a single lookup to seed the data, then
+        # a lookup for two keys that are present and adjacent -
+        # we expect both to be found, and the parsed byte map to include the
+        # locations of both keys.
+        nodes = []
+        def make_key(number):
+            return (str(number) + 'X'*100,)
+        def make_value(number):
+            return 'Y'*100
+        for counter in range(64):
+            nodes.append((make_key(counter), make_value(counter), ()))
+        index = self.make_index(nodes=nodes)
+        result = index.lookup_keys_via_location(
+            [(index._size // 2, ('40', ))])
+        # and we should have a parse map that includes the header and the
+        # region that was parsed after trimming.
+        self.assertEqual([(0, 3972), (5001, 8914)], index._parsed_byte_map)
+        self.assertEqual([(None, make_key(26)), (make_key(31), make_key(48))],
+            index._parsed_key_map)
+        # now ask for two keys, right before and after the parsed region
+        result = index.lookup_keys_via_location(
+            [(4900, make_key(30)), (8914, make_key(49))])
+        self.assertEqual([
+            ((4900, make_key(30)), (index, make_key(30), make_value(30))),
+            ((8914, make_key(49)), (index, make_key(49), make_value(49))),
+            ],
+            result)
+
     def test_lookup_missing_key_answers_without_io_when_map_permits(self):
         # generate a big enough index that we only read some of it on a typical
         # bisection lookup.


