Rev 2903: Merge readv fixes. in http://people.ubuntu.com/~robertc/baz2.0/index

Robert Collins robertc at robertcollins.net
Thu Oct 11 03:16:35 BST 2007


At http://people.ubuntu.com/~robertc/baz2.0/index

------------------------------------------------------------
revno: 2903
revision-id: robertc at robertcollins.net-20071011021626-p917pq7ytv8o7woz
parent: robertc at robertcollins.net-20071008045131-n60qsvujlkg00oyy
parent: robertc at robertcollins.net-20071009015950-oiq91zspjpoeiz6t
committer: Robert Collins <robertc at robertcollins.net>
branch nick: index
timestamp: Thu 2007-10-11 12:16:26 +1000
message:
  Merge readv fixes.
modified:
  bzrlib/tests/test_transport_implementations.py test_transport_implementations.py-20051227111451-f97c5c7d5c49fce7
  bzrlib/transport/__init__.py   transport.py-20050711165921-4978aa7ce1285ad5
    ------------------------------------------------------------
    revno: 2890.1.3
    revision-id: robertc at robertcollins.net-20071009015950-oiq91zspjpoeiz6t
    parent: robertc at robertcollins.net-20071008044749-07yl1rtr3v9iw62o
    committer: Robert Collins <robertc at robertcollins.net>
    branch nick: readv
    timestamp: Tue 2007-10-09 11:59:50 +1000
    message:
      Review feedback and discussion with Martin: split out the readv offset adjustment into a new helper and document where the design might go next.
    modified:
      bzrlib/transport/__init__.py   transport.py-20050711165921-4978aa7ce1285ad5
    ------------------------------------------------------------
    revno: 2890.1.2
    revision-id: robertc at robertcollins.net-20071008044749-07yl1rtr3v9iw62o
    parent: robertc at robertcollins.net-20071006072506-fypnagvqddbyh4q9
    committer: Robert Collins <robertc at robertcollins.net>
    branch nick: readv
    timestamp: Mon 2007-10-08 14:47:49 +1000
    message:
      More readv adjust-for-latency tests and bugfixes.
    modified:
      bzrlib/tests/test_transport_implementations.py test_transport_implementations.py-20051227111451-f97c5c7d5c49fce7
      bzrlib/transport/__init__.py   transport.py-20050711165921-4978aa7ce1285ad5
=== modified file 'bzrlib/tests/test_transport_implementations.py'
--- a/bzrlib/tests/test_transport_implementations.py	2007-10-04 05:09:58 +0000
+++ b/bzrlib/tests/test_transport_implementations.py	2007-10-08 04:47:49 +0000
@@ -1551,7 +1551,22 @@
             self.assertTrue(result[0][0] <= 400)
             self.assertTrue(result[0][0] + data_len >= 1034)
             check_result_data(result)
-        
+        # Test from an observed failure case.
+        if transport.is_readonly():
+            file('a', 'w').write('a'*1024*1024)
+        else:
+            transport.put_bytes('a', 'a'*1024*1024)
+        broken_vector = [(465219, 800), (225221, 800), (445548, 800),
+            (225037, 800), (221357, 800), (437077, 800), (947670, 800),
+            (465373, 800), (947422, 800)]
+        results = list(transport.readv('a', broken_vector, True, 1024*1024))
+        found_items = [False]*9
+        for pos, (start, length) in enumerate(broken_vector):
+            # check the range is covered by the result
+            for offset, data in results:
+                if offset <= start and start + length <= offset + len(data):
+                    found_items[pos] = True
+        self.assertEqual([True]*9, found_items)
 
     def test_get_with_open_write_stream_sees_all_content(self):
         t = self.get_transport()

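The new test asserts coverage rather than an exact result shape: each
requested (start, length) range must fall wholly inside some single
returned (offset, data) chunk, while how the transport groups and expands
the reads is deliberately left open. Here is the same check extracted into
a standalone function for illustration (ranges_covered is a hypothetical
name, not a bzrlib helper):

    def ranges_covered(requested, results):
        """Return one boolean per (start, length) range in requested,
        True when that range lies entirely inside a single
        (offset, data) chunk in results."""
        covered = []
        for start, length in requested:
            ok = False
            for offset, data in results:
                if offset <= start and start + length <= offset + len(data):
                    ok = True
                    break
            covered.append(ok)
        return covered

    # Mirrors the assertion in the test: every request must be satisfied.
    requested = [(465219, 800), (225221, 800)]
    results = [(224000, 'x' * 4096), (465000, 'x' * 2048)]
    assert ranges_covered(requested, results) == [True, True]
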
=== modified file 'bzrlib/transport/__init__.py'
--- a/bzrlib/transport/__init__.py	2007-10-06 07:25:06 +0000
+++ b/bzrlib/transport/__init__.py	2007-10-09 01:59:50 +0000
@@ -662,48 +662,16 @@
         :return: A list or generator of (offset, data) tuples
         """
         if adjust_for_latency:
-            offsets = sorted(offsets)
-            # short circuit empty requests
-            if len(offsets) == 0:
-                def empty_yielder():
-                    # Quick thunk to stop this function becoming a generator
-                    # itself, rather we return a generator that has nothing to
-                    # yield.
-                    if False:
-                        yield None
-                return empty_yielder()
-            # expand by page size at either end
-            maximum_expansion = self.recommended_page_size()
-            new_offsets = []
-            for offset, length in offsets:
-                expansion = maximum_expansion - length
-                if expansion < 0:
-                    # we're asking for more than the minimum read anyway.
-                    expansion = 0
-                reduction = expansion / 2
-                new_offset = offset - reduction
-                new_length = length + expansion
-                if new_offset < 0:
-                    # don't ask for anything < 0
-                    new_offset = 0
-                if (upper_limit is not None and
-                    new_offset + new_length > upper_limit):
-                    new_length = upper_limit - new_offset
-                new_offsets.append((new_offset, new_length))
-            # combine the expanded offsets
-            offsets = []
-            current_offset, current_length = new_offsets[0]
-            current_finish = current_length + current_offset
-            for offset, length in new_offsets[1:]:
-                if offset > current_finish:
-                    offsets.append((current_offset, current_length))
-                    current_offset = offset
-                    current_length = length
-                    continue
-                finish = offset + length
-                if finish > current_finish:
-                    current_finish = finish
-            offsets.append((current_offset, current_length))
+            # Design note: we may wish to use different offset-expansion
+            # algorithms per transport, e.g. page-aligned expansion for
+            # local disk. If so, consider the following structure:
+            #  - a test that transport.readv uses self._offset_expander
+            #    (or some similar attribute) to do the expansion
+            #  - a test per transport that it has a known-good expander
+            #  - unit tests for each offset expander
+            #  - a set of tests for the expander interface, giving baseline
+            #    behaviour (repurposing the current adjust_for_latency tests).
+            offsets = self._sort_expand_and_combine(offsets, upper_limit)
         return self._readv(relpath, offsets)
 
     def _readv(self, relpath, offsets):
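
The design note above sketches a pluggable expansion policy. As a rough
illustration of the interface it contemplates, here is a hypothetical
page-aligned expander of the kind a local-disk transport might install;
the class name, the callable interface, and the _offset_expander attribute
are all inventions for this sketch and do not exist in bzrlib at this
revision:

    class PageAlignedExpander(object):
        """Hypothetical offset expander: round each request outward to
        page boundaries, then merge requests that touch or overlap."""

        def __init__(self, page_size):
            self.page_size = page_size

        def __call__(self, offsets, upper_limit):
            page = self.page_size
            expanded = []
            for offset, length in sorted(offsets):
                # round the start down and the end up to page boundaries
                start = (offset // page) * page
                end = ((offset + length + page - 1) // page) * page
                if upper_limit is not None:
                    end = min(end, upper_limit)
                expanded.append([start, end])
            merged = expanded[:1]
            for start, end in expanded[1:]:
                if start <= merged[-1][1]:
                    # touches or overlaps the previous request: extend it
                    merged[-1][1] = max(merged[-1][1], end)
                else:
                    merged.append([start, end])
            return [(start, end - start) for start, end in merged]

Under this structure readv would reduce to something like
"offsets = self._offset_expander(offsets, upper_limit)", with each
transport installing the expander best suited to its storage. For example,
PageAlignedExpander(4096)([(100, 50), (4000, 200)], None) returns
[(0, 8192)]: both small reads land in the first two pages and coalesce
into a single request.
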
@@ -760,6 +728,65 @@
                 yield cur_offset_and_size[0], this_data
                 cur_offset_and_size = offset_stack.next()
 
+    def _sort_expand_and_combine(self, offsets, upper_limit):
+        """Helper for readv.
+
+        :param offsets: A readv vector - (offset, length) tuples.
+        :param upper_limit: The highest byte offset that may be requested.
+        :return: A readv vector that will read all the regions requested by
+            offsets, in start-to-end order, with no duplicated regions,
+            expanded by the transport's recommended page size.
+        """
+        offsets = sorted(offsets)
+        # short circuit empty requests
+        if len(offsets) == 0:
+            def empty_yielder():
+                # Quick thunk to stop this function becoming a generator
+                # itself, rather we return a generator that has nothing to
+                # yield.
+                if False:
+                    yield None
+            return empty_yielder()
+        # expand by page size at either end
+        maximum_expansion = self.recommended_page_size()
+        new_offsets = []
+        for offset, length in offsets:
+            expansion = maximum_expansion - length
+            if expansion < 0:
+                # we're asking for more than the minimum read anyway.
+                expansion = 0
+            reduction = expansion / 2
+            new_offset = offset - reduction
+            new_length = length + expansion
+            if new_offset < 0:
+                # don't ask for anything < 0
+                new_offset = 0
+            if (upper_limit is not None and
+                new_offset + new_length > upper_limit):
+                new_length = upper_limit - new_offset
+            new_offsets.append((new_offset, new_length))
+        # combine the expanded offsets
+        offsets = []
+        current_offset, current_length = new_offsets[0]
+        current_finish = current_length + current_offset
+        for offset, length in new_offsets[1:]:
+            finish = offset + length
+            if offset > current_finish:
+                # there is a gap, output the current accumulator and start
+                # a new one for the region we're examining.
+                offsets.append((current_offset, current_length))
+                current_offset = offset
+                current_length = length
+                current_finish = finish
+                continue
+            if finish > current_finish:
+                # extend the current accumulator to the end of the region
+                # we're examining.
+                current_finish = finish
+                current_length = finish - current_offset
+        offsets.append((current_offset, current_length))
+        return offsets
+
     @staticmethod
     def _coalesce_offsets(offsets, limit, fudge_factor):
         """Yield coalesced offsets.

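To make the merged behaviour of _sort_expand_and_combine concrete, the
following standalone transcription of its expand-then-combine logic can be
run against entries from the test vector above. recommended_page_size() is
transport-specific; 4096 is assumed here purely for illustration:

    def sort_expand_and_combine(offsets, upper_limit, page_size):
        """Standalone copy of the helper's logic for experimentation;
        page_size stands in for self.recommended_page_size()."""
        expanded = []
        for offset, length in sorted(offsets):
            expansion = max(page_size - length, 0)
            new_offset = max(offset - expansion // 2, 0)
            new_length = length + expansion
            if (upper_limit is not None and
                    new_offset + new_length > upper_limit):
                new_length = upper_limit - new_offset
            expanded.append((new_offset, new_length))
        if not expanded:
            return []
        combined = [expanded[0]]
        for offset, length in expanded[1:]:
            current_offset, current_length = combined[-1]
            current_finish = current_offset + current_length
            finish = offset + length
            if offset > current_finish:
                # gap: start a new accumulated request
                combined.append((offset, length))
            elif finish > current_finish:
                # overlap: extend the current request to cover both
                combined[-1] = (current_offset, finish - current_offset)
        return combined

    # Two overlapping reads from the test vector: each 800-byte request
    # is padded out to 4096 bytes (1648 before, 1648 after), and the two
    # padded ranges then merge into one 4280-byte request:
    print(sort_expand_and_combine([(225221, 800), (225037, 800)], None, 4096))
    # -> [(223389, 4280)]

This matches the coverage-style assertion in the test: callers get back
fewer, larger reads whose union still spans every requested range.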

