Rev 4154: Add streaming from a stacked branch when the sort order is compatible with doing so. in http://people.ubuntu.com/~robertc/baz2.0/pending/branch.stacked.streams
Robert Collins
robertc at robertcollins.net
Tue Mar 17 06:18:25 GMT 2009
At http://people.ubuntu.com/~robertc/baz2.0/pending/branch.stacked.streams
------------------------------------------------------------
revno: 4154
revision-id: robertc at robertcollins.net-20090317061816-5usnnyaoj0e0322t
parent: robertc at robertcollins.net-20090317030232-y7nhlxwe1i1q3xec
committer: Robert Collins <robertc at robertcollins.net>
branch nick: branch.stacked.streams
timestamp: Tue 2009-03-17 17:18:16 +1100
message:
Add streaming from a stacked branch when the sort order is compatible with doing so.
=== modified file 'NEWS'
--- a/NEWS 2009-03-17 00:50:47 +0000
+++ b/NEWS 2009-03-17 06:18:16 +0000
@@ -25,6 +25,12 @@
IMPROVEMENTS:
+ * Branching from a stacked branch using ``bzr*://`` will now stream
+ the data when the target repository does not need topological
+ ordering, reducing round trips and network overhead. This uses the
+ existing smart server methods added in 1.13, so will work on any
+ 1.13 or newer server. (Robert Collins, Andrew Bennetts)
+
* ``bzr ignore`` gives a more informative message when existing
version controlled files match the ignore pattern. (Neil
Martinsen-Burrell, #248895)
=== modified file 'bzrlib/graph.py'
--- a/bzrlib/graph.py 2009-03-10 04:41:22 +0000
+++ b/bzrlib/graph.py 2009-03-17 06:18:16 +0000
@@ -1461,7 +1461,7 @@
a SearchResult from a smart server, in which case the keys list is
not necessarily immediately available.
"""
- self._recipe = (start_keys, exclude_keys, key_count)
+ self._recipe = ('search', start_keys, exclude_keys, key_count)
self._keys = frozenset(keys)
def get_recipe(self):
@@ -1474,13 +1474,13 @@
added to the exclude list (or else ghost filling may alter the
results).
- :return: A tuple (start_keys_set, exclude_keys_set, revision_count). To
- recreate the results of this search, create a breadth first
- searcher on the same graph starting at start_keys. Then call next()
- (or next_with_ghosts()) repeatedly, and on every result, call
- stop_searching_any on any keys from the exclude_keys set. The
- revision_count value acts as a trivial cross-check - the found
- revisions of the new search should have as many elements as
+ :return: A tuple ('search', start_keys_set, exclude_keys_set,
+ revision_count). To recreate the results of this search, create a
+ breadth first searcher on the same graph starting at start_keys.
+ Then call next() (or next_with_ghosts()) repeatedly, and on every
+ result, call stop_searching_any on any keys from the exclude_keys
+ set. The revision_count value acts as a trivial cross-check - the
+ found revisions of the new search should have as many elements as
revision_count. If it does not, then additional revisions have been
ghosted since the search was executed the first time and the second
time.
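To make the contract above concrete, here is a minimal sketch of the replay loop, assuming a bzrlib Graph-style object; it mirrors the loop the smart server runs in bzrlib/smart/repository.py (see the hunk below):

    def replay_search(graph, recipe):
        # recipe is ('search', start_keys, exclude_keys, revision_count)
        kind, start_keys, exclude_keys, revision_count = recipe
        search = graph._make_breadth_first_searcher(start_keys)
        while True:
            try:
                next_revs = search.next()
            except StopIteration:
                break
            # Stop at the recorded cut points so ghost filling cannot
            # alter the result.
            search.stop_searching_any(exclude_keys.intersection(next_revs))
        result = search.get_result()
        if result.get_recipe()[3] != revision_count:
            # Fewer revisions than recorded means something has been
            # ghosted since the recipe was captured.
            raise ValueError('recipe no longer reproducible')
        return result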
@@ -1494,6 +1494,35 @@
"""
return self._keys
+ def is_empty(self):
+ """Return true if the search lists 1 or more revisions."""
+ return self._recipe[3] == 0
+
+ def refine(self, seen, referenced):
+ """Create a new search by refining this search.
+
+ :param seen: Revisions that have been satisfied.
+ :param referenced: Revision references observed while satisfying some
+ of this search.
+ """
+ start = self._recipe[1]
+ exclude = self._recipe[2]
+ count = self._recipe[3]
+ keys = self.get_keys()
+ # New heads = referenced + old heads - seen things - exclude
+ pending_refs = set(referenced)
+ pending_refs.update(start)
+ pending_refs.difference_update(seen)
+ pending_refs.difference_update(exclude)
+ # New exclude = old exclude + satisfied heads
+ seen_heads = start.intersection(seen)
+ exclude.update(seen_heads)
+ # keys gets seen removed
+ keys = keys - seen
+ # length is reduced by len(seen)
+ count -= len(seen)
+ return SearchResult(pending_refs, exclude, count, keys)
+
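The set arithmetic in refine() can be checked with plain Python sets ('null:' standing in for NULL_REVISION); these values mirror TestSearchResultRefine.test_refine below:

    start = set(['tip', 'tag'])                  # original start keys
    exclude = set(['null:'])                     # original stop keys
    keys = set(['tip', 'mid', 'tag', 'base'])    # revisions in the result
    seen = set(['tip'])                          # satisfied already
    referenced = set(['mid'])                    # parents seen on the way

    pending_refs = (referenced | start) - seen - exclude
    assert pending_refs == set(['mid', 'tag'])   # new start keys
    exclude = exclude | (start & seen)
    assert exclude == set(['null:', 'tip'])      # new stop keys
    assert len(keys - seen) == 3                 # new revision count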
class PendingAncestryResult(object):
"""A search result that will reconstruct the ancestry for some graph heads.
@@ -1509,11 +1538,21 @@
:param repo: a repository to use to generate the ancestry for the given
heads.
"""
- self.heads = heads
+ self.heads = frozenset(heads)
self.repo = repo
def get_recipe(self):
- raise NotImplementedError(self.get_recipe)
+ """Return a recipe that can be used to replay this search.
+
+ The recipe allows reconstruction of the same results at a later date.
+
+ :seealso SearchResult.get_recipe:
+
+ :return: A tuple ('proxy-search', start_keys_set, set(), -1)
+ To recreate this result, create a PendingAncestryResult with the
+ start_keys_set.
+ """
+ return ('proxy-search', self.heads, set(), -1)
def get_keys(self):
"""See SearchResult.get_keys.
@@ -1529,6 +1568,23 @@
if key != NULL_REVISION]
return keys
+ def is_empty(self):
+ """Return true if the search lists 1 or more revisions."""
+ if revision.NULL_REVISION in self.heads:
+ return len(self.heads) == 1
+ else:
+ return len(self.heads) == 0
+
+ def refine(self, seen, referenced):
+ """Create a new search by refining this search.
+
+ :param seen: Revisions that have been satisfied.
+ :param referenced: Revision references observed while satisfying some
+ of this search.
+ """
+ referenced = self.heads.union(referenced)
+ return PendingAncestryResult(referenced - seen, self.repo)
+
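PendingAncestryResult.refine() is simpler, as there is no exclude set or revision count to maintain. With plain sets, matching TestPendingAncestryResultRefine.test_refine below:

    heads = frozenset(['tip', 'tag'])
    seen = set(['tip'])
    referenced = set(['mid'])
    new_heads = heads.union(referenced) - seen
    assert new_heads == set(['mid', 'tag'])
    # The search is empty once only NULL_REVISION (or nothing) remains.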
def collapse_linear_regions(parent_map):
"""Collapse regions of the graph that are 'linear'.
=== modified file 'bzrlib/knit.py'
--- a/bzrlib/knit.py 2009-03-12 05:44:43 +0000
+++ b/bzrlib/knit.py 2009-03-17 06:18:16 +0000
@@ -299,7 +299,19 @@
if self._network_bytes is None:
self._create_network_bytes()
return self._network_bytes
+ if ('-ft-' in self.storage_kind and
+ storage_kind in ('chunked', 'fulltext')):
+ adapter_key = (self.storage_kind, 'fulltext')
+ adapter_factory = adapter_registry.get(adapter_key)
+ adapter = adapter_factory(None)
+ bytes = adapter.get_bytes(self)
+ if storage_kind == 'chunked':
+ return [bytes]
+ else:
+ return bytes
if self._knit is not None:
+ # Not redundant with direct conversion above - that only handles
+ # fulltext cases.
if storage_kind == 'chunked':
return self._knit.get_lines(self.key[0])
elif storage_kind == 'fulltext':
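The new fulltext path above goes through bzrlib's adapter_registry, which is keyed by (source storage kind, target kind) pairs. A toy sketch of that dispatch shape, with a hypothetical record held as a dict rather than the real adapter classes:

    # Hypothetical registry; bzrlib registers real adapter factories.
    adapters = {('knit-ft-gz', 'fulltext'): lambda rec: rec['bytes']}

    def get_bytes_as(record, storage_kind):
        # Fulltext-backed ('-ft-') records can satisfy both targets via
        # a fulltext adapter; 'chunked' just wraps the bytes in a list.
        if '-ft-' in record['storage_kind'] and storage_kind in (
                'chunked', 'fulltext'):
            adapter = adapters[(record['storage_kind'], 'fulltext')]
            data = adapter(record)
            if storage_kind == 'chunked':
                return [data]
            return data
        raise ValueError('unsupported conversion: %s' % storage_kind)

    rec = {'storage_kind': 'knit-ft-gz', 'bytes': 'hello\n'}
    assert get_bytes_as(rec, 'chunked') == ['hello\n']
    assert get_bytes_as(rec, 'fulltext') == 'hello\n'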
=== modified file 'bzrlib/remote.py'
--- a/bzrlib/remote.py 2009-03-16 08:26:29 +0000
+++ b/bzrlib/remote.py 2009-03-17 06:18:16 +0000
@@ -1240,7 +1240,7 @@
stop_keys = result_parents.difference(start_set)
included_keys = start_set.intersection(result_parents)
start_set.difference_update(included_keys)
- recipe = (start_set, stop_keys, len(parents_map))
+ recipe = ('manual', start_set, stop_keys, len(parents_map))
body = self._serialise_search_recipe(recipe)
path = self.bzrdir._path_for_remote_call(self._client)
for key in keys:
@@ -1505,9 +1505,9 @@
:param recipe: A search recipe (kind, start, stop, count).
:return: Serialised bytes.
"""
- start_keys = ' '.join(recipe[0])
- stop_keys = ' '.join(recipe[1])
- count = str(recipe[2])
+ start_keys = ' '.join(recipe[1])
+ stop_keys = ' '.join(recipe[2])
+ count = str(recipe[3])
return '\n'.join((start_keys, stop_keys, count))
def _serialise_search_result(self, search_result):
@@ -1516,7 +1516,7 @@
parts.extend(search_result.heads)
else:
recipe = search_result.get_recipe()
- parts = ['search', self._serialise_search_recipe(recipe)]
+ parts = [recipe[0], self._serialise_search_recipe(recipe)]
return '\n'.join(parts)
def autopack(self):
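Taken together, the two methods above produce a simple line-oriented body: the recipe kind, space-separated start keys, space-separated stop keys, then the count. For example:

    recipe = ('search', set(['rev2']), set(['rev1']), 1)
    body = '\n'.join((' '.join(recipe[1]), ' '.join(recipe[2]),
                      str(recipe[3])))
    wire = '\n'.join([recipe[0], body])
    assert wire == 'search\nrev2\nrev1\n1'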
@@ -1594,20 +1594,49 @@
"""Stream data from a remote server."""
def get_stream(self, search):
- # streaming with fallback repositories is not well defined yet: The
- # remote repository cannot see the fallback repositories, and thus
- # cannot satisfy the entire search in the general case. Likewise the
- # fallback repositories cannot reify the search to determine what they
- # should send. It likely needs a return value in the stream listing the
- # edge of the search to resume from in fallback repositories.
- if self.from_repository._fallback_repositories:
- return repository.StreamSource.get_stream(self, search)
- repo = self.from_repository
+ if (self.from_repository._fallback_repositories and
+ self.to_format._fetch_order == 'topological'):
+ return self._real_stream(self.from_repository, search)
+ return self.missing_parents_chain(search, [self.from_repository] +
+ self.from_repository._fallback_repositories)
+
+ def _real_stream(self, repo, search):
+ """Get a stream for search from repo.
+
+ This never calls RemoteStreamSource.get_stream, and is a helper
+ for RemoteStreamSource._get_stream to allow getting a stream
+ reliably whether falling back because of old servers or trying
+ to stream from a non-RemoteRepository (which the stacked support
+ code will do).
+ """
+ source = repo._get_source(self.to_format)
+ if isinstance(source, RemoteStreamSource):
+ return repository.StreamSource.get_stream(source, search)
+ return source.get_stream(search)
+
+ def _get_stream(self, repo, search):
+ """Core worker to get a stream from repo for search.
+
+ This is used by both get_stream and the stacking support logic. It
+ deliberately gets a stream for repo which does not need to be
+ self.from_repository. In the event that repo is not Remote, or
+ cannot do a smart stream, a fallback is made to the generic
+ repository.StreamSource.get_stream() interface, via self._real_stream.
+
+ In the event of stacking, streams from _get_stream will not
+ contain all the data for search - this is normal (see get_stream).
+
+ :param repo: A repository.
+ :param search: A search.
+ """
+ # Fallbacks may be non-smart
+ if not isinstance(repo, RemoteRepository):
+ return self._real_stream(repo, search)
client = repo._client
medium = client._medium
if medium._is_remote_before((1, 13)):
- # No possible way this can work.
- return repository.StreamSource.get_stream(self, search)
+ # streaming was added in 1.13
+ return self._real_stream(repo, search)
path = repo.bzrdir._path_for_remote_call(client)
try:
search_bytes = repo._serialise_search_result(search)
@@ -1617,7 +1646,7 @@
response_tuple, response_handler = response
except errors.UnknownSmartMethod:
medium._remember_remote_is_before((1,13))
- return repository.StreamSource.get_stream(self, search)
+ return self._real_stream(repo, search)
if response_tuple[0] != 'ok':
raise errors.UnexpectedSmartServerResponse(response_tuple)
byte_stream = response_handler.read_streamed_body()
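The version handling above is bzrlib's usual pattern for new smart verbs: skip the attempt when the server is already known to be old, and cache a negative result on UnknownSmartMethod so later calls avoid the wasted round trip. Reduced to its shape (do_new_call and do_fallback are hypothetical callables; the medium API is as used above):

    from bzrlib import errors

    def call_with_fallback(medium, do_new_call, do_fallback,
                           version=(1, 13)):
        if medium._is_remote_before(version):
            # Known-old server: no point trying the new verb.
            return do_fallback()
        try:
            return do_new_call()
        except errors.UnknownSmartMethod:
            # Remember for next time, then degrade gracefully.
            medium._remember_remote_is_before(version)
            return do_fallback()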
@@ -1628,6 +1657,40 @@
src_format.network_name(), repo._format.network_name()))
return stream
+ def missing_parents_chain(self, search, sources):
+ """Chain multiple streams together to handle stacking.
+
+ :param search: The overall search to satisfy with streams.
+ :param sources: A list of Repository objects to query.
+ """
+ self.serialiser = self.to_format._serializer
+ self.seen_revs = set()
+ self.referenced_revs = set()
+ # If there are heads in the search, or the key count is > 0, we are not
+ # done.
+ while not search.is_empty() and len(sources) > 1:
+ source = sources.pop(0)
+ stream = self._get_stream(source, search)
+ for kind, substream in stream:
+ if kind != 'revisions':
+ yield kind, substream
+ else:
+ yield kind, self.missing_parents_rev_handler(substream)
+ search = search.refine(self.seen_revs, self.referenced_revs)
+ self.seen_revs = set()
+ self.referenced_revs = set()
+ if not search.is_empty():
+ for kind, stream in self._get_stream(sources[0], search):
+ yield kind, stream
+
+ def missing_parents_rev_handler(self, substream):
+ for content in substream:
+ revision_bytes = content.get_bytes_as('fulltext')
+ revision = self.serialiser.read_revision_from_string(revision_bytes)
+ self.seen_revs.add(content.key[-1])
+ self.referenced_revs.update(revision.parent_ids)
+ yield content
+
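Concretely, for a branch stacked on a trunk: the stacked repository can satisfy only its own revisions, so the wrapped revisions substream records what was delivered (seen_revs) and which parents were named (referenced_revs), and refine() turns the unsatisfied references into the search for the next source. Traced with plain sets, for tip on the stacked branch with parent mid on the trunk:

    heads = set(['tip'])               # the overall search
    seen_revs = set(['tip'])           # delivered by the stacked repo
    referenced_revs = set(['mid'])     # parents named along the way
    new_heads = (heads | referenced_revs) - seen_revs
    assert new_heads == set(['mid'])   # what the trunk is asked for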
class RemoteBranchLockableFiles(LockableFiles):
"""A 'LockableFiles' implementation that talks to a smart server.
=== modified file 'bzrlib/repository.py'
--- a/bzrlib/repository.py 2009-03-16 22:22:06 +0000
+++ b/bzrlib/repository.py 2009-03-17 06:18:16 +0000
@@ -3027,7 +3027,7 @@
raise AssertionError(
"InterPackRepo.fetch doesn't support "
"fetching multiple heads yet.")
- revision_id = fetch_spec.heads[0]
+ revision_id = list(fetch_spec.heads)[0]
fetch_spec = None
if revision_id is None:
# TODO:
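The repository.py change is a knock-on effect of PendingAncestryResult.heads becoming a frozenset (see the graph.py hunk above): frozensets cannot be indexed, so the single head is extracted via a list. For example:

    heads = frozenset(['rev-1'])
    # heads[0] raises TypeError; convert to a list first.
    revision_id = list(heads)[0]   # fine when there is exactly one head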
=== modified file 'bzrlib/smart/repository.py'
--- a/bzrlib/smart/repository.py 2009-03-16 05:55:42 +0000
+++ b/bzrlib/smart/repository.py 2009-03-17 06:18:16 +0000
@@ -97,7 +97,7 @@
break
search.stop_searching_any(exclude_keys.intersection(next_revs))
search_result = search.get_result()
- if search_result.get_recipe()[2] != revision_count:
+ if search_result.get_recipe()[3] != revision_count:
# we got back a different amount of data than expected, this
# gets reported as NoSuchRevision, because less revisions
# indicates missing revisions, and more should never happen as
=== modified file 'bzrlib/tests/blackbox/test_branch.py'
--- a/bzrlib/tests/blackbox/test_branch.py 2009-03-13 02:55:52 +0000
+++ b/bzrlib/tests/blackbox/test_branch.py 2009-03-17 06:18:16 +0000
@@ -291,6 +291,25 @@
# upwards without agreement from bzr's network support maintainers.
self.assertEqual(10, rpc_count)
+ def test_branch_from_trivial_stacked_branch_streaming_acceptance(self):
+ self.setup_smart_server_with_call_log()
+ t = self.make_branch_and_tree('trunk')
+ for count in range(8):
+ t.commit(message='commit %d' % count)
+ tree2 = t.branch.bzrdir.sprout('feature', stacked=True
+ ).open_workingtree()
+ tree2.commit('feature change')
+ self.reset_smart_call_log()
+ out, err = self.run_bzr(['branch', self.get_url('feature'),
+ 'local-target'])
+ rpc_count = len(self.hpss_calls)
+ # This figure represents the amount of work to perform this use case. It
+ # is entirely ok to reduce this number if a test fails due to rpc_count
+ # being too low. If rpc_count increases, more network roundtrips have
+ # become necessary for this use case. Please do not adjust this number
+ # upwards without agreement from bzr's network support maintainers.
+ self.assertEqual(74, rpc_count)
+
class TestRemoteBranch(TestCaseWithSFTPServer):
=== modified file 'bzrlib/tests/interrepository_implementations/test_interrepository.py'
--- a/bzrlib/tests/interrepository_implementations/test_interrepository.py 2009-01-17 01:30:58 +0000
+++ b/bzrlib/tests/interrepository_implementations/test_interrepository.py 2009-03-17 06:18:16 +0000
@@ -140,7 +140,8 @@
self.assertFalse(repo_b.has_revision('rev2'))
result = repo_b.search_missing_revision_ids(repo_a)
self.assertEqual(set(['rev2']), result.get_keys())
- self.assertEqual((set(['rev2']), set(['rev1']), 1), result.get_recipe())
+ self.assertEqual(('search', set(['rev2']), set(['rev1']), 1),
+ result.get_recipe())
def test_search_missing_revision_ids_absent_requested_raises(self):
# Asking for missing revisions with a tip that is itself absent in the
@@ -166,7 +167,7 @@
repo_a = self.bzrdir.open_repository()
result = repo_b.search_missing_revision_ids(repo_a, revision_id='rev1')
self.assertEqual(set(['rev1']), result.get_keys())
- self.assertEqual((set(['rev1']), set([NULL_REVISION]), 1),
+ self.assertEqual(('search', set(['rev1']), set([NULL_REVISION]), 1),
result.get_recipe())
def test_fetch_fetches_signatures_too(self):
=== modified file 'bzrlib/tests/test_graph.py'
--- a/bzrlib/tests/test_graph.py 2009-03-10 00:43:52 +0000
+++ b/bzrlib/tests/test_graph.py 2009-03-17 06:18:16 +0000
@@ -992,6 +992,9 @@
:param next: A callable to advance the search.
"""
for seen, recipe, included_keys, starts, stops in instructions:
+ # Adjust for recipe contract changes, which are uniform across all
+ # the current tests.
+ recipe = ('search',) + recipe
next()
if starts is not None:
search.start_searching(starts)
@@ -1011,7 +1014,7 @@
search = graph._make_breadth_first_searcher(['head'])
# At the start, nothing has been seen, to its all excluded:
result = search.get_result()
- self.assertEqual((set(['head']), set(['head']), 0),
+ self.assertEqual(('search', set(['head']), set(['head']), 0),
result.get_recipe())
self.assertEqual(set(), result.get_keys())
self.assertEqual(set(), search.seen)
@@ -1043,7 +1046,7 @@
search.start_searching(['head'])
# head has been seen:
result = search.get_result()
- self.assertEqual((set(['head']), set(['child']), 1),
+ self.assertEqual(('search', set(['head']), set(['child']), 1),
result.get_recipe())
self.assertEqual(set(['head']), result.get_keys())
self.assertEqual(set(['head']), search.seen)
@@ -1203,7 +1206,7 @@
self.assertRaises(StopIteration, search.next)
self.assertEqual(set(['head', 'ghost', NULL_REVISION]), search.seen)
result = search.get_result()
- self.assertEqual((set(['ghost', 'head']), set(['ghost']), 2),
+ self.assertEqual(('search', set(['ghost', 'head']), set(['ghost']), 2),
result.get_recipe())
self.assertEqual(set(['head', NULL_REVISION]), result.get_keys())
# using next_with_ghosts:
@@ -1212,7 +1215,7 @@
self.assertRaises(StopIteration, search.next)
self.assertEqual(set(['head', 'ghost', NULL_REVISION]), search.seen)
result = search.get_result()
- self.assertEqual((set(['ghost', 'head']), set(['ghost']), 2),
+ self.assertEqual(('search', set(['ghost', 'head']), set(['ghost']), 2),
result.get_recipe())
self.assertEqual(set(['head', NULL_REVISION]), result.get_keys())
@@ -1527,7 +1530,7 @@
self.assertCollapsed(d, d)
-class TestPendingAncestryResult(TestCaseWithMemoryTransport):
+class TestPendingAncestryResultGetKeys(TestCaseWithMemoryTransport):
"""Tests for bzrlib.graph.PendingAncestryResult."""
def test_get_keys(self):
@@ -1540,8 +1543,8 @@
repo = builder.get_branch().repository
repo.lock_read()
self.addCleanup(repo.unlock)
- par = _mod_graph.PendingAncestryResult(['rev-2'], repo)
- self.assertEqual(set(['rev-1', 'rev-2']), set(par.get_keys()))
+ result = _mod_graph.PendingAncestryResult(['rev-2'], repo)
+ self.assertEqual(set(['rev-1', 'rev-2']), set(result.get_keys()))
def test_get_keys_excludes_null(self):
# Make a 'graph' with an iter_ancestry that returns NULL_REVISION
@@ -1550,8 +1553,55 @@
class StubGraph(object):
def iter_ancestry(self, keys):
return [(NULL_REVISION, ()), ('foo', (NULL_REVISION,))]
- par = _mod_graph.PendingAncestryResult(['rev-3'], None)
- par_keys = par._get_keys(StubGraph())
+ result = _mod_graph.PendingAncestryResult(['rev-3'], None)
+ result_keys = result._get_keys(StubGraph())
# Only the non-null keys from the ancestry appear.
- self.assertEqual(set(['foo']), set(par_keys))
-
+ self.assertEqual(set(['foo']), set(result_keys))
+
+
+class TestPendingAncestryResultRefine(TestGraphBase):
+
+ def test_refine(self):
+ # Used when pulling from a stacked repository, so test some revisions
+ # being satisfied from the stacking branch.
+ g = self.make_graph(
+ {"tip":["mid"], "mid":["base"], "tag":["base"],
+ "base":[NULL_REVISION], NULL_REVISION:[]})
+ result = _mod_graph.PendingAncestryResult(['tip', 'tag'], None)
+ result = result.refine(set(['tip']), set(['mid']))
+ self.assertEqual(set(['mid', 'tag']), result.heads)
+ result = result.refine(set(['mid', 'tag', 'base']),
+ set([NULL_REVISION]))
+ self.assertEqual(set([NULL_REVISION]), result.heads)
+ self.assertTrue(result.is_empty())
+
+
+class TestSearchResultRefine(TestGraphBase):
+
+ def test_refine(self):
+ # Used when pulling from a stacked repository, so test some revisions
+ # being satisfied from the stacking branch.
+ g = self.make_graph(
+ {"tip":["mid"], "mid":["base"], "tag":["base"],
+ "base":[NULL_REVISION], NULL_REVISION:[]})
+ result = _mod_graph.SearchResult(set(['tip', 'tag']),
+ set([NULL_REVISION]), 4, set(['tip', 'mid', 'tag', 'base']))
+ result = result.refine(set(['tip']), set(['mid']))
+ recipe = result.get_recipe()
+ # We should be starting from tag (original head) and mid (seen ref)
+ self.assertEqual(set(['mid', 'tag']), recipe[1])
+ # We should be stopping at NULL (original stop) and tip (seen head)
+ self.assertEqual(set([NULL_REVISION, 'tip']), recipe[2])
+ self.assertEqual(3, recipe[3])
+ result = result.refine(set(['mid', 'tag', 'base']),
+ set([NULL_REVISION]))
+ recipe = result.get_recipe()
+ # We should be starting from nothing (NULL was known as a cut point)
+ self.assertEqual(set([]), recipe[1])
+ # We should be stopping at NULL (original stop) and tip (seen head) and
+ # tag (seen head) and mid (seen mid-point head). We could come back and
+ # define this as not including mid, for minimal results, but it is
+ # still 'correct' to include mid, and simpler/easier.
+ self.assertEqual(set([NULL_REVISION, 'tip', 'tag', 'mid']), recipe[2])
+ self.assertEqual(0, recipe[3])
+ self.assertTrue(result.is_empty())
=== modified file 'bzrlib/tests/test_remote.py'
--- a/bzrlib/tests/test_remote.py 2009-03-17 03:02:32 +0000
+++ b/bzrlib/tests/test_remote.py 2009-03-17 06:18:16 +0000
@@ -2256,15 +2256,13 @@
def prepare_stacked_remote_branch(self):
"""Get stacked_upon and stacked branches with content in each."""
- smart_server = server.SmartTCPServer_for_testing()
- smart_server.setUp()
- self.addCleanup(smart_server.tearDown)
+ self.setup_smart_server_with_call_log()
tree1 = self.make_branch_and_tree('tree1', format='1.9')
tree1.commit('rev1', rev_id='rev1')
tree2 = tree1.branch.bzrdir.sprout('tree2', stacked=True
).open_workingtree()
tree2.commit('local changes make me feel good.')
- branch2 = Branch.open(smart_server.get_url() + '/tree2')
+ branch2 = Branch.open(self.get_url('tree2'))
branch2.lock_read()
self.addCleanup(branch2.unlock)
return tree1.branch, branch2
@@ -2308,6 +2306,7 @@
tip = stacked.last_revision()
revs = stacked.repository.get_ancestry(tip)
search = graph.PendingAncestryResult([tip], stacked.repository)
+ self.reset_smart_call_log()
stream = source.get_stream(search)
if None in revs:
revs.remove(None)
@@ -2322,14 +2321,19 @@
# unordered yields the full data from both stacked and stacked upon
# sources.
rev_ord, expected_revs = self.get_ordered_revs('1.9', 'unordered')
- self.assertEqual(set(rev_ord), set(expected_revs))
+ self.assertEqual(set(expected_revs), set(rev_ord))
+ # Getting unordered results should have made a streaming data request
+ # from the server, then one from the backing branch.
+ self.assertLength(2, self.hpss_calls)
def test_stacked_get_stream_topological(self):
# Repository._get_source.get_stream() from a stacked repository with
# topological sorting yields the full data from both stacked and
# stacked upon sources in topological order.
rev_ord, expected_revs = self.get_ordered_revs('knit', 'topological')
- self.assertEqual(rev_ord, expected_revs)
+ self.assertEqual(expected_revs, rev_ord)
+ # Getting a topological sort still requires VFS calls
+ self.assertLength(14, self.hpss_calls)
def test_stacked_get_stream_groupcompress(self):
# Repository._get_source.get_stream() from a stacked repository with
@@ -2337,7 +2341,10 @@
# stacked upon sources in groupcompress order.
raise tests.TestSkipped('No groupcompress ordered format available')
rev_ord, expected_revs = self.get_ordered_revs('dev5', 'groupcompress')
- self.assertEqual(reversed(rev_ord), expected_revs)
+ self.assertEqual(expected_revs, reversed(rev_ord))
+ # Getting groupcompress results should have made a streaming data request
+ # from the stacked branch, and one from the stacked-on branch.
+ self.assertLength(2, self.hpss_calls)
class TestRemoteBranchEffort(tests.TestCaseWithTransport):