Rev 6035: Clean out more of the cruft that got left by accident. in http://bazaar.launchpad.net/~jameinel/bzr/2.4-too-much-walking-388269

John Arbash Meinel john at arbash-meinel.com
Wed Aug 17 13:03:26 UTC 2011


At http://bazaar.launchpad.net/~jameinel/bzr/2.4-too-much-walking-388269

------------------------------------------------------------
revno: 6035
revision-id: john at arbash-meinel.com-20110817130237-h51ige1ldohv1npa
parent: john at arbash-meinel.com-20110817103008-3p47hg0ox21chh81
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: 2.4-too-much-walking-388269
timestamp: Wed 2011-08-17 15:02:37 +0200
message:
  Clean out more of the cruft that got left by accident.
=== modified file 'bzrlib/graph.py'
--- a/bzrlib/graph.py	2011-08-15 14:11:18 +0000
+++ b/bzrlib/graph.py	2011-08-17 13:02:37 +0000
@@ -1892,32 +1892,6 @@
             limit=self.limit)
 
 
-def ignore():
-    # TODO: If we use this heavily, then we should just cache the
-    #       reverse map. It certainly only changes based on newly
-    #       requested entries.
-    stop_keys = set(keys)
-    stop_keys.difference_update(self._unstacked_provider.missing_keys)
-    # Just look at immediate children
-    child_keys = set()
-    for k in keys:
-        child_keys.update(parent_to_children_map[k])
-    # Without this line, we get the revision count wrong for 'bzr'. I'm
-    # guessing a shortcut caused some revs to be found early, and then
-    # not walked now. So without c for c in parents_map[k] we get *way*
-    # too many keys, because the graph flood-fills. Without 'if c not
-    # in child_keys' we stop before we start and get the wrong answer
-    # that way.
-    map(stop_keys.update, [[c for c in parents_map[k] if c not in child_keys]
-                           for k in child_keys])
-    mutter('Faking search set _get_parent_map_rpc,'
-                 ' %d cache size, %d start keys'
-                 ' %d included_keys %d stop_keys',
-                 len(parents_map), len(child_keys), len(child_keys),
-                 len(keys))
-    recipe = ('manual', child_keys, stop_keys, len(child_keys))
-
-
 def invert_parent_map(parent_map):
     """Given a map from child => parents, create a map of parent=>children"""
     child_map = {}
@@ -1968,6 +1942,17 @@
 
 
 def _run_search(parent_map, heads, exclude_keys):
+    """Given a parent map, run a _BreadthFirstSearcher on it.
+
+    Start at heads, walk until you hit exclude_keys. As a further improvement,
+    watch for any heads that you encounter while walking, which means they were
+    not heads of the search.
+
+    This is mostly used to generate a succinct recipe for how to walk through
+    most of parent_map.
+
+    :return: (_BreadthFirstSearcher, set(heads_encountered_by_walking))
+    """
     g = Graph(DictParentsProvider(parent_map))
     s = g._make_breadth_first_searcher(heads)
     found_heads = set()
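
For context on the two graph.py helpers touched above: invert_parent_map turns a child=>parents dict into a parent=>children dict, and _run_search walks the graph breadth-first from a set of heads until it hits exclude_keys, recording any requested heads that turn out to be reachable from another head. The sketch below is a minimal, self-contained illustration of both ideas; the function names and the plain dict/deque implementation are illustrative assumptions, not bzrlib's actual code, which goes through Graph and _BreadthFirstSearcher as the hunk shows.

import collections

def invert_parent_map_sketch(parent_map):
    """Turn {child: (parents,)} into {parent: [children]} (sketch only)."""
    child_map = {}
    for child, parents in parent_map.items():
        for parent in parents:
            child_map.setdefault(parent, []).append(child)
    return child_map

def run_search_sketch(parent_map, heads, exclude_keys):
    """Breadth-first walk of parent_map starting at heads.

    Stops at exclude_keys and returns (seen, found_heads), where found_heads
    are requested heads that were reached by walking from another head, i.e.
    keys that were not true heads of the search.
    """
    seen = set()
    found_heads = set()
    queue = collections.deque(k for k in heads if k not in exclude_keys)
    while queue:
        key = queue.popleft()
        if key in seen:
            continue
        seen.add(key)
        for parent in parent_map.get(key, ()):
            if parent in heads:
                # A head reached while walking was not a real head.
                found_heads.add(parent)
            if parent not in seen and parent not in exclude_keys:
                queue.append(parent)
    return seen, found_heads

# Tiny worked example: A <- B <- C, asking for heads B and C.  B is an
# ancestor of C, so it is reported back in found_heads.
parent_map = {'B': ('A',), 'C': ('B',)}
print(invert_parent_map_sketch(parent_map))   # {'A': ['B'], 'B': ['C']}
print(run_search_sketch(parent_map, {'C', 'B'}, {'A'}))
# seen == {'B', 'C'}, found_heads == {'B'}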

=== modified file 'bzrlib/vf_repository.py'
--- a/bzrlib/vf_repository.py	2011-08-17 10:30:08 +0000
+++ b/bzrlib/vf_repository.py	2011-08-17 13:02:37 +0000
@@ -16,8 +16,6 @@
 
 """Repository formats built around versioned files."""
 
-import sys
-import time
 
 from bzrlib.lazy_import import lazy_import
 lazy_import(globals(), """
@@ -72,7 +70,7 @@
     )
 
 from bzrlib.trace import (
-    mutter, note
+    mutter
     )
 
 
@@ -2516,21 +2514,12 @@
         searcher = source_graph._make_breadth_first_searcher(all_wanted_revs)
         null_set = frozenset([_mod_revision.NULL_REVISION])
         searcher_exhausted = False
-        search_step = 0
-        gpm = searcher._parents_provider.get_parent_map
-        def get_parent_map_logging(revisions):
-            res = gpm(revisions)
-            mutter('step %d, requested %d returned %d'
-                   % (search_step, len(revisions), len(res)))
-            return res
-        searcher._parents_provider.get_parent_map = get_parent_map_logging
         while True:
             next_revs = set()
             ghosts = set()
             # Iterate the searcher until we have enough next_revs
             while len(next_revs) < self._walk_to_common_revisions_batch_size:
                 try:
-                    search_step += 1
                     next_revs_part, ghosts_part = searcher.next_with_ghosts()
                     next_revs.update(next_revs_part)
                     ghosts.update(ghosts_part)


