Rev 4515: Rename interesting => new, uninteresting => old in http://bazaar.launchpad.net/~jameinel/bzr/1.17-chk-multilevel

John Arbash Meinel john at arbash-meinel.com
Thu Jul 2 20:59:59 BST 2009


At http://bazaar.launchpad.net/~jameinel/bzr/1.17-chk-multilevel

------------------------------------------------------------
revno: 4515
revision-id: john at arbash-meinel.com-20090702195943-q271mscu1ssmy012
parent: john at arbash-meinel.com-20090702195623-mb65gji0i0pr93gc
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: 1.17-chk-multilevel
timestamp: Thu 2009-07-02 14:59:43 -0500
message:
  Rename interesting => new, uninteresting => old
  
  This makes all the variables shorter and a bit easier to distinguish.
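
For orientation, a minimal sketch of how the renamed machinery is consumed
through the public wrapper (the call shape follows the assertIterInteresting
helper in the test diff below; `store` and the root-key lists here are
placeholders, not values from this commit):

    from bzrlib import chk_map

    def show_difference(store, new_root_keys, old_root_keys):
        # Stream every chk page and (key, value) item reachable from the
        # new roots that is not shared with the old roots; pages common to
        # both sides are never yielded.
        iter_nodes = chk_map.iter_interesting_nodes(store, new_root_keys,
                                                    old_root_keys)
        for record, new_items in iter_nodes:
            if record is not None:
                print 'page to fetch: %r' % (record.key,)
            for key, value in new_items:
                print 'new item: %r -> %r' % (key, value)
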
-------------- next part --------------
=== modified file 'bzrlib/chk_map.py'
--- a/bzrlib/chk_map.py	2009-07-02 19:56:23 +0000
+++ b/bzrlib/chk_map.py	2009-07-02 19:59:43 +0000
@@ -1409,29 +1409,29 @@
     but it won't yield (key,value) pairs that are common.
     """
 
-    def __init__(self, store, interesting_root_keys, uninteresting_root_keys,
+    def __init__(self, store, new_root_keys, old_root_keys,
                  search_key_func, pb=None):
         self._store = store
-        self._interesting_root_keys = interesting_root_keys
-        self._uninteresting_root_keys = uninteresting_root_keys
+        self._new_root_keys = new_root_keys
+        self._old_root_keys = old_root_keys
         self._pb = pb
         # All uninteresting chks that we have seen. By the time they are added
         # here, they should be either fully ignored, or queued up for
         # processing
-        self._all_uninteresting_chks = set(self._uninteresting_root_keys)
-        # All items that we have seen from the uninteresting_root_keys
-        self._all_uninteresting_items = set()
+        self._all_old_chks = set(self._old_root_keys)
+        # All items that we have seen from the old_root_keys
+        self._all_old_items = set()
         # These are interesting items which were either read, or already in the
         # interesting queue (so we don't need to walk them again)
-        self._processed_interesting_refs = set()
+        self._processed_new_refs = set()
         self._search_key_func = search_key_func
 
         # The uninteresting and interesting nodes to be searched
-        self._uninteresting_queue = []
-        self._interesting_queue = []
+        self._old_queue = []
+        self._new_queue = []
         # Holds the (key, value) items found when processing the root nodes,
         # waiting for the uninteresting nodes to be walked
-        self._interesting_item_queue = []
+        self._new_item_queue = []
         self._state = None
 
     def _read_nodes_from_store(self, keys):
@@ -1459,38 +1459,38 @@
                 items = node._items.items()
             yield record, node, prefix_refs, items
 
-    def _read_uninteresting_roots(self):
-        uninteresting_chks_to_enqueue = []
-        all_uninteresting_chks = self._all_uninteresting_chks
+    def _read_old_roots(self):
+        old_chks_to_enqueue = []
+        all_old_chks = self._all_old_chks
         for record, node, prefix_refs, items in \
-                self._read_nodes_from_store(self._uninteresting_root_keys):
+                self._read_nodes_from_store(self._old_root_keys):
             # Uninteresting node
             prefix_refs = [p_r for p_r in prefix_refs
-                                if p_r[1] not in all_uninteresting_chks]
+                                if p_r[1] not in all_old_chks]
             new_refs = [p_r[1] for p_r in prefix_refs]
-            all_uninteresting_chks.update(new_refs)
-            self._all_uninteresting_items.update(items)
+            all_old_chks.update(new_refs)
+            self._all_old_items.update(items)
             # Queue up the uninteresting references
             # Don't actually put them in the 'to-read' queue until we have
             # finished checking the interesting references
-            uninteresting_chks_to_enqueue.extend(prefix_refs)
-        return uninteresting_chks_to_enqueue
+            old_chks_to_enqueue.extend(prefix_refs)
+        return old_chks_to_enqueue
 
-    def _enqueue_uninteresting(self, interesting_prefixes,
-                               uninteresting_chks_to_enqueue):
+    def _enqueue_old(self, new_prefixes,
+                               old_chks_to_enqueue):
         # At this point, we have read all the uninteresting and interesting
         # items, so we can queue up the uninteresting stuff, knowing that we've
         # handled the interesting ones
-        for prefix, ref in uninteresting_chks_to_enqueue:
+        for prefix, ref in old_chks_to_enqueue:
             not_interesting = True
             for i in xrange(len(prefix), 0, -1):
-                if prefix[:i] in interesting_prefixes:
+                if prefix[:i] in new_prefixes:
                     not_interesting = False
                     break
             if not_interesting:
                 # This prefix is not part of the remaining 'interesting set'
                 continue
-            self._uninteresting_queue.append(ref)
+            self._old_queue.append(ref)
 
     def _read_all_roots(self):
         """Read the root pages.
@@ -1499,68 +1499,68 @@
         yielded up to whoever needs them without any buffering.
         """
         # This is the bootstrap phase
-        if not self._uninteresting_root_keys:
-            # With no uninteresting_root_keys we can just shortcut and be ready
-            # for _flush_interesting_queue
-            self._interesting_queue = list(self._interesting_root_keys)
+        if not self._old_root_keys:
+            # With no old_root_keys we can just shortcut and be ready
+            # for _flush_new_queue
+            self._new_queue = list(self._new_root_keys)
             return
-        uninteresting_chks_to_enqueue = self._read_uninteresting_roots()
+        old_chks_to_enqueue = self._read_old_roots()
         # filter out any root keys that are already known to be uninteresting
-        interesting_keys = set(self._interesting_root_keys).difference(
-                                self._all_uninteresting_chks)
-        # These are prefixes that are present in interesting_keys that we are
+        new_keys = set(self._new_root_keys).difference(
+                                self._all_old_chks)
+        # These are prefixes that are present in new_keys that we are
         # thinking to yield
-        interesting_prefixes = set()
+        new_prefixes = set()
         # We are about to yield all of these, so we don't want them getting
         # added a second time
-        processed_interesting_refs = self._processed_interesting_refs
-        processed_interesting_refs.update(interesting_keys)
+        processed_new_refs = self._processed_new_refs
+        processed_new_refs.update(new_keys)
         for record, node, prefix_refs, items in \
-                self._read_nodes_from_store(interesting_keys):
+                self._read_nodes_from_store(new_keys):
             # At this level, we now know all the uninteresting references
             # So we filter and queue up whatever is remaining
             prefix_refs = [p_r for p_r in prefix_refs
-                           if p_r[1] not in self._all_uninteresting_chks
-                              and p_r[1] not in processed_interesting_refs]
+                           if p_r[1] not in self._all_old_chks
+                              and p_r[1] not in processed_new_refs]
             refs = [p_r[1] for p_r in prefix_refs]
-            interesting_prefixes.update([p_r[0] for p_r in prefix_refs])
-            self._interesting_queue.extend(refs)
+            new_prefixes.update([p_r[0] for p_r in prefix_refs])
+            self._new_queue.extend(refs)
             # TODO: We can potentially get multiple items here, however the
             #       current design allows for this, as callers will do the work
             #       to make the results unique. We might profile whether we
             #       gain anything by ensuring unique return values for items
-            interesting_items = [item for item in items
-                                 if item not in self._all_uninteresting_items]
-            self._interesting_item_queue.extend(interesting_items)
-            interesting_prefixes.update([self._search_key_func(item[0])
-                                         for item in interesting_items])
-            processed_interesting_refs.update(refs)
+            new_items = [item for item in items
+                                 if item not in self._all_old_items]
+            self._new_item_queue.extend(new_items)
+            new_prefixes.update([self._search_key_func(item[0])
+                                         for item in new_items])
+            processed_new_refs.update(refs)
             yield record
-        # For interesting_prefixes we have the full length prefixes queued up.
+        # For new_prefixes we have the full length prefixes queued up.
         # However, we also need possible prefixes. (If we have a known ref to
         # 'ab', then we also need to include 'a'.) So expand the
-        # interesting_prefixes to include all shorter prefixes
-        for prefix in list(interesting_prefixes):
-            interesting_prefixes.update([prefix[:i]
+        # new_prefixes to include all shorter prefixes
+        for prefix in list(new_prefixes):
+            new_prefixes.update([prefix[:i]
                                          for i in xrange(1, len(prefix))])
-        self._enqueue_uninteresting(interesting_prefixes,
-                                    uninteresting_chks_to_enqueue)
+        self._enqueue_old(new_prefixes,
+                                    old_chks_to_enqueue)
 
-    def _flush_interesting_queue(self):
+    def _flush_new_queue(self):
         # No need to maintain the heap invariant anymore, just pull things out
         # and process them
-        refs = set(self._interesting_queue)
-        self._interesting_queue = []
+        refs = set(self._new_queue)
+        self._new_queue = []
         # First pass, flush all interesting items and convert to using direct refs
-        all_uninteresting_chks = self._all_uninteresting_chks
-        processed_interesting_refs = self._processed_interesting_refs
-        all_uninteresting_items = self._all_uninteresting_items
-        interesting_items = [item for item in self._interesting_item_queue
-                                   if item not in all_uninteresting_items]
-        self._interesting_item_queue = []
-        if interesting_items:
-            yield None, interesting_items
-        refs = refs.difference(all_uninteresting_chks)
+        all_old_chks = self._all_old_chks
+        processed_new_refs = self._processed_new_refs
+        all_old_items = self._all_old_items
+        new_items = [item for item in self._new_item_queue
+                                   if item not in all_old_items]
+        self._new_item_queue = []
+        if new_items:
+            yield None, new_items
+        refs = refs.difference(all_old_chks)
         while refs:
             next_refs = set()
             next_refs_update = next_refs.update
@@ -1568,30 +1568,30 @@
             # from 1m54s to 1m51s. Consider it.
             for record, _, p_refs, items in self._read_nodes_from_store(refs):
                 items = [item for item in items
-                         if item not in all_uninteresting_items]
+                         if item not in all_old_items]
                 yield record, items
                 next_refs_update([p_r[1] for p_r in p_refs])
-            next_refs = next_refs.difference(all_uninteresting_chks)
-            next_refs = next_refs.difference(processed_interesting_refs)
-            processed_interesting_refs.update(next_refs)
+            next_refs = next_refs.difference(all_old_chks)
+            next_refs = next_refs.difference(processed_new_refs)
+            processed_new_refs.update(next_refs)
             refs = next_refs
 
-    def _process_next_uninteresting(self):
+    def _process_next_old(self):
         # Since we don't filter uninteresting any further than during
         # _read_all_roots, process the whole queue in a single pass.
-        refs = self._uninteresting_queue
-        self._uninteresting_queue = []
-        all_uninteresting_chks = self._all_uninteresting_chks
+        refs = self._old_queue
+        self._old_queue = []
+        all_old_chks = self._all_old_chks
         for record, _, prefix_refs, items in self._read_nodes_from_store(refs):
-            self._all_uninteresting_items.update(items)
-            refs = [r for _,r in prefix_refs if r not in all_uninteresting_chks]
-            self._uninteresting_queue.extend(refs)
-            all_uninteresting_chks.update(refs)
+            self._all_old_items.update(items)
+            refs = [r for _,r in prefix_refs if r not in all_old_chks]
+            self._old_queue.extend(refs)
+            all_old_chks.update(refs)
 
     def _process_queues(self):
-        while self._uninteresting_queue:
-            self._process_next_uninteresting()
-        return self._flush_interesting_queue()
+        while self._old_queue:
+            self._process_next_old()
+        return self._flush_new_queue()
 
     def process(self):
         for record in self._read_all_roots():
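
The heart of the filtering above is the interplay between new_prefixes and
the old queue: _read_all_roots expands every new prefix to include all of
its leading slices, and _enqueue_old then drops any old ref whose prefix
cannot shadow something new. A standalone sketch of that logic, using plain
strings in place of real CHK refs (a simplified model of the two helpers,
not the production code path):

    def expand_prefixes(new_prefixes):
        # A known ref to 'ab' also implicates 'a', so include every
        # shorter leading slice of each prefix.
        for prefix in list(new_prefixes):
            new_prefixes.update([prefix[:i] for i in xrange(1, len(prefix))])
        return new_prefixes

    def filter_old_refs(old_chks_to_enqueue, new_prefixes):
        # Keep only the old refs whose prefix (or any leading slice of it)
        # is still a candidate on the new side; anything else cannot
        # overlap content we are about to yield, so it is never walked.
        old_queue = []
        for prefix, ref in old_chks_to_enqueue:
            for i in xrange(len(prefix), 0, -1):
                if prefix[:i] in new_prefixes:
                    old_queue.append(ref)
                    break
        return old_queue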

=== modified file 'bzrlib/tests/test_chk_map.py'
--- a/bzrlib/tests/test_chk_map.py	2009-07-02 19:56:23 +0000
+++ b/bzrlib/tests/test_chk_map.py	2009-07-02 19:59:43 +0000
@@ -2126,12 +2126,12 @@
 
 class TestCHKMapDifference(TestCaseWithExampleMaps):
 
-    def get_difference(self, interesting_roots, uninteresting_roots,
+    def get_difference(self, new_roots, old_roots,
                        search_key_func=None):
         if search_key_func is None:
             search_key_func = chk_map._search_key_plain
         return chk_map.CHKMapDifference(self.get_chk_bytes(),
-            interesting_roots, uninteresting_roots, search_key_func)
+            new_roots, old_roots, search_key_func)
 
     def test__init__(self):
         c_map = self.make_root_only_map()
@@ -2139,9 +2139,9 @@
         c_map.map(('aaa',), 'new aaa content')
         key2 = c_map._save()
         diff = self.get_difference([key2], [key1])
-        self.assertEqual(set([key1]), diff._all_uninteresting_chks)
-        self.assertEqual([], diff._uninteresting_queue)
-        self.assertEqual([], diff._interesting_queue)
+        self.assertEqual(set([key1]), diff._all_old_chks)
+        self.assertEqual([], diff._old_queue)
+        self.assertEqual([], diff._new_queue)
 
     def help__read_all_roots(self, search_key_func):
         c_map = self.make_root_only_map(search_key_func=search_key_func)
@@ -2151,14 +2151,14 @@
         diff = self.get_difference([key2], [key1], search_key_func)
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual([key2], root_results)
-        # We should have queued up only items that aren't in the uninteresting
+        # We should have queued up only items that aren't in the old
         # set
         self.assertEqual([(('aaa',), 'new aaa content')],
-                         diff._interesting_item_queue)
-        self.assertEqual([], diff._interesting_queue)
-        # And there are no uninteresting references, so that queue should be
+                         diff._new_item_queue)
+        self.assertEqual([], diff._new_queue)
+        # And there are no old references, so that queue should be
         # empty
-        self.assertEqual([], diff._uninteresting_queue)
+        self.assertEqual([], diff._old_queue)
 
     def test__read_all_roots_plain(self):
         self.help__read_all_roots(search_key_func=chk_map._search_key_plain)
@@ -2166,7 +2166,7 @@
     def test__read_all_roots_16(self):
         self.help__read_all_roots(search_key_func=chk_map._search_key_16)
 
-    def test__read_all_roots_skips_known_uninteresting(self):
+    def test__read_all_roots_skips_known_old(self):
         c_map = self.make_one_deep_map(chk_map._search_key_plain)
         key1 = c_map.key()
         c_map2 = self.make_root_only_map(chk_map._search_key_plain)
@@ -2190,11 +2190,11 @@
         self.assertEqual([key2], root_results)
         # At this point, we should have queued up only the 'a' Leaf on both
         # sides; both 'c' and 'd' are known not to have changed on either side
-        self.assertEqual([key2_a], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
-        self.assertEqual([key1_a], diff._uninteresting_queue)
+        self.assertEqual([key2_a], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
+        self.assertEqual([key1_a], diff._old_queue)
 
-    def test__read_all_roots_multi_interesting_prepares_queues(self):
+    def test__read_all_roots_multi_new_prepares_queues(self):
         c_map = self.make_one_deep_map(chk_map._search_key_plain)
         key1 = c_map.key()
         c_map._dump_tree() # load everything
@@ -2215,10 +2215,10 @@
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual(sorted([key2, key3]), sorted(root_results))
         # We should have queued up key2_a and key3_c, but not key2_c or key3_a
-        self.assertEqual([key2_a, key3_c], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
-        # And we should have queued up both a and c for the uninteresting set
-        self.assertEqual([key1_a, key1_c], diff._uninteresting_queue)
+        self.assertEqual([key2_a, key3_c], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
+        # And we should have queued up both a and c for the old set
+        self.assertEqual([key1_a, key1_c], diff._old_queue)
 
     def test__read_all_roots_different_depths(self):
         c_map = self.make_two_deep_map(chk_map._search_key_plain)
@@ -2239,17 +2239,17 @@
         self.assertEqual([key2], root_results)
         # Only the 'a' subset should be queued up, since 'c' and 'd' cannot be
         # present
-        self.assertEqual([key1_a], diff._uninteresting_queue)
-        self.assertEqual([key2_aa, key2_ad], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
+        self.assertEqual([key1_a], diff._old_queue)
+        self.assertEqual([key2_aa, key2_ad], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
 
         diff = self.get_difference([key1], [key2], chk_map._search_key_plain)
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual([key1], root_results)
 
-        self.assertEqual([key2_aa, key2_ad], diff._uninteresting_queue)
-        self.assertEqual([key1_a, key1_c, key1_d], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
+        self.assertEqual([key2_aa, key2_ad], diff._old_queue)
+        self.assertEqual([key1_a, key1_c, key1_d], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
 
     def test__read_all_roots_different_depths_16(self):
         c_map = self.make_two_deep_map(chk_map._search_key_16)
@@ -2272,20 +2272,20 @@
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual([key2], root_results)
         # Only the subset of keys that may be present should be queued up.
-        self.assertEqual([key1_F], diff._uninteresting_queue)
+        self.assertEqual([key1_F], diff._old_queue)
         self.assertEqual(sorted([key2_F0, key2_F3, key2_F4, key2_FD]),
-                         sorted(diff._interesting_queue))
-        self.assertEqual([], diff._interesting_item_queue)
+                         sorted(diff._new_queue))
+        self.assertEqual([], diff._new_item_queue)
 
         diff = self.get_difference([key1], [key2], chk_map._search_key_16)
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual([key1], root_results)
 
         self.assertEqual(sorted([key2_F0, key2_F3, key2_F4, key2_FD]),
-                         sorted(diff._uninteresting_queue))
+                         sorted(diff._old_queue))
         self.assertEqual(sorted([key1_2, key1_4, key1_C, key1_F]),
-                         sorted(diff._interesting_queue))
-        self.assertEqual([], diff._interesting_item_queue)
+                         sorted(diff._new_queue))
+        self.assertEqual([], diff._new_item_queue)
 
     def test__read_all_roots_mixed_depth(self):
         c_map = self.make_one_deep_two_prefix_map(chk_map._search_key_plain)
@@ -2305,9 +2305,9 @@
         self.assertEqual([key2], root_results)
         # 'ad' matches exactly 'a' on the other side, so it should be removed,
         # and neither side should have it queued for walking
-        self.assertEqual([], diff._uninteresting_queue)
-        self.assertEqual([key2_b], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
+        self.assertEqual([], diff._old_queue)
+        self.assertEqual([key2_b], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
 
         diff = self.get_difference([key1], [key2], chk_map._search_key_plain)
         root_results = [record.key for record in diff._read_all_roots()]
@@ -2318,15 +2318,15 @@
         #       than one interesting key, so for now, we live with this
         #       Consider revising, though benchmarking showing it to be a
         #       real-world issue should be done
-        self.assertEqual([key2_a], diff._uninteresting_queue)
-        # self.assertEqual([], diff._uninteresting_queue)
-        self.assertEqual([key1_aa], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
+        self.assertEqual([key2_a], diff._old_queue)
+        # self.assertEqual([], diff._old_queue)
+        self.assertEqual([key1_aa], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
 
     def test__read_all_roots_yields_extra_deep_records(self):
         # This is slightly controversial, as we will yield a chk page that we
         # might later on find out could be filtered out. (If a root node is
-        # referenced deeper in the uninteresting set.)
+        # referenced deeper in the old set.)
         # However, even with stacking, we always have all chk pages that we
         # will need. So as long as we filter out the referenced keys, we'll
         # never run into problems.
@@ -2352,10 +2352,10 @@
         # However, even though we have yielded the root node to be fetched,
         # we should have enqueued all of the chk pages to be walked, so that we
         # can find the keys if they are present
-        self.assertEqual([key1_a], diff._uninteresting_queue)
+        self.assertEqual([key1_a], diff._old_queue)
         self.assertEqual([(('acc',), 'initial acc content'),
                           (('ace',), 'initial ace content'),
-                         ], diff._interesting_item_queue)
+                         ], diff._new_item_queue)
 
     def test__read_all_roots_multiple_targets(self):
         c_map = self.make_root_only_map()
@@ -2372,44 +2372,44 @@
                                      chk_map._search_key_plain)
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual(sorted([key2, key3]), sorted(root_results))
-        self.assertEqual([], diff._uninteresting_queue)
+        self.assertEqual([], diff._old_queue)
         # the key 'd' is interesting from key2 and key3, but should only be
         # entered into the queue 1 time
         self.assertEqual(sorted([key2_c, key3_c, key2_d]),
-                         sorted(diff._interesting_queue))
-        self.assertEqual([], diff._interesting_item_queue)
+                         sorted(diff._new_queue))
+        self.assertEqual([], diff._new_item_queue)
 
-    def test__read_all_roots_no_uninteresting(self):
-        # This is the 'initial branch' case. With nothing in the uninteresting
+    def test__read_all_roots_no_old(self):
+        # This is the 'initial branch' case. With nothing in the old
         # set, we can just queue up all root nodes into the interesting queue, and
-        # then have them fast-path flushed via _flush_interesting_queue
+        # then have them fast-path flushed via _flush_new_queue
         c_map = self.make_two_deep_map()
         key1 = c_map.key()
         diff = self.get_difference([key1], [], chk_map._search_key_plain)
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual([], root_results)
-        self.assertEqual([], diff._uninteresting_queue)
-        self.assertEqual([key1], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
+        self.assertEqual([], diff._old_queue)
+        self.assertEqual([key1], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
 
         c_map2 = self.make_one_deep_map()
         key2 = c_map2.key()
         diff = self.get_difference([key1, key2], [], chk_map._search_key_plain)
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual([], root_results)
-        self.assertEqual([], diff._uninteresting_queue)
-        self.assertEqual(sorted([key1, key2]), sorted(diff._interesting_queue))
-        self.assertEqual([], diff._interesting_item_queue)
+        self.assertEqual([], diff._old_queue)
+        self.assertEqual(sorted([key1, key2]), sorted(diff._new_queue))
+        self.assertEqual([], diff._new_item_queue)
 
-    def test__read_all_roots_no_uninteresting_16(self):
+    def test__read_all_roots_no_old_16(self):
         c_map = self.make_two_deep_map(chk_map._search_key_16)
         key1 = c_map.key()
         diff = self.get_difference([key1], [], chk_map._search_key_16)
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual([], root_results)
-        self.assertEqual([], diff._uninteresting_queue)
-        self.assertEqual([key1], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
+        self.assertEqual([], diff._old_queue)
+        self.assertEqual([key1], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
 
         c_map2 = self.make_one_deep_map(chk_map._search_key_16)
         key2 = c_map2.key()
@@ -2417,12 +2417,12 @@
                                    chk_map._search_key_16)
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual([], root_results)
-        self.assertEqual([], diff._uninteresting_queue)
+        self.assertEqual([], diff._old_queue)
         self.assertEqual(sorted([key1, key2]),
-                         sorted(diff._interesting_queue))
-        self.assertEqual([], diff._interesting_item_queue)
+                         sorted(diff._new_queue))
+        self.assertEqual([], diff._new_item_queue)
 
-    def test__read_all_roots_multiple_uninteresting(self):
+    def test__read_all_roots_multiple_old(self):
         c_map = self.make_two_deep_map()
         key1 = c_map.key()
         c_map._dump_tree() # load everything
@@ -2439,11 +2439,11 @@
         self.assertEqual([key3], root_results)
         # the 'a' keys should not be queued up 2 times, since they are
         # identical
-        self.assertEqual([key1_a], diff._uninteresting_queue)
-        self.assertEqual([key3_a], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
+        self.assertEqual([key1_a], diff._old_queue)
+        self.assertEqual([key3_a], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
 
-    def test__process_next_uninteresting_batched_no_dupes(self):
+    def test__process_next_old_batched_no_dupes(self):
         c_map = self.make_two_deep_map()
         key1 = c_map.key()
         c_map._dump_tree() # load everything
@@ -2465,14 +2465,14 @@
         root_results = [record.key for record in diff._read_all_roots()]
         self.assertEqual([key3], root_results)
         self.assertEqual(sorted([key1_a, key2_a]),
-                         sorted(diff._uninteresting_queue))
-        self.assertEqual([key3_a], diff._interesting_queue)
-        self.assertEqual([], diff._interesting_item_queue)
-        diff._process_next_uninteresting()
-        # All of the uninteresting records should be brought in and queued up,
+                         sorted(diff._old_queue))
+        self.assertEqual([key3_a], diff._new_queue)
+        self.assertEqual([], diff._new_item_queue)
+        diff._process_next_old()
+        # All of the old records should be brought in and queued up,
         # but we should not have any duplicates
         self.assertEqual(sorted([key1_aa, key1_ab, key1_ac, key1_ad, key2_aa]),
-                         sorted(diff._uninteresting_queue))
+                         sorted(diff._old_queue))
 
 
 class TestIterInterestingNodes(TestCaseWithExampleMaps):
@@ -2482,7 +2482,7 @@
         return c_map.key()
 
     def assertIterInteresting(self, records, items, interesting_keys,
-                              uninteresting_keys):
+                              old_keys):
         """Check the result of iter_interesting_nodes.
 
         Note that we no longer care how many steps are taken, etc, just that
@@ -2494,7 +2494,7 @@
         store = self.get_chk_bytes()
         store._search_key_func = chk_map._search_key_plain
         iter_nodes = chk_map.iter_interesting_nodes(store, interesting_keys,
-                                                    uninteresting_keys)
+                                                    old_keys)
         record_keys = []
         all_items = []
         for record, new_items in iter_nodes:
@@ -2690,8 +2690,8 @@
 
     def test_multiple_maps_overlapping_common_new(self):
         # Test that when a node is found through the interesting_keys iteration
-        # for *some roots* and also via the uninteresting keys iteration,
-        # it is still scanned for uninteresting refs and items, because it's
+        # for *some roots* and also via the old keys iteration,
+        # it is still scanned for old refs and items, because it's
         # not truly new. This requires 2 levels of InternalNodes to expose,
         # because of the way the bootstrap in _find_children_info works.
         # This suggests that the code is probably amenable to/benefit from
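
Taken together, the renamed attributes give the tests a compact vocabulary:
build an "old" map, mutate and save a "new" one, then assert on _old_queue,
_new_queue and _new_item_queue. A condensed model of that pattern (fixture
helpers such as make_root_only_map and get_chk_bytes come from
TestCaseWithExampleMaps; treat this as a sketch of the tests above, not a
new test):

    def example_rename_vocabulary(self):
        c_map = self.make_root_only_map()
        key1 = c_map.key()                      # the 'old' root
        c_map.map(('aaa',), 'new aaa content')
        key2 = c_map._save()                    # the 'new' root
        diff = chk_map.CHKMapDifference(self.get_chk_bytes(),
            [key2], [key1], chk_map._search_key_plain)
        # The old roots are seeded into _all_old_chks; the work queues
        # start out empty until _read_all_roots runs.
        self.assertEqual(set([key1]), diff._all_old_chks)
        self.assertEqual([], diff._old_queue)
        self.assertEqual([], diff._new_queue)
        # Walking the roots yields the new root page and queues the
        # changed item for _flush_new_queue.
        root_results = [record.key for record in diff._read_all_roots()]
        self.assertEqual([key2], root_results)
        self.assertEqual([(('aaa',), 'new aaa content')],
                         diff._new_item_queue)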


