Rev 63: Add another test which triggers a couple of edge cases. in http://bzr.arbash-meinel.com/plugins/history_db

John Arbash Meinel john at arbash-meinel.com
Wed Apr 7 22:56:11 BST 2010


At http://bzr.arbash-meinel.com/plugins/history_db

------------------------------------------------------------
revno: 63
revision-id: john at arbash-meinel.com-20100407215554-gb92iqfqqucf2475
parent: john at arbash-meinel.com-20100407210635-syhdace9q90ib8p4
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: history_db
timestamp: Wed 2010-04-07 16:55:54 -0500
message:
  Add another test which triggers a couple of edge cases.
  
  For example, having a ghost present while trying to ensure that all left-hand (lh) parents are filled in.
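
In bzr terms, a "ghost" is a revision that is referenced as a parent but is
not itself present in the repository, so the importer can never load data for
it. The change below makes _ensure_lh_parent_info() discard ghosts (via
self._ghosts) from the set of missing left-hand parents; without that, the
mainline walk would keep stepping while waiting for a parent that can never
appear. A purely illustrative parent map containing a ghost (the test itself
builds its graph with make_branch_with_ghosts(), which is not shown in this
diff):

    # Hypothetical ancestry with a ghost: 'G' is named as a parent of 'D'
    # but never appears as a key, so it can never be imported or merge
    # sorted.
    ancestry_with_ghost = {
        'A': (),
        'B': ('A',),
        'C': ('B',),
        'D': ('C', 'G'),  # 'G' is the ghost
        'E': ('D',),
    }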
-------------- next part --------------
=== modified file 'history_db.py'
--- a/history_db.py	2010-04-07 21:06:35 +0000
+++ b/history_db.py	2010-04-07 21:55:54 +0000
@@ -147,10 +147,6 @@
                                  "VALUES (?, ?, ?)", data)
 
     def do_import(self, expand_all=False):
-        if self._incremental:
-            self._update_ancestry(self._branch_tip_rev_id)
-            tip_db_id = self._rev_id_to_db_id[self._branch_tip_rev_id]
-            inc_importer = _IncrementalMergeSort(self, tip_db_id)
         merge_sorted = self._import_tip(self._branch_tip_rev_id)
         if not expand_all:
             return
@@ -180,7 +176,28 @@
         pb.finished()
 
     def _import_tip(self, tip_revision_id, suppress_progress_and_commit=False):
-        merge_sorted = self._graph.merge_sort((tip_revision_id,))
+        if self._incremental:
+            self._update_ancestry(tip_revision_id)
+            self._ensure_revisions([tip_revision_id])
+            tip_db_id = self._rev_id_to_db_id[tip_revision_id]
+            inc_merger = _IncrementalMergeSort(self, tip_db_id)
+            merge_sorted = inc_merger.topo_order()
+            # Map db_ids back to the keys that self._graph would generate
+            db_id_to_rev_id = dict((d, r)
+                               for r, d in self._rev_id_to_db_id.iteritems())
+            # Assert that the result is valid
+            actual_ms = self._graph.merge_sort((tip_revision_id,))
+            actual_ms_iter = iter(actual_ms)
+            for node in merge_sorted:
+                
+                node.key = (db_id_to_rev_id[node.key],)
+                actual_node = actual_ms_iter.next()
+                assert node.key == actual_node.key
+                assert node.revno == actual_node.revno
+                assert node.merge_depth == actual_node.merge_depth
+                assert node.end_of_merge == actual_node.end_of_merge
+        else:
+            merge_sorted = self._graph.merge_sort((tip_revision_id,))
         try:
             if suppress_progress_and_commit:
                 pb = None
@@ -442,6 +459,13 @@
         self._pending_parents = pending_parents
         self._is_first = is_first
 
+    def __repr__(self):
+        return '%s(%s, %s, %s, %s [%s %s %s %s])' % (
+            self.__class__.__name__,
+            self.key, self.revno, self.end_of_merge, self.merge_depth,
+            self._left_parent, self._left_pending_parent,
+            self._pending_parents, self._is_first)
+
 
 class _IncrementalMergeSort(object):
     """Context for importing partial history."""
@@ -706,6 +730,7 @@
             if lh_parent in self._imported_dotted_revno:
                 continue
             missing_parent_ids.add(lh_parent)
+        missing_parent_ids.difference_update(self._ghosts)
         while missing_parent_ids:
             self._step_mainline()
             missing_parent_ids = missing_parent_ids.difference(
@@ -762,6 +787,8 @@
                 # *mainline* branch
                 branch_key = 0
                 mini_revno = revno[0]
+                # We found a mainline branch, make sure it is marked as such
+                self._revno_to_branch_count.setdefault(0, 0)
             if (branch_key not in self._branch_to_child_count
                 or mini_revno > self._branch_to_child_count[branch_key]):
                 self._branch_to_child_count[branch_key] = mini_revno
@@ -950,9 +977,10 @@
                 # And switch to the outer loop
                 break
 
-    def do_import(self):
+    def topo_order(self):
         self._find_interesting_ancestry()
         self._compute_merge_sort()
+        return list(reversed(self._scheduled_stack))
 
 
 class Querier(object):
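
The main behavioural change above is that _import_tip() now runs the
incremental merge sort itself and, while the incremental code is still being
proven, cross-checks its output node by node against a full merge_sort() of
the same tip. A rough sketch of that validation pattern with simplified names
(an illustration of the idea, not the plugin's exact code):

    # Sketch: compare an incremental topo order against a full merge_sort()
    # run.  The incremental nodes are keyed by db id, so they are mapped
    # back to revision ids before comparing.
    def check_incremental_result(inc_nodes, full_nodes, rev_id_to_db_id):
        db_id_to_rev_id = dict((d, r)
                               for r, d in rev_id_to_db_id.iteritems())
        full_iter = iter(full_nodes)
        for node in inc_nodes:
            expected = full_iter.next()
            key = (db_id_to_rev_id[node.key],)
            assert key == expected.key
            assert node.revno == expected.revno
            assert node.merge_depth == expected.merge_depth
            assert node.end_of_merge == expected.end_of_merge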

=== modified file 'test_importer.py'
--- a/test_importer.py	2010-04-07 21:06:35 +0000
+++ b/test_importer.py	2010-04-07 21:55:54 +0000
@@ -271,10 +271,10 @@
 
 class Test_IncrementalMergeSort(TestCaseWithGraphs):
 
-    def assertScheduledStack(self, inc_importer, expected):
+    def assertScheduledStack(self, inc_merger, expected):
         """Check that the merge_sort result is as expected."""
         actual = [(node.key, node.revno, node.end_of_merge, node.merge_depth)
-                  for node in inc_importer._scheduled_stack]
+                  for node in inc_merger._scheduled_stack]
         self.assertEqual(expected, actual)
 
     def test_step_by_step(self):
@@ -290,37 +290,37 @@
         # Now work on just importing G
         importer._update_ancestry('N')
         self.grab_interesting_ids(importer._rev_id_to_db_id)
-        inc_importer = history_db._IncrementalMergeSort(importer, self.N_id)
-        inc_importer._find_needed_mainline()
-        self.assertEqual([self.N_id, self.I_id], inc_importer._mainline_db_ids)
-        self.assertEqual(self.G_id, inc_importer._imported_mainline_id)
+        inc_merger = history_db._IncrementalMergeSort(importer, self.N_id)
+        inc_merger._find_needed_mainline()
+        self.assertEqual([self.N_id, self.I_id], inc_merger._mainline_db_ids)
+        self.assertEqual(self.G_id, inc_merger._imported_mainline_id)
         self.assertEqual(set([self.N_id, self.I_id]),
-                         inc_importer._interesting_ancestor_ids)
+                         inc_merger._interesting_ancestor_ids)
         # This should populate ._search_tips, as well as gdfo information
-        inc_importer._get_initial_search_tips()
-        self.assertEqual(set([self.J_id, self.H_id]), inc_importer._search_tips)
+        inc_merger._get_initial_search_tips()
+        self.assertEqual(set([self.J_id, self.H_id]), inc_merger._search_tips)
         # We should have taken a single step on the mainline
-        self.assertEqual(self.D_id, inc_importer._imported_mainline_id)
-        self.assertEqual(4, inc_importer._imported_gdfo)
+        self.assertEqual(self.D_id, inc_merger._imported_mainline_id)
+        self.assertEqual(4, inc_merger._imported_gdfo)
         self.assertEqual({self.D_id: 4, self.J_id: 5, self.H_id: 3},
-                         inc_importer._known_gdfo)
+                         inc_merger._known_gdfo)
         # J has gdfo > D, so it is quickly removed. H does not, so it is
         # left behind.
         self.assertEqual([self.H_id],
-             inc_importer._split_search_tips_by_gdfo([self.J_id, self.H_id]))
+             inc_merger._split_search_tips_by_gdfo([self.J_id, self.H_id]))
         self.assertEqual(set([self.N_id, self.I_id, self.J_id]),
-                         inc_importer._interesting_ancestor_ids)
+                         inc_merger._interesting_ancestor_ids)
         # Checking children should notice that all known children are fine, so
         # H now gets marked interesting
         self.assertEqual([],
-                 inc_importer._split_interesting_using_children([self.H_id]))
+                 inc_merger._split_interesting_using_children([self.H_id]))
         self.assertEqual(set([self.N_id, self.I_id, self.J_id, self.H_id]),
-                         inc_importer._interesting_ancestor_ids)
+                         inc_merger._interesting_ancestor_ids)
         # Since we know all search tips are interesting, step again.
-        inc_importer._step_search_tips()
-        self.assertEqual(set([self.B_id, self.F_id]), inc_importer._search_tips)
+        inc_merger._step_search_tips()
+        self.assertEqual(set([self.B_id, self.F_id]), inc_merger._search_tips)
         self.assertEqual(set([self.N_id, self.I_id, self.J_id, self.H_id]),
-                         inc_importer._interesting_ancestor_ids)
+                         inc_merger._interesting_ancestor_ids)
         # F is known-merged, so the first step should filter it from unknowns,
         # and remove it from the search tips
         # However B is not known yet, and has GDFO < D (since it was merged
@@ -328,39 +328,39 @@
         # However E is a child of B, and that is already known to be merged. So
         # B gets filtered out in the child step, and removed as a search tip
         self.assertEqual([self.B_id],
-             inc_importer._split_search_tips_by_gdfo([self.B_id, self.F_id]))
-        self.assertEqual(set([self.B_id]), inc_importer._search_tips)
+             inc_merger._split_search_tips_by_gdfo([self.B_id, self.F_id]))
+        self.assertEqual(set([self.B_id]), inc_merger._search_tips)
         self.assertEqual([],
-                     inc_importer._split_interesting_using_children([self.B_id]))
-        self.assertEqual(set([]), inc_importer._search_tips)
+                     inc_merger._split_interesting_using_children([self.B_id]))
+        self.assertEqual(set([]), inc_merger._search_tips)
         # At this point, we will stop searching. XXX: B's info has not been
         # loaded yet...
-        self.assertEqual(self.D_id, inc_importer._imported_mainline_id)
-        self.assertEqual(4, inc_importer._imported_gdfo)
+        self.assertEqual(self.D_id, inc_merger._imported_mainline_id)
+        self.assertEqual(4, inc_merger._imported_gdfo)
         self.assertEqual({self.E_id: ((1,2,1), 1, 1),
                           self.F_id: ((1,2,2), 0, 1),
                           self.G_id: ((3,), 0, 0),
-                         }, inc_importer._imported_dotted_revno)
+                         }, inc_merger._imported_dotted_revno)
         self.assertEqual({(1,2,1): self.E_id, (1,2,2): self.F_id,
                           (3,): self.G_id,
-                         }, inc_importer._dotted_to_db_id)
+                         }, inc_merger._dotted_to_db_id)
         # At this point, B_id isn't in _imported_dotted_revno, so we loop to
         # ensure we have enough dotted_revno data
-        inc_importer._ensure_lh_parent_info()
-        self.assertEqual(self.A_id, inc_importer._imported_mainline_id)
-        self.assertEqual(1, inc_importer._imported_gdfo)
+        inc_merger._ensure_lh_parent_info()
+        self.assertEqual(self.A_id, inc_merger._imported_mainline_id)
+        self.assertEqual(1, inc_merger._imported_gdfo)
         self.assertEqual({self.B_id: ((1,1,1), 1, 1),
                           self.C_id: ((1,1,2), 0, 1),
                           self.D_id: ((2,), 0, 0),
                           self.E_id: ((1,2,1), 1, 1),
                           self.F_id: ((1,2,2), 0, 1),
                           self.G_id: ((3,), 0, 0),
-                         }, inc_importer._imported_dotted_revno)
-        self.assertEqual({1: 2}, inc_importer._revno_to_branch_count)
+                         }, inc_merger._imported_dotted_revno)
+        self.assertEqual({0: 0, 1: 2}, inc_merger._revno_to_branch_count)
         self.assertEqual({0: 3, (1, 1): 2, (1, 2): 2},
-                         inc_importer._branch_to_child_count)
-        inc_importer._compute_merge_sort()
-        self.assertScheduledStack(inc_importer,
+                         inc_merger._branch_to_child_count)
+        inc_merger._compute_merge_sort()
+        self.assertScheduledStack(inc_merger,
                          [(self.H_id, (1, 3, 1), True, 1),
                           (self.I_id, (4,), False, 0),
                           (self.J_id, (1, 2, 3), True, 1),
@@ -374,30 +374,30 @@
         importer.do_import()
         importer._update_ancestry('O')
         self.grab_interesting_ids(importer._rev_id_to_db_id)
-        inc_importer = history_db._IncrementalMergeSort(importer, self.O_id)
+        inc_merger = history_db._IncrementalMergeSort(importer, self.O_id)
         # This should step through the ancestry, and load in the necessary
         # data. Check the final state
-        inc_importer._find_interesting_ancestry()
+        inc_merger._find_interesting_ancestry()
         self.assertEqual([self.O_id, self.N_id, self.I_id],
-                         inc_importer._mainline_db_ids)
+                         inc_merger._mainline_db_ids)
         # We should stop loading A, I need to figure out why it gets loaded
-        self.assertEqual(self.A_id, inc_importer._imported_mainline_id)
-        self.assertEqual(1, inc_importer._imported_gdfo)
+        self.assertEqual(self.A_id, inc_merger._imported_mainline_id)
+        self.assertEqual(1, inc_merger._imported_gdfo)
         self.assertEqual(set([self.H_id, self.I_id, self.J_id, self.K_id,
                               self.L_id, self.M_id, self.N_id, self.O_id]),
-                         inc_importer._interesting_ancestor_ids)
-        self.assertEqual(set([]), inc_importer._search_tips)
+                         inc_merger._interesting_ancestor_ids)
+        self.assertEqual(set([]), inc_merger._search_tips)
         self.assertEqual({self.B_id: ((1,1,1), 1, 1),
                           self.C_id: ((1,1,2), 0, 1),
                           self.D_id: ((2,), 0, 0),
                           self.E_id: ((1,2,1), 1, 1),
                           self.F_id: ((1,2,2), 0, 1),
                           self.G_id: ((3,), 0, 0),
-                         }, inc_importer._imported_dotted_revno)
+                         }, inc_merger._imported_dotted_revno)
         self.assertEqual({(1,1,1): self.B_id, (1,1,2): self.C_id,
                           (2,): self.D_id, (1,2,1): self.E_id,
                           (1,2,2): self.F_id, (3,): self.G_id,
-                         }, inc_importer._dotted_to_db_id)
+                         }, inc_merger._dotted_to_db_id)
 
     def test__step_search_tips_skips_already_seen(self):
         # Simpler graph:
@@ -429,19 +429,19 @@
         importer.do_import()
         importer._update_ancestry('H')
         self.grab_interesting_ids(importer._rev_id_to_db_id)
-        inc_importer = history_db._IncrementalMergeSort(importer, self.H_id)
-        inc_importer._find_needed_mainline()
-        self.assertEqual([self.H_id, self.F_id], inc_importer._mainline_db_ids)
-        self.assertEqual(self.B_id, inc_importer._imported_mainline_id)
-        inc_importer._get_initial_search_tips()
-        self.assertEqual(set([self.D_id, self.G_id]), inc_importer._search_tips)
+        inc_merger = history_db._IncrementalMergeSort(importer, self.H_id)
+        inc_merger._find_needed_mainline()
+        self.assertEqual([self.H_id, self.F_id], inc_merger._mainline_db_ids)
+        self.assertEqual(self.B_id, inc_merger._imported_mainline_id)
+        inc_merger._get_initial_search_tips()
+        self.assertEqual(set([self.D_id, self.G_id]), inc_merger._search_tips)
         # Both have higher-than-mainline gdfos
         self.assertEqual([],
-                 inc_importer._split_search_tips_by_gdfo([self.D_id, self.G_id]))
-        inc_importer._step_search_tips()
+                 inc_merger._split_search_tips_by_gdfo([self.D_id, self.G_id]))
+        inc_merger._step_search_tips()
         # It should want to include D_id, but it should know that we've already
         # been there
-        self.assertEqual(set([self.C_id, self.E_id]), inc_importer._search_tips)
+        self.assertEqual(set([self.C_id, self.E_id]), inc_merger._search_tips)
 
     def test_maintain_branch_counter_correct(self):
         # The trick is that if we have already imported to N, then we will be
@@ -454,10 +454,10 @@
         importer.do_import()
         importer._update_ancestry('O')
         self.grab_interesting_ids(importer._rev_id_to_db_id)
-        inc_importer = history_db._IncrementalMergeSort(importer, self.O_id)
-        inc_importer._find_interesting_ancestry()
-        inc_importer._compute_merge_sort()
-        self.assertScheduledStack(inc_importer,
+        inc_merger = history_db._IncrementalMergeSort(importer, self.O_id)
+        inc_merger._find_interesting_ancestry()
+        inc_merger._compute_merge_sort()
+        self.assertScheduledStack(inc_merger,
                          [(self.K_id, (1, 2, 4), True, 1),
                           (self.L_id, (1, 4, 1), True, 2),
                           (self.M_id, (1, 2, 5), False, 1),
@@ -465,7 +465,7 @@
                          ])
         # We have to load G to get E, but we shouldn't have to load D_id, so
         # that should be where we stop.
-        self.assertEqual(self.D_id, inc_importer._imported_mainline_id)
+        self.assertEqual(self.D_id, inc_merger._imported_mainline_id)
 
     def test_handles_simple_child(self):
         ancestry = {'A': (), 'B': ('A',)}
@@ -474,10 +474,10 @@
         importer.do_import()
         importer._update_ancestry('B')
         self.grab_interesting_ids(importer._rev_id_to_db_id)
-        inc_importer = history_db._IncrementalMergeSort(importer, self.B_id)
-        inc_importer._find_interesting_ancestry()
-        inc_importer._compute_merge_sort()
-        self.assertScheduledStack(inc_importer, [(self.B_id, (2,), False, 0)])
+        inc_merger = history_db._IncrementalMergeSort(importer, self.B_id)
+        inc_merger._find_interesting_ancestry()
+        inc_merger._compute_merge_sort()
+        self.assertScheduledStack(inc_merger, [(self.B_id, (2,), False, 0)])
 
     def test_handles_multi_roots(self):
         # Graph:
@@ -498,13 +498,13 @@
         importer.do_import()
         importer._update_ancestry('F')
         self.grab_interesting_ids(importer._rev_id_to_db_id)
-        inc_importer = history_db._IncrementalMergeSort(importer, self.F_id)
-        inc_importer._find_interesting_ancestry()
-        self.assertEqual(self.C_id, inc_importer._imported_mainline_id)
+        inc_merger = history_db._IncrementalMergeSort(importer, self.F_id)
+        inc_merger._find_interesting_ancestry()
+        self.assertEqual(self.C_id, inc_merger._imported_mainline_id)
         self.assertEqual(set([self.E_id, self.F_id]),
-                         inc_importer._interesting_ancestor_ids)
-        inc_importer._compute_merge_sort()
-        self.assertScheduledStack(inc_importer,
+                         inc_merger._interesting_ancestor_ids)
+        inc_merger._compute_merge_sort()
+        self.assertScheduledStack(inc_merger,
                          [(self.E_id, (0, 2, 1), True, 1),
                           (self.F_id, (4,), False, 0),
                          ])
@@ -537,19 +537,19 @@
         importer.do_import()
         importer._update_ancestry('K')
         self.grab_interesting_ids(importer._rev_id_to_db_id)
-        inc_importer = history_db._IncrementalMergeSort(importer, self.K_id)
-        inc_importer._find_interesting_ancestry()
-        self.assertEqual(self.G_id, inc_importer._imported_mainline_id)
+        inc_merger = history_db._IncrementalMergeSort(importer, self.K_id)
+        inc_merger._find_interesting_ancestry()
+        self.assertEqual(self.G_id, inc_merger._imported_mainline_id)
         self.assertEqual(set([self.K_id, self.J_id]),
-                         inc_importer._interesting_ancestor_ids)
-        inc_importer._compute_merge_sort()
-        self.assertScheduledStack(inc_importer,
+                         inc_merger._interesting_ancestor_ids)
+        inc_merger._compute_merge_sort()
+        self.assertScheduledStack(inc_merger,
                          [(self.J_id, (0, 3, 1), True, 1),
                           (self.K_id, (6,), False, 0),
                          ])
         # We only have to walk back and stop at D because we have found (0,2,1)
         # which must be the latest branch.
-        self.assertEqual(self.D_id, inc_importer._imported_mainline_id)
+        self.assertEqual(self.D_id, inc_merger._imported_mainline_id)
 
     def test_one_rev(self):
         # Trivial ancestry:
@@ -559,10 +559,10 @@
         importer = history_db.Importer(':memory:', b, incremental=False)
         importer._update_ancestry('A')
         self.grab_interesting_ids(importer._rev_id_to_db_id)
-        inc_importer = history_db._IncrementalMergeSort(importer, self.A_id)
-        inc_importer._find_interesting_ancestry()
-        inc_importer._compute_merge_sort()
-        self.assertScheduledStack(inc_importer,
+        inc_merger = history_db._IncrementalMergeSort(importer, self.A_id)
+        inc_merger._find_interesting_ancestry()
+        inc_merger._compute_merge_sort()
+        self.assertScheduledStack(inc_merger,
                          [(self.A_id, (1,), True, 0),
                          ])
 
@@ -571,15 +571,31 @@
         importer = history_db.Importer(':memory:', b, incremental=False)
         importer._update_ancestry('E')
         self.grab_interesting_ids(importer._rev_id_to_db_id)
-        inc_importer = history_db._IncrementalMergeSort(importer, self.E_id)
-        inc_importer._find_interesting_ancestry()
-        inc_importer._compute_merge_sort()
+        inc_merger = history_db._IncrementalMergeSort(importer, self.E_id)
+        inc_merger._find_interesting_ancestry()
+        inc_merger._compute_merge_sort()
         # G is not mentioned in merge_sorted, neither as a left-hand parent,
         # nor as a right-hand parent
-        self.assertScheduledStack(inc_importer,
+        self.assertScheduledStack(inc_merger,
                          [(self.A_id, (1,), True, 0),
                           (self.B_id, (2,), False, 0),
                           (self.C_id, (3,), False, 0),
                           (self.D_id, (0, 1, 1), True, 1),
                           (self.E_id, (4,), False, 0),
                          ])
+
+    def test_handle_partial_ghosts(self):
+        b = self.make_branch_with_ghosts()
+        b._tip_revision = 'C'
+        importer = history_db.Importer(':memory:', b, incremental=False)
+        importer.do_import()
+        importer._update_ancestry('E')
+        self.grab_interesting_ids(importer._rev_id_to_db_id)
+        inc_merger = history_db._IncrementalMergeSort(importer, self.E_id)
+        inc_merger.topo_order()
+        # G is not mentioned in merge_sorted, neither as a left-hand parent,
+        # nor as a right-hand parent
+        self.assertScheduledStack(inc_merger,
+                         [(self.D_id, (0, 1, 1), True, 1),
+                          (self.E_id, (4,), False, 0),
+                         ])
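
A note on the rename visible throughout the test changes:
_IncrementalMergeSort.do_import() is now topo_order(), which runs
_find_interesting_ancestry() and _compute_merge_sort() and then returns
list(reversed(self._scheduled_stack)), i.e. the nodes in the same order that
self._graph.merge_sort() would produce for the same tip. Roughly how
_import_tip() (and the new test) drives it, assuming an Importer has already
been built as in test_importer.py:

    # Sketch: driving the incremental sort for one tip revision.
    # 'history_db' is the plugin module, imported as in test_importer.py.
    def incremental_topo_order(importer, tip_revision_id):
        importer._update_ancestry(tip_revision_id)
        importer._ensure_revisions([tip_revision_id])
        tip_db_id = importer._rev_id_to_db_id[tip_revision_id]
        inc_merger = history_db._IncrementalMergeSort(importer, tip_db_id)
        # topo_order() runs _find_interesting_ancestry() and
        # _compute_merge_sort(), then returns the scheduled nodes.
        return inc_merger.topo_order()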


