Rev 2697: Basic version of knit-based repository operating, many tests failing. in http://people.ubuntu.com/~robertc/baz2.0/repository

Robert Collins robertc at robertcollins.net
Thu Aug 2 12:45:14 BST 2007


At http://people.ubuntu.com/~robertc/baz2.0/repository

------------------------------------------------------------
revno: 2697
revision-id: robertc at robertcollins.net-20070802114509-b67qb6z9f5lhyk8f
parent: robertc at robertcollins.net-20070802074102-wm3rjkbeb9bc39t2
committer: Robert Collins <robertc at robertcollins.net>
branch nick: repository
timestamp: Thu 2007-08-02 21:45:09 +1000
message:
  Basic version of knit-based repository operating, many tests failing.
modified:
  bzrlib/knit.py                 knit.py-20051212171256-f056ac8f0fbe1bd9
  bzrlib/repofmt/knitrepo.py     knitrepo.py-20070206081537-pyy4a00xdas0j4pf-1
  bzrlib/tests/test_knit.py      test_knit.py-20051212171302-95d4c00dd5f11f2b
=== modified file 'bzrlib/knit.py'
--- a/bzrlib/knit.py	2007-08-02 07:41:02 +0000
+++ b/bzrlib/knit.py	2007-08-02 11:45:09 +0000
@@ -429,7 +429,7 @@
         for count in xrange(self._max_delta_chain):
             parent = delta_parents[0]
             method = self._index.get_method(parent)
-            pos, size = self._index.get_position(parent)
+            index, pos, size = self._index.get_position(parent)
             if method == 'fulltext':
                 fulltext_size = size
                 break
@@ -577,8 +577,8 @@
             parent = parents[0]
         else:
             parent = None
-        data_pos, data_size = self._index.get_position(version_id)
-        data, sha1 = self._data.read_records(((version_id, data_pos, data_size),))[version_id]
+        index_memo = self._index.get_position(version_id)
+        data, sha1 = self._data.read_records(((version_id, index_memo),))[version_id]
         noeol = 'no-eol' in self._index.get_options(version_id)
         if 'fulltext' == self._index.get_method(version_id):
             new_content = self.factory.parse_fulltext(data, version_id)
@@ -702,8 +702,8 @@
                     next = None
                 else:
                     next = self.get_parents(cursor)[0]
-                data_pos, data_size = self._index.get_position(cursor)
-                component_data[cursor] = (method, data_pos, data_size, next)
+                index_memo = self._index.get_position(cursor)
+                component_data[cursor] = (method, index_memo, next)
                 cursor = next
         return component_data
        
@@ -836,12 +836,12 @@
         If the method is fulltext, next will be None.
         """
         position_map = self._get_components_positions(version_ids)
-        # c = component_id, m = method, p = position, s = size, n = next
-        records = [(c, p, s) for c, (m, p, s, n) in position_map.iteritems()]
+        # c = component_id, m = method, i_m = index_memo, n = next
+        records = [(c, i_m) for c, (m, i_m, n) in position_map.iteritems()]
         record_map = {}
         for component_id, content, digest in \
                 self._data.read_records_iter(records):
-            method, position, size, next = position_map[component_id]
+            method, index_memo, next = position_map[component_id]
             record_map[component_id] = method, content, digest, next
                           
         return record_map
@@ -939,8 +939,8 @@
         # get a in-component-order queue:
         for version_id in self.versions():
             if version_id in requested_versions:
-                data_pos, length = self._index.get_position(version_id)
-                version_id_records.append((version_id, data_pos, length))
+                index_memo = self._index.get_position(version_id)
+                version_id_records.append((version_id, index_memo))
 
         total = len(version_id_records)
         for version_idx, (version_id, data, sha_value) in \
@@ -1274,9 +1274,9 @@
                 result_list.append('.' + version)
         return ' '.join(result_list)
 
-    def add_version(self, version_id, options, (pos, size), parents):
+    def add_version(self, version_id, options, index_memo, parents):
         """Add a version record to the index."""
-        self.add_versions(((version_id, options, (pos, size), parents),))
+        self.add_versions(((version_id, options, index_memo, parents),))
 
     def add_versions(self, versions):
         """Add multiple versions to the index.
@@ -1289,7 +1289,7 @@
         orig_cache = self._cache.copy()
 
         try:
-            for version_id, options, (pos, size), parents in versions:
+            for version_id, options, (index, pos, size), parents in versions:
                 line = "\n%s %s %s %s %s :" % (version_id,
                                                ','.join(options),
                                                pos,
@@ -1324,7 +1324,7 @@
     def get_position(self, version_id):
         """Return data position and size of specified version."""
         entry = self._cache[version_id]
-        return entry[2], entry[3]
+        return None, entry[2], entry[3]
 
     def get_method(self, version_id):
         """Return compression method of specified version."""
@@ -1544,9 +1544,14 @@
         return tuple(key[0] for key in keys)
 
     def get_position(self, version_id):
-        """Return data position and size of specified version."""
-        bits = self._get_node(version_id)[2][1:].split(' ')
-        return int(bits[0]), int(bits[1])
+        """Return details needed to access the version.
+        
+        :return: a tuple (index, data position, size) to hand to the access
+            logic to get the record.
+        """
+        node = self._get_node(version_id)
+        bits = node[2][1:].split(' ')
+        return node[0], int(bits[0]), int(bits[1])
 
     def get_method(self, version_id):
         """Return compression method of specified version."""
@@ -1602,9 +1607,9 @@
         if missing:
             raise RevisionNotPresent(missing.pop(), self)
 
-    def add_version(self, version_id, options, (pos, size), parents):
+    def add_version(self, version_id, options, access_memo, parents):
         """Add a version record to the index."""
-        return self.add_versions(((version_id, options, (pos, size), parents),))
+        return self.add_versions(((version_id, options, access_memo, parents),))
 
     def add_versions(self, versions):
         """Add multiple versions to the index.
@@ -1624,8 +1629,12 @@
         # check for dups
 
         keys = {}
-        for (version_id, options, (pos, size), parents) in versions:
+        for (version_id, options, access_memo, parents) in versions:
             # index keys are tuples:
+            try:
+                pos, size = access_memo
+            except ValueError:
+                index, pos, size = access_memo
             key = (version_id, )
             parents = tuple((parent, ) for parent in parents)
             if 'no-eol' in options:
@@ -1711,7 +1720,7 @@
             base = 0
         result = []
         for size in sizes:
-            result.append((base, size))
+            result.append((None, base, size))
             base += size
         return result
 
@@ -1745,7 +1754,8 @@
             a readv tuple.
         :return: An iterator over the bytes of the records.
         """
-        for pos, data in self._transport.readv(self._filename, memos_for_retrieval):
+        read_vector = [(pos, size) for (index, pos, size) in memos_for_retrieval]
+        for pos, data in self._transport.readv(self._filename, read_vector):
             yield data
 
 
@@ -1978,16 +1988,16 @@
             # grab the disk data needed.
             if self._cache:
                 # Don't check _cache if it is empty
-                needed_offsets = [(pos, size) for version_id, pos, size
+                needed_offsets = [index_memo for version_id, index_memo
                                               in records
                                               if version_id not in self._cache]
             else:
-                needed_offsets = [(pos, size) for version_id, pos, size
+                needed_offsets = [index_memo for version_id, index_memo
                                                in records]
 
             raw_records = self._access.get_raw_records(needed_offsets)
 
-        for version_id, pos, size in records:
+        for version_id, index_memo in records:
             if version_id in self._cache:
                 # This data has already been validated
                 data = self._cache[version_id]
@@ -2038,9 +2048,9 @@
         # The transport optimizes the fetching as well 
         # (ie, reads continuous ranges.)
         raw_data = self._access.get_raw_records(
-            [(pos, size) for version_id, pos, size in needed_records])
+            [index_memo for version_id, index_memo in needed_records])
 
-        for (version_id, pos, size), data in \
+        for (version_id, index_memo), data in \
                 izip(iter(needed_records), raw_data):
             content, digest = self._parse_record(version_id, data)
             if self._do_cache:
@@ -2136,8 +2146,8 @@
                     assert (self.target.has_version(parent) or
                             parent in copy_set or
                             not self.source.has_version(parent))
-                data_pos, data_size = self.source._index.get_position(version_id)
-                copy_queue_records.append((version_id, data_pos, data_size))
+                index_memo = self.source._index.get_position(version_id)
+                copy_queue_records.append((version_id, index_memo))
                 copy_queue.append((version_id, options, parents))
                 copy_set.add(version_id)
 

=== modified file 'bzrlib/repofmt/knitrepo.py'
--- a/bzrlib/repofmt/knitrepo.py	2007-08-01 04:39:01 +0000
+++ b/bzrlib/repofmt/knitrepo.py	2007-08-02 11:45:09 +0000
@@ -18,14 +18,16 @@
 lazy_import(globals(), """
 from bzrlib import (
         file_names,
+        pack,
         )
 from bzrlib.index import (
+    GraphIndex,
     InMemoryGraphIndex,
-    GraphIndex,
     CombinedGraphIndex,
     GraphIndexPrefixAdapter,
     )
-from bzrlib.knit import KnitGraphIndex
+from bzrlib.knit import KnitGraphIndex, _PackAccess
+from bzrlib.pack import ContainerWriter
 from bzrlib.store import revision
 """)
 from bzrlib import (
@@ -350,25 +352,33 @@
             return self.repo._revision_knit
         indices = []
         self.repo._data_names.ensure_loaded()
+        pack_map = {}
         for name in self.repo._data_names.names():
             # TODO: maybe this should expose size to us  to allow
             # sorting of the indices for better performance ?
             index_name = self.name_to_revision_index_name(name)
             indices.append(GraphIndex(self.transport, index_name))
+            pack_map[indices[-1]] = (self.repo._pack_tuple(name))
         if self.repo.is_in_write_group():
             # allow writing: queue writes to a new index
             indices.append(self.repo._revision_write_index)
+            pack_map[self.repo._revision_write_index] = self.repo._open_pack_tuple
+            writer = self.repo._open_pack_writer, self.repo._revision_write_index
             add_callback = self.repo._revision_write_index.add_nodes
         else:
+            writer = None
             add_callback = None # no data-adding permitted.
         self.repo._revision_all_indices = CombinedGraphIndex(indices)
         knit_index = KnitGraphIndex(self.repo._revision_all_indices,
             add_callback=add_callback)
+        knit_access = _PackAccess(pack_map, writer)
+        self.repo._revision_knit_access = knit_access
         self.repo._revision_knit = knit.KnitVersionedFile(
             'revisions', self.transport.clone('..'),
             self.repo.control_files._file_mode,
             create=False, access_mode=self.repo.control_files._lock_mode,
-            index=knit_index, delta=False, factory=knit.KnitPlainFactory())
+            index=knit_index, delta=False, factory=knit.KnitPlainFactory(),
+            access_method=knit_access)
         return self.repo._revision_knit
 
     def get_signature_file(self, transaction):
@@ -377,25 +387,33 @@
             return self.repo._signature_knit
         indices = []
         self.repo._data_names.ensure_loaded()
+        pack_map = {}
         for name in self.repo._data_names.names():
             # TODO: maybe this should expose size to us  to allow
             # sorting of the indices for better performance ?
             index_name = self.name_to_signature_index_name(name)
             indices.append(GraphIndex(self.transport, index_name))
+            pack_map[indices[-1]] = (self.repo._pack_tuple(name))
         if self.repo.is_in_write_group():
             # allow writing: queue writes to a new index
             indices.append(self.repo._signature_write_index)
+            pack_map[self.repo._signature_write_index] = self.repo._open_pack_tuple
+            writer = self.repo._open_pack_writer, self.repo._signature_write_index
             add_callback = self.repo._signature_write_index.add_nodes
         else:
+            writer = None
             add_callback = None # no data-adding permitted.
         self.repo._signature_all_indices = CombinedGraphIndex(indices)
         knit_index = KnitGraphIndex(self.repo._signature_all_indices,
             add_callback=add_callback, parents=False)
+        knit_access = _PackAccess(pack_map, writer)
+        self.repo._signature_knit_access = knit_access
         self.repo._signature_knit = knit.KnitVersionedFile(
             'signatures', self.transport.clone('..'),
             self.repo.control_files._file_mode,
             create=False, access_mode=self.repo.control_files._lock_mode,
-            index=knit_index, delta=False, factory=knit.KnitPlainFactory())
+            index=knit_index, delta=False, factory=knit.KnitPlainFactory(),
+            access_method=knit_access)
         return self.repo._signature_knit
 
     def data_inserted(self):
@@ -422,6 +440,8 @@
             # remove the write buffering index. XXX: API break
             # - clearly we need a remove_index call too.
             del self.repo._revision_all_indices._indices[-1]
+            # reset the knit access writer
+            self.repo._revision_knit_access.set_writer(None, None, (None, None))
         # write a signatures index (might be empty)
         new_index_name = self.name_to_signature_index_name(new_name)
         self.transport.put_file(new_index_name,
@@ -434,6 +454,8 @@
             # remove the write buffering index. XXX: API break
             # - clearly we need a remove_index call too.
             del self.repo._signature_all_indices._indices[-1]
+            # reset the knit access writer
+            self.repo._signature_knit_access.set_writer(None, None, (None, None))
 
     def name_to_revision_index_name(self, name):
         """The revision index is the name + .rix."""
@@ -449,10 +471,12 @@
         self.repo._revision_knit = None
         self.repo._revision_write_index = None
         self.repo._revision_all_indices = None
+        self.repo._revision_knit_access = None
         # cached signature data
         self.repo._signature_knit = None
         self.repo._signature_write_index = None
         self.repo._signature_all_indices = None
+        self.repo._signature_knit_access = None
 
     def setup(self):
         # setup in-memory indices to accumulate data.
@@ -463,9 +487,13 @@
         if self.repo._revision_knit is not None:
             self.repo._revision_all_indices.insert_index(0, self.repo._revision_write_index)
             self.repo._revision_knit._index._add_callback = self.repo._revision_write_index.add_nodes
+            self.repo._revision_knit_access.set_writer(self.repo._open_pack_writer,
+                self.repo._revision_write_index, self.repo._open_pack_tuple)
         if self.repo._signature_knit is not None:
             self.repo._signature_all_indices.insert_index(0, self.repo._signature_write_index)
             self.repo._signature_knit._index._add_callback = self.repo._signature_write_index.add_nodes
+            self.repo._signature_knit_access.set_writer(self.repo._open_pack_writer,
+                self.repo._signature_write_index, self.repo._open_pack_tuple)
 
 
 class GraphKnitTextStore(VersionedFileStore):
@@ -512,11 +540,13 @@
             return
         indices = []
         self.repo._data_names.ensure_loaded()
+        self.repo._text_pack_map = {}
         for name in self.repo._data_names.names():
             # TODO: maybe this should expose size to us  to allow
             # sorting of the indices for better performance ?
             index_name = self.name_to_text_index_name(name)
             indices.append(GraphIndex(self.transport, index_name))
+            self.repo._text_pack_map[indices[-1]] = (self.repo._pack_tuple(name))
         if self.repo.is_in_write_group():
             # allow writing: queue writes to a new index
             indices.append(self.repo._text_write_index)
@@ -548,17 +578,22 @@
         filename = self.weavestore.filename(file_id)
         if self.repo.is_in_write_group():
             add_callback = self.repo._text_write_index.add_nodes
+            self.repo._text_pack_map[self.repo._text_write_index] = self.repo._open_pack_tuple
+            writer = self.repo._open_pack_writer, self.repo._text_write_index
         else:
             add_callback = None # no data-adding permitted.
+            writer = None
 
         file_id_index = GraphIndexPrefixAdapter(self.repo._text_all_indices,
             (file_id, ), 1, add_nodes_callback=add_callback)
         knit_index = KnitGraphIndex(file_id_index,
             add_callback=file_id_index.add_nodes,
             deltas=True, parents=True)
+        knit_access = _PackAccess(self.repo._text_pack_map, writer)
         return knit.KnitVersionedFile(filename, self.weavestore._transport,
             self.weavestore._file_mode,
             index=knit_index,
+            access_method=knit_access,
             **self.weavestore._versionedfile_kwargs)
 
     get_weave = get_weave_or_empty
@@ -567,7 +602,7 @@
         """Generate a list of the fileids inserted, for use by check."""
         self._ensure_all_index()
         ids = set()
-        for key, value, refs in self.repo._text_all_indices.iter_all_entries():
+        for index, key, value, refs in self.repo._text_all_indices.iter_all_entries():
             ids.add(key[0])
         return iter(ids)
 
@@ -581,6 +616,8 @@
         self.repo._text_write_index = None
         # remove all constructed text data indices
         self.repo._text_all_indices = None
+        # and the pack map
+        self.repo._text_pack_map = None
 
     def setup(self):
         # setup in-memory indices to accumulate data.
@@ -617,15 +654,18 @@
             return
         indices = []
         self.repo._data_names.ensure_loaded()
+        pack_map = {}
         for name in self.repo._data_names.names():
             # TODO: maybe this should expose size to us  to allow
             # sorting of the indices for better performance ?
             index_name = self.name_to_inv_index_name(name)
             indices.append(GraphIndex(self.transport, index_name))
+            pack_map[indices[-1]] = (self.repo._pack_tuple(name))
         if self.repo.is_in_write_group():
             # allow writing: queue writes to a new index
             indices.append(self.repo._inv_write_index)
         self.repo._inv_all_indices = CombinedGraphIndex(indices)
+        self.repo._inv_pack_map = pack_map
 
     def flush(self, new_name):
         """Write the index out to new_name."""
@@ -643,6 +683,8 @@
             # remove the write buffering index. XXX: API break
             # - clearly we need a remove_index call too.
             del self.repo._inv_all_indices._indices[-1]
+            self.repo._inv_knit_access.set_writer(None, None, (None, None))
+        self.repo._inv_pack_map = None
 
     def get_weave(self):
         """Get a 'Knit' that contains inventory data."""
@@ -650,16 +692,22 @@
         filename = 'inventory'
         if self.repo.is_in_write_group():
             add_callback = self.repo._inv_write_index.add_nodes
+            self.repo._inv_pack_map[self.repo._inv_write_index] = self.repo._open_pack_tuple
+            writer = self.repo._open_pack_writer, self.repo._inv_write_index
         else:
             add_callback = None # no data-adding permitted.
+            writer = None
 
         knit_index = KnitGraphIndex(self.repo._inv_all_indices,
             add_callback=add_callback,
             deltas=True, parents=True)
         # TODO - mode support. self.weavestore._file_mode,
+        knit_access = _PackAccess(self.repo._inv_pack_map, writer)
+        self.repo._inv_knit_access = knit_access
         return knit.KnitVersionedFile('inventory', self.transport.clone('..'),
             index=knit_index,
-            factory=knit.KnitPlainFactory())
+            factory=knit.KnitPlainFactory(),
+            access_method=knit_access)
 
     def name_to_inv_index_name(self, name):
         """The inv index is the name + .iix."""
@@ -671,6 +719,9 @@
         self.repo._inv_write_index = None
         # remove all constructed inv data indices
         self.repo._inv_all_indices = None
+        # remove the knit access object
+        self.repo._inv_knit_access = None
+        self.repo._inv_pack_map = None
 
     def setup(self):
         # setup in-memory indices to accumulate data.
@@ -697,6 +748,8 @@
         self._revision_store = GraphKnitRevisionStore(self, index_transport, self._revision_store)
         self.weave_store = GraphKnitTextStore(self, index_transport, self.weave_store)
         self._inv_thunk = InventoryKnitThunk(self, index_transport)
+        self._upload_transport = control_files._transport.clone('upload')
+        self._pack_transport = control_files._transport.clone('packs')
 
     def _abort_write_group(self):
         # FIXME: just drop the transient index.
@@ -706,6 +759,10 @@
         # forget what names there are
         self._data_names.reset()
 
+    def _pack_tuple(self, name):
+        """Return a tuple with the transport and file name for a pack name."""
+        return self._pack_transport, name + '.pack'
+
     def _refresh_data(self):
         if self.control_files._lock_count==1:
             self._revision_store.reset()
@@ -715,6 +772,12 @@
             self._data_names.reset()
 
     def _start_write_group(self):
+        random_name = self.control_files._lock.nonce
+        self._open_pack_tuple = (self._upload_transport, random_name + '.pack')
+        def write_data(bytes):
+            self._upload_transport.append_bytes(random_name + '.pack', bytes)
+        self._open_pack_writer = pack.ContainerWriter(write_data)
+        self._open_pack_writer.begin()
         self._data_names.setup()
         self._revision_store.setup()
         self.weave_store.setup()
@@ -729,7 +792,12 @@
             self.weave_store.flush(new_name)
             self._inv_thunk.flush(new_name)
             self._revision_store.flush(new_name)
+            self._upload_transport.rename(self._open_pack_tuple[1],
+                '../packs/' + new_name + '.pack')
             self._data_names.save()
+        else:
+            # can the pending upload
+            self._upload_transport.delete(self._open_pack_tuple[1])
         self._revision_store.reset()
         self.weave_store.reset()
         self._inv_thunk.reset()
@@ -753,6 +821,8 @@
         self._revision_store = GraphKnitRevisionStore(self, index_transport, self._revision_store)
         self.weave_store = GraphKnitTextStore(self, index_transport, self.weave_store)
         self._inv_thunk = InventoryKnitThunk(self, index_transport)
+        self._upload_transport = control_files._transport.clone('upload')
+        self._pack_transport = control_files._transport.clone('packs')
 
     def _abort_write_group(self):
         # FIXME: just drop the transient index.
@@ -762,6 +832,10 @@
         # forget what names there are
         self._data_names.reset()
 
+    def _pack_tuple(self, name):
+        """Return a tuple with the transport and file name for a pack name."""
+        return self._pack_transport, name + '.pack'
+
     def _refresh_data(self):
         if self.control_files._lock_count==1:
             self._revision_store.reset()
@@ -771,6 +845,12 @@
             self._data_names.reset()
 
     def _start_write_group(self):
+        random_name = self.control_files._lock.nonce
+        self._open_pack_tuple = (self._upload_transport, random_name + '.pack')
+        def write_data(bytes):
+            self._upload_transport.append_bytes(random_name + '.pack', bytes)
+        self._open_pack_writer = pack.ContainerWriter(write_data)
+        self._open_pack_writer.begin()
         self._data_names.setup()
         self._revision_store.setup()
         self.weave_store.setup()
@@ -785,7 +865,12 @@
             self.weave_store.flush(new_name)
             self._inv_thunk.flush(new_name)
             self._revision_store.flush(new_name)
+            self._upload_transport.rename(self._open_pack_tuple[1],
+                '../packs/' + new_name + '.pack')
             self._data_names.save()
+        else:
+            # can the pending upload
+            self._upload_transport.delete(self._open_pack_tuple[1])
         self._revision_store.reset()
         self.weave_store.reset()
         self._inv_thunk.reset()
@@ -1043,15 +1128,16 @@
     mutter('changing to GraphKnit1 repository in %s.', a_bzrdir.transport.base)
     repo_transport = a_bzrdir.get_repository_transport(None)
     repo_transport.mkdir('indices')
+    repo_transport.mkdir('packs')
+    repo_transport.mkdir('upload')
+    repo_transport.rmdir('knits')
     names = file_names.FileNames(
         repo_transport.clone('indices'), 'index')
     names.initialise()
     names.save()
-    repo_transport.delete('revisions.kndx')
-    repo_transport.delete('signatures.kndx')
-    for first in '0123456789abcdef':
-        for second in '0123456789abcdef':
-            repo_transport.mkdir('knits/%s%s' % (first, second))
+    for knit in ('inventory', 'revisions', 'signatures'):
+        repo_transport.delete(knit + '.kndx')
+        repo_transport.delete(knit + '.knit')
 
 
 class RepositoryFormatGraphKnit3(RepositoryFormatKnit3):
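
Note on the knitrepo.py hunks above: each knit is now handed a _PackAccess built from a pack_map (GraphIndex -> (transport, 'NAME.pack') tuple) plus, inside a write group, a (ContainerWriter, write index) pair. _start_write_group opens a pending pack under upload/<nonce>.pack, and when the write group finishes the pending file is either renamed into packs/<new_name>.pack or deleted. The sketch below walks through that lifecycle using plain local files and illustrative names; it is not the GraphKnitRepository code itself.

import os

class PendingPack(object):
    """Accumulate pack bytes under upload/ and publish or discard them."""

    def __init__(self, upload_dir, pack_dir, nonce):
        self._upload_dir = upload_dir
        self._pack_dir = pack_dir
        self._pending = os.path.join(upload_dir, nonce + '.pack')

    def begin(self):
        # Comparable to ContainerWriter.begin(): create the pending file.
        open(self._pending, 'wb').close()

    def write(self, data):
        # Comparable to the append_bytes callback given to ContainerWriter.
        f = open(self._pending, 'ab')
        try:
            f.write(data)
        finally:
            f.close()

    def commit(self, new_name):
        # Successful write group: publish under the final pack name.
        os.rename(self._pending,
                  os.path.join(self._pack_dir, new_name + '.pack'))

    def abort(self):
        # Aborted write group: discard ("can") the pending upload.
        os.remove(self._pending)

# Example usage (assumes 'upload' and 'packs' directories exist):
#   pack = PendingPack('upload', 'packs', 'some-nonce')
#   pack.begin(); pack.write(b'record bytes'); pack.commit('0')
#   => packs/0.pack now holds the data.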

=== modified file 'bzrlib/tests/test_knit.py'
--- a/bzrlib/tests/test_knit.py	2007-08-02 07:41:02 +0000
+++ b/bzrlib/tests/test_knit.py	2007-08-02 11:45:09 +0000
@@ -288,7 +288,7 @@
         transport = MockTransport([gz_txt])
         access = _KnitAccess(transport, 'filename', None, None, False, False)
         data = _KnitData(access=access)
-        records = [('rev-id-1', 0, len(gz_txt))]
+        records = [('rev-id-1', (None, 0, len(gz_txt)))]
 
         contents = data.read_records(records)
         self.assertEqual({'rev-id-1':(['foo\n', 'bar\n'], sha1sum)}, contents)
@@ -306,7 +306,7 @@
         transport = MockTransport([gz_txt])
         access = _KnitAccess(transport, 'filename', None, None, False, False)
         data = _KnitData(access=access)
-        records = [('rev-id-1', 0, len(gz_txt))]
+        records = [('rev-id-1', (None, 0, len(gz_txt)))]
         self.assertRaises(errors.KnitCorrupt, data.read_records, records)
 
         # read_records_iter_raw won't detect that sort of mismatch/corruption
@@ -324,7 +324,7 @@
         transport = MockTransport([gz_txt])
         access = _KnitAccess(transport, 'filename', None, None, False, False)
         data = _KnitData(access=access)
-        records = [('rev-id-1', 0, len(gz_txt))]
+        records = [('rev-id-1', (None, 0, len(gz_txt)))]
         self.assertRaises(errors.KnitCorrupt, data.read_records, records)
 
         # read_records_iter_raw won't detect that sort of mismatch/corruption
@@ -342,7 +342,7 @@
         access = _KnitAccess(transport, 'filename', None, None, False, False)
         data = _KnitData(access=access)
         # We are asking for rev-id-2, but the data is rev-id-1
-        records = [('rev-id-2', 0, len(gz_txt))]
+        records = [('rev-id-2', (None, 0, len(gz_txt)))]
         self.assertRaises(errors.KnitCorrupt, data.read_records, records)
 
         # read_records_iter_raw will notice if we request the wrong version.
@@ -359,7 +359,7 @@
         transport = MockTransport([txt])
         access = _KnitAccess(transport, 'filename', None, None, False, False)
         data = _KnitData(access=access)
-        records = [('rev-id-1', 0, len(txt))]
+        records = [('rev-id-1', (None, 0, len(txt)))]
 
         # We don't have valid gzip data ==> corrupt
         self.assertRaises(errors.KnitCorrupt, data.read_records, records)
@@ -380,7 +380,7 @@
         transport = MockTransport([gz_txt])
         access = _KnitAccess(transport, 'filename', None, None, False, False)
         data = _KnitData(access=access)
-        records = [('rev-id-1', 0, len(gz_txt))]
+        records = [('rev-id-1', (None, 0, len(gz_txt)))]
 
         self.assertRaises(errors.KnitCorrupt, data.read_records, records)
 
@@ -492,7 +492,7 @@
         # check that the index used is the first one written. (Specific
         # to KnitIndex style indices.
         self.assertEqual("1", index._version_list_to_index(["version"]))
-        self.assertEqual((3, 4), index.get_position("version"))
+        self.assertEqual((None, 3, 4), index.get_position("version"))
         self.assertEqual(["options3"], index.get_options("version"))
         self.assertEqual(["parent", "other"],
             index.get_parents_with_ghosts("version"))
@@ -515,7 +515,7 @@
             _KnitIndex.HEADER
             ])
         index = self.get_knit_index(transport, "filename", "r")
-        index.add_version(utf8_revision_id, ["option"], (0, 1), [])
+        index.add_version(utf8_revision_id, ["option"], (None, 0, 1), [])
         self.assertEqual(("append_bytes", ("filename",
             "\n%s option 0 1  :" % (utf8_revision_id,)),
             {}),
@@ -528,7 +528,7 @@
             _KnitIndex.HEADER
             ])
         index = self.get_knit_index(transport, "filename", "r")
-        index.add_version("version", ["option"], (0, 1), [utf8_revision_id])
+        index.add_version("version", ["option"], (None, 0, 1), [utf8_revision_id])
         self.assertEqual(("append_bytes", ("filename",
             "\nversion option 0 1 .%s :" % (utf8_revision_id,)),
             {}),
@@ -539,10 +539,10 @@
         index = self.get_knit_index(transport, "filename", "w", create=True)
         self.assertEqual([], index.get_graph())
 
-        index.add_version("a", ["option"], (0, 1), ["b"])
+        index.add_version("a", ["option"], (None, 0, 1), ["b"])
         self.assertEqual([("a", ["b"])], index.get_graph())
 
-        index.add_version("c", ["option"], (0, 1), ["d"])
+        index.add_version("c", ["option"], (None, 0, 1), ["d"])
         self.assertEqual([("a", ["b"]), ("c", ["d"])],
             sorted(index.get_graph()))
 
@@ -599,11 +599,11 @@
         transport = MockTransport()
         index = self.get_knit_index(transport, "filename", "w", create=True)
         # no parents
-        index.add_version('r0', ['option'], (0, 1), [])
+        index.add_version('r0', ['option'], (None, 0, 1), [])
         # 1 parent
-        index.add_version('r1', ['option'], (0, 1), ['r0'])
+        index.add_version('r1', ['option'], (None, 0, 1), ['r0'])
         # 2 parents
-        index.add_version('r2', ['option'], (0, 1), ['r1', 'r0'])
+        index.add_version('r2', ['option'], (None, 0, 1), ['r1', 'r0'])
         # XXX TODO a ghost
         # cases: each sample data individually:
         self.assertEqual(set([('r0', ())]),
@@ -634,15 +634,15 @@
         self.assertEqual(0, index.num_versions())
         self.assertEqual(0, len(index))
 
-        index.add_version("a", ["option"], (0, 1), [])
-        self.assertEqual(1, index.num_versions())
-        self.assertEqual(1, len(index))
-
-        index.add_version("a", ["option2"], (1, 2), [])
-        self.assertEqual(1, index.num_versions())
-        self.assertEqual(1, len(index))
-
-        index.add_version("b", ["option"], (0, 1), [])
+        index.add_version("a", ["option"], (None, 0, 1), [])
+        self.assertEqual(1, index.num_versions())
+        self.assertEqual(1, len(index))
+
+        index.add_version("a", ["option2"], (None, 1, 2), [])
+        self.assertEqual(1, index.num_versions())
+        self.assertEqual(1, len(index))
+
+        index.add_version("b", ["option"], (None, 0, 1), [])
         self.assertEqual(2, index.num_versions())
         self.assertEqual(2, len(index))
 
@@ -654,13 +654,13 @@
 
         self.assertEqual([], index.get_versions())
 
-        index.add_version("a", ["option"], (0, 1), [])
-        self.assertEqual(["a"], index.get_versions())
-
-        index.add_version("a", ["option"], (0, 1), [])
-        self.assertEqual(["a"], index.get_versions())
-
-        index.add_version("b", ["option"], (0, 1), [])
+        index.add_version("a", ["option"], (None, 0, 1), [])
+        self.assertEqual(["a"], index.get_versions())
+
+        index.add_version("a", ["option"], (None, 0, 1), [])
+        self.assertEqual(["a"], index.get_versions())
+
+        index.add_version("b", ["option"], (None, 0, 1), [])
         self.assertEqual(["a", "b"], index.get_versions())
 
     def test_add_version(self):
@@ -669,33 +669,33 @@
             ])
         index = self.get_knit_index(transport, "filename", "r")
 
-        index.add_version("a", ["option"], (0, 1), ["b"])
+        index.add_version("a", ["option"], (None, 0, 1), ["b"])
         self.assertEqual(("append_bytes",
             ("filename", "\na option 0 1 .b :"),
             {}), transport.calls.pop(0))
         self.assertTrue(index.has_version("a"))
         self.assertEqual(1, index.num_versions())
-        self.assertEqual((0, 1), index.get_position("a"))
+        self.assertEqual((None, 0, 1), index.get_position("a"))
         self.assertEqual(["option"], index.get_options("a"))
         self.assertEqual(["b"], index.get_parents_with_ghosts("a"))
 
-        index.add_version("a", ["opt"], (1, 2), ["c"])
+        index.add_version("a", ["opt"], (None, 1, 2), ["c"])
         self.assertEqual(("append_bytes",
             ("filename", "\na opt 1 2 .c :"),
             {}), transport.calls.pop(0))
         self.assertTrue(index.has_version("a"))
         self.assertEqual(1, index.num_versions())
-        self.assertEqual((1, 2), index.get_position("a"))
+        self.assertEqual((None, 1, 2), index.get_position("a"))
         self.assertEqual(["opt"], index.get_options("a"))
         self.assertEqual(["c"], index.get_parents_with_ghosts("a"))
 
-        index.add_version("b", ["option"], (2, 3), ["a"])
+        index.add_version("b", ["option"], (None, 2, 3), ["a"])
         self.assertEqual(("append_bytes",
             ("filename", "\nb option 2 3 0 :"),
             {}), transport.calls.pop(0))
         self.assertTrue(index.has_version("b"))
         self.assertEqual(2, index.num_versions())
-        self.assertEqual((2, 3), index.get_position("b"))
+        self.assertEqual((None, 2, 3), index.get_position("b"))
         self.assertEqual(["option"], index.get_options("b"))
         self.assertEqual(["a"], index.get_parents_with_ghosts("b"))
 
@@ -706,9 +706,9 @@
         index = self.get_knit_index(transport, "filename", "r")
 
         index.add_versions([
-            ("a", ["option"], (0, 1), ["b"]),
-            ("a", ["opt"], (1, 2), ["c"]),
-            ("b", ["option"], (2, 3), ["a"])
+            ("a", ["option"], (None, 0, 1), ["b"]),
+            ("a", ["opt"], (None, 1, 2), ["c"]),
+            ("b", ["option"], (None, 2, 3), ["a"])
             ])
         self.assertEqual(("append_bytes", ("filename",
             "\na option 0 1 .b :"
@@ -718,8 +718,8 @@
         self.assertTrue(index.has_version("a"))
         self.assertTrue(index.has_version("b"))
         self.assertEqual(2, index.num_versions())
-        self.assertEqual((1, 2), index.get_position("a"))
-        self.assertEqual((2, 3), index.get_position("b"))
+        self.assertEqual((None, 1, 2), index.get_position("a"))
+        self.assertEqual((None, 2, 3), index.get_position("b"))
         self.assertEqual(["opt"], index.get_options("a"))
         self.assertEqual(["option"], index.get_options("b"))
         self.assertEqual(["c"], index.get_parents_with_ghosts("a"))
@@ -734,9 +734,9 @@
         self.assertEqual([], transport.calls)
 
         index.add_versions([
-            ("a", ["option"], (0, 1), ["b"]),
-            ("a", ["opt"], (1, 2), ["c"]),
-            ("b", ["option"], (2, 3), ["a"])
+            ("a", ["option"], (None, 0, 1), ["b"]),
+            ("a", ["opt"], (None, 1, 2), ["c"]),
+            ("b", ["option"], (None, 2, 3), ["a"])
             ])
         name, (filename, f), kwargs = transport.calls.pop(0)
         self.assertEqual("put_file_non_atomic", name)
@@ -769,8 +769,8 @@
             ])
         index = self.get_knit_index(transport, "filename", "r")
 
-        self.assertEqual((0, 1), index.get_position("a"))
-        self.assertEqual((1, 2), index.get_position("b"))
+        self.assertEqual((None, 0, 1), index.get_position("a"))
+        self.assertEqual((None, 1, 2), index.get_position("b"))
 
     def test_get_method(self):
         transport = MockTransport([
@@ -1591,8 +1591,8 @@
 
         def read_one_raw(version):
             pos_map = k._get_components_positions([version])
-            method, pos, size, next = pos_map[version]
-            lst = list(k._data.read_records_iter_raw([(version, pos, size)]))
+            method, index_memo, next = pos_map[version]
+            lst = list(k._data.read_records_iter_raw([(version, index_memo)]))
             self.assertEqual(1, len(lst))
             return lst[0]
 
@@ -1612,8 +1612,8 @@
 
         def read_one(version):
             pos_map = k._get_components_positions([version])
-            method, pos, size, next = pos_map[version]
-            lst = list(k._data.read_records_iter([(version, pos, size)]))
+            method, index_memo, next = pos_map[version]
+            lst = list(k._data.read_records_iter([(version, index_memo)]))
             self.assertEqual(1, len(lst))
             return lst[0]
 
@@ -1657,14 +1657,14 @@
         """Adding versions to the index should update the lookup dict"""
         knit = self.make_test_knit()
         idx = knit._index
-        idx.add_version('a-1', ['fulltext'], (0, 0), [])
+        idx.add_version('a-1', ['fulltext'], (None, 0, 0), [])
         self.check_file_contents('test.kndx',
             '# bzr knit index 8\n'
             '\n'
             'a-1 fulltext 0 0  :'
             )
-        idx.add_versions([('a-2', ['fulltext'], (0, 0), ['a-1']),
-                          ('a-3', ['fulltext'], (0, 0), ['a-2']),
+        idx.add_versions([('a-2', ['fulltext'], (None, 0, 0), ['a-1']),
+                          ('a-3', ['fulltext'], (None, 0, 0), ['a-2']),
                          ])
         self.check_file_contents('test.kndx',
             '# bzr knit index 8\n'
@@ -1693,15 +1693,15 @@
 
         knit = self.make_test_knit()
         idx = knit._index
-        idx.add_version('a-1', ['fulltext'], (0, 0), [])
+        idx.add_version('a-1', ['fulltext'], (None, 0, 0), [])
 
         class StopEarly(Exception):
             pass
 
         def generate_failure():
             """Add some entries and then raise an exception"""
-            yield ('a-2', ['fulltext'], (0, 0), ['a-1'])
-            yield ('a-3', ['fulltext'], (0, 0), ['a-2'])
+            yield ('a-2', ['fulltext'], (None, 0, 0), ['a-1'])
+            yield ('a-3', ['fulltext'], (None, 0, 0), ['a-2'])
             raise StopEarly()
 
         # Assert the pre-condition
@@ -1849,8 +1849,8 @@
 
     def test_get_position(self):
         index = self.two_graph_index()
-        self.assertEqual((0, 100), index.get_position('tip'))
-        self.assertEqual((100, 78), index.get_position('parent'))
+        self.assertEqual((index._graph_index._indices[0], 0, 100), index.get_position('tip'))
+        self.assertEqual((index._graph_index._indices[1], 100, 78), index.get_position('parent'))
 
     def test_get_method_deltas(self):
         index = self.two_graph_index(deltas=True)
@@ -2132,8 +2132,8 @@
 
     def test_get_position(self):
         index = self.two_graph_index()
-        self.assertEqual((0, 100), index.get_position('tip'))
-        self.assertEqual((100, 78), index.get_position('parent'))
+        self.assertEqual((index._graph_index._indices[0], 0, 100), index.get_position('tip'))
+        self.assertEqual((index._graph_index._indices[1], 100, 78), index.get_position('parent'))
 
     def test_get_method(self):
         index = self.two_graph_index()
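
Note on the test_knit.py hunks above: the updates track the same memo shape change. Positions fed to or returned by the kndx-based _KnitIndex gain a leading None, while KnitGraphIndex reports the backing GraphIndex object in that slot. A tiny stand-alone check of that convention follows; it is illustrative only and not part of the bzrlib test suite.

import unittest

class IndexMemoShapeExample(unittest.TestCase):

    def test_plain_index_memo(self):
        # .kndx-style indices have no per-pack index to report.
        index, pos, size = (None, 0, 1)
        self.assertEqual((None, 0, 1), (index, pos, size))

    def test_graph_index_memo(self):
        graph_index = object()  # stands in for a GraphIndex instance
        memo = (graph_index, 100, 78)
        self.assertTrue(memo[0] is graph_index)
        self.assertEqual((100, 78), memo[1:])

if __name__ == '__main__':
    unittest.main()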


