Rev 3240: Implement generic stacking rather than pack-internals based stacking. in http://people.ubuntu.com/~robertc/baz2.0/shallow-branch

Robert Collins robertc at robertcollins.net
Wed Jun 25 05:19:26 BST 2008


At http://people.ubuntu.com/~robertc/baz2.0/shallow-branch

------------------------------------------------------------
revno: 3240
revision-id: robertc at robertcollins.net-20080625041919-f1p0cvslr4ttpbjj
parent: robertc at robertcollins.net-20080625023945-nmu1d5jbt2i5bakk
committer: Robert Collins <robertc at robertcollins.net>
branch nick: Development1
timestamp: Wed 2008-06-25 14:19:19 +1000
message:
  Implement generic stacking rather than pack-internals based stacking.
modified:
  bzrlib/repofmt/pack_repo.py    pack_repo.py-20070813041115-gjv5ma7ktfqwsjgn-1
  bzrlib/repository.py           rev_storage.py-20051111201905-119e9401e46257e3
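
In short: stacking used to be implemented inside the pack collection, which
imported the fallback repositories' pack details into its _names map; after
this change the repository itself tracks its fallbacks, and lookups fall
through to them on a miss. A much-simplified sketch of the new shape (the
class and attribute names here are illustrative, not the real bzrlib API):

    class StackedRepository(object):
        """Illustrative sketch only; far simpler than the real bzrlib code."""

        def __init__(self, local_revisions=()):
            self._local_revisions = set(local_revisions)
            self._fallback_repositories = []

        def add_fallback_repository(self, repository):
            # The real code first checks compatibility and wires up the
            # versioned files (see the repository.py hunks below).
            self._fallback_repositories.append(repository)

        def has_revision(self, revision_id):
            # Local data first, then each fallback in order.
            if revision_id in self._local_revisions:
                return True
            return any(fallback.has_revision(revision_id)
                       for fallback in self._fallback_repositories)
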
=== modified file 'bzrlib/repofmt/pack_repo.py'
--- a/bzrlib/repofmt/pack_repo.py	2008-06-12 02:31:52 +0000
+++ b/bzrlib/repofmt/pack_repo.py	2008-06-25 04:19:19 +0000
@@ -1183,10 +1183,8 @@
         :return: True if packing took place.
         """
         # XXX: Should not be needed when the management of indices is sane.
-        
-        total_revisions = self._local_revision_index().key_count()
-        total_packs = len(list(collection for collection, sizes in
-            self._names.values() if collection is self))
+        total_revisions = self.revision_index.combined_index.key_count()
+        total_packs = len(self._names)
         if self._max_pack_count(total_revisions) >= total_packs:
             return False
         # XXX: the following may want to be a class, to pack with a given
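
For context: autopacking compares the pack count against _max_pack_count,
which in this era of bzrlib is understood to be the sum of the decimal
digits of the revision count. Only the call site appears in this hunk, so
treat the sketch below as an approximation rather than the exact bzrlib code:

    def max_pack_count(total_revisions):
        # Digit-sum heuristic: e.g. 2468 revisions allow at most
        # 2+4+6+8 = 20 packs before an autopack is triggered.
        if total_revisions == 0:
            return 1
        return sum(int(digit) for digit in str(total_revisions))
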
@@ -1198,8 +1196,6 @@
         pack_distribution = self.pack_distribution(total_revisions)
         existing_packs = []
         for pack in self.all_packs():
-            if self._names[pack.name][0] is not self:
-                continue
             revision_count = pack.get_revision_count()
             if revision_count == 0:
             # revisionless packs are not generated by normal operation,
@@ -1240,15 +1236,6 @@
         for revision_count, packs in pack_operations:
             self._obsolete_packs(packs)
 
-    def _local_revision_index(self):
-        """Return a combined index for all the local packs only."""
-        index = CombinedGraphIndex([])
-        for name, (collection, sizes) in self._names.items():
-            if collection is not self:
-                continue
-            index.insert_index(0, self.get_pack_by_name(name).revision_index)
-        return index
-
     def lock_names(self):
         """Acquire the mutex around the pack-names index.
         
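
With fallback packs no longer recorded in _names, every pack in the
collection is local, so the _local_revision_index helper deleted above is
redundant: revision_index.combined_index already aggregates exactly the
local packs. The only semantics relied on are additive key counts, roughly:

    class CombinedIndexSketch(object):
        """Sketch of the CombinedGraphIndex behaviour used above."""

        def __init__(self, indices):
            self._indices = indices

        def key_count(self):
            # A combined index covers all of its component indices.
            return sum(index.key_count() for index in self._indices)
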
@@ -1260,14 +1247,13 @@
     def pack(self):
         """Pack the pack collection totally."""
         self.ensure_loaded()
-        total_packs = len(list(collection for collection, sizes in
-            self._names.values() if collection is self))
+        total_packs = len(self._names)
         if total_packs < 2:
             # This is arguably wrong because we might not be optimal, but for
             # now let's leave it in. (e.g. reconcile -> one pack, but not
             # optimal.)
             return
-        total_revisions = self._local_revision_index().key_count()
+        total_revisions = self.revision_index.combined_index.key_count()
         # XXX: the following may want to be a class, to pack with a given
         # policy.
         mutter('Packing repository %s, which has %d pack files, '
@@ -1277,8 +1263,6 @@
         pack_distribution = [1]
         pack_operations = [[0, []]]
         for pack in self.all_packs():
-            if self._names[pack.name][0] is not self:
-                continue
             pack_operations[-1][0] += pack.get_revision_count()
             pack_operations[-1][1].append(pack)
         self._execute_pack_operations(pack_operations, OptimisingPacker)
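
pack() now feeds every pack into one operation. The pack_operations value
built by the loop above is a list of [revision_count, packs] pairs, and with
the fallback filter gone it always ends up as a single pair covering the
whole collection; illustrated with stand-in data (real entries are Pack
objects, not strings):

    pack_operations = [[0, []]]
    for revision_count, name in [(10, 'pack-a'), (25, 'pack-b')]:
        pack_operations[-1][0] += revision_count
        pack_operations[-1][1].append(name)
    # pack_operations == [[35, ['pack-a', 'pack-b']]]
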
@@ -1332,28 +1316,14 @@
             raise errors.ObjectNotLocked(self.repo)
         if self._names is None:
             self._names = {}
-            # Get fallback repository packs.
-            # TODO: we really should try local packs first and thus order the
-            # indices appropriately.
-            self._names.update(self.fallback_packs_details())
-            # Now the local packs.
             self._packs_at_load = set()
             for index, key, value in self._iter_disk_pack_index():
                 name = key[0]
-                self._names[name] = (self, self._parse_index_sizes(value))
+                self._names[name] = self._parse_index_sizes(value)
                 self._packs_at_load.add((key, value))
         # populate all the metadata.
         self.all_packs()
 
-    def fallback_packs_details(self):
-        """Return a dict of name -> (collection, index) size tuples."""
-        result = {}
-        for repo in self.repo._fallback_repositories:
-            collection = repo._pack_collection
-            for index, key, value in collection._iter_disk_pack_index():
-                result[key[0]] = (collection, self._parse_index_sizes(value))
-        return result
-
     def _parse_index_sizes(self, value):
         """Parse a string of index sizes."""
         return tuple([int(digits) for digits in value.split(' ')])
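
_names thus changes shape from name -> (collection, sizes) to a plain
name -> sizes mapping. The sizes tuple comes from _parse_index_sizes, shown
unchanged above, which splits the pack-names value on spaces, one size per
pack index; for example:

    def _parse_index_sizes(value):
        """Parse a string of index sizes."""
        return tuple([int(digits) for digits in value.split(' ')])

    _parse_index_sizes('1234 5678 910 11')   # -> (1234, 5678, 910, 11)
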
@@ -1361,20 +1331,17 @@
     def get_pack_by_name(self, name):
         """Get a Pack object by name.
 
-        If previously accessed this returns from the self._packs_by_name cache.
-
         :param name: The name of the pack - e.g. '123456'
         :return: A Pack object.
         """
         try:
             return self._packs_by_name[name]
         except KeyError:
-            collection = self._names[name][0]
             rev_index = self._make_index(name, '.rix')
             inv_index = self._make_index(name, '.iix')
             txt_index = self._make_index(name, '.tix')
             sig_index = self._make_index(name, '.six')
-            result = ExistingPack(collection._pack_transport, name, rev_index,
+            result = ExistingPack(self._pack_transport, name, rev_index,
                 inv_index, txt_index, sig_index)
             self.add_pack_to_memory(result)
             return result
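
get_pack_by_name still memoises through self._packs_by_name; only the
per-pack collection lookup goes away. The caching idiom, in isolation:

    def memoised(cache, name, build):
        # Look up the cache first; build and remember the value on a miss.
        try:
            return cache[name]
        except KeyError:
            result = build(name)
            cache[name] = result
            return result
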
@@ -1389,7 +1356,7 @@
         if a_new_pack.name in self._names:
             raise errors.BzrError(
                 'Pack %r already exists in %s' % (a_new_pack.name, self))
-        self._names[a_new_pack.name] = self, tuple(a_new_pack.index_sizes)
+        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
         self.add_pack_to_memory(a_new_pack)
 
     def _iter_disk_pack_index(self):
@@ -1403,12 +1370,11 @@
                 ).iter_all_entries()
 
     def _make_index(self, name, suffix):
-        collection = self._names[name][0]
         size_offset = self._suffix_offsets[suffix]
         index_name = name + suffix
-        index_size = self._names[name][1][size_offset]
+        index_size = self._names[name][size_offset]
         return GraphIndex(
-            collection._index_transport, index_name, index_size)
+            self._index_transport, index_name, index_size)
 
     def _max_pack_count(self, total_revisions):
         """Return the maximum number of packs to use for total revisions.
@@ -1583,9 +1549,7 @@
                 disk_nodes.add((key, value))
             # do a two-way diff against our original content
             current_nodes = set()
-            for name, (collection, sizes) in self._names.iteritems():
-                if collection is not self:
-                    continue
+            for name, sizes in self._names.iteritems():
                 current_nodes.add(
                     ((name, ), ' '.join(str(size) for size in sizes)))
             deleted_nodes = self._packs_at_load - current_nodes
@@ -1606,18 +1570,18 @@
         finally:
             self._unlock_names()
         # synchronise the memory packs list with what we just wrote:
-        new_names = self.fallback_packs_details()
-        for key, value in disk_nodes:
-            new_names[key[0]] = self, self._parse_index_sizes(value)
+        new_names = dict(disk_nodes)
         # drop no longer present nodes
         for pack in self.all_packs():
-            if pack.name not in new_names:
+            if (pack.name,) not in new_names:
                 self._remove_pack_from_memory(pack)
         # add new nodes/refresh existing ones
-        for name, (collection, sizes) in new_names.iteritems():
+        for key, value in disk_nodes:
+            name = key[0]
+            sizes = self._parse_index_sizes(value)
             if name in self._names:
                 # existing
-                if sizes != self._names[name][1]:
+                if sizes != self._names[name]:
                     # the pack for name has had its indices replaced - rare but
                     # important to handle. XXX: probably can never happen today
                     # because the three-way merge code above does not handle it
@@ -1631,7 +1595,7 @@
                     self.get_pack_by_name(name)
             else:
                 # new
-                self._names[name] = collection, sizes
+                self._names[name] = sizes
                 self.get_pack_by_name(name)
 
     def _clear_obsolete_packs(self):
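
The post-save synchronisation now diffs plain dictionaries. disk_nodes
holds ((name,), value) pairs read straight from the pack-names index, so
new_names is keyed by one-element tuples, which is why the membership test
above reads (pack.name,) not in new_names. Schematically:

    disk_nodes = set([(('pack-a',), '10 20 30 40'),
                      (('pack-b',), '1 2 3 4')])
    new_names = dict(disk_nodes)
    ('pack-a',) in new_names   # True  -> keep the pack in memory
    ('pack-c',) in new_names   # False -> _remove_pack_from_memory
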
@@ -1743,15 +1707,6 @@
     def _abort_write_group(self):
         self._pack_collection._abort_write_group()
 
-    def _add_fallback_repository_check(self, repository):
-        """Check that this repository can fallback to repository safely.
-        
-        :param repository: A repository to fallback to.
-        :return: True if the repositories can stack ok.
-        """
-        return (InterRepository._same_model(self, repository) and
-            self._format.__class__ == repository._format.__class__)
-
     def _find_inconsistent_revision_parents(self):
         """Find revisions with incorrectly cached parents.
 
@@ -1873,6 +1828,9 @@
         if self._write_lock_count == 1:
             from bzrlib import transactions
             self._transaction = transactions.WriteTransaction()
+            for repo in self._fallback_repositories:
+                # Writes don't affect fallback repos
+                repo.lock_read()
         self._refresh_data()
 
     def lock_read(self):
@@ -1880,6 +1838,9 @@
             self._write_lock_count += 1
         else:
             self.control_files.lock_read()
+            for repo in self._fallback_repositories:
+                # Fallback repositories only ever need a read lock
+                repo.lock_read()
         self._refresh_data()
 
     def leave_lock_in_place(self):
@@ -1921,8 +1882,109 @@
                 transaction = self._transaction
                 self._transaction = None
                 transaction.finish()
+                for repo in self._fallback_repositories:
+                    repo.unlock()
         else:
             self.control_files.unlock()
+            for repo in self._fallback_repositories:
+                repo.unlock()
+
+
+class RepositoryFormatPack(MetaDirRepositoryFormat):
+    """Format logic for pack structured repositories.
+
+    This repository format has:
+     - a list of packs in pack-names
+     - packs in packs/NAME.pack
+     - indices in indices/NAME.{iix,six,tix,rix}
+     - knit deltas in the packs, knit indices mapped to the indices.
+     - thunk objects to support the knits programming API.
+     - a format marker of its own
+     - an optional 'shared-storage' flag
+     - an optional 'no-working-trees' flag
+     - a LockDir lock
+    """
+
+    # Set this attribute in derived classes to control the repository class
+    # created by open and initialize.
+    repository_class = None
+    # Set this attribute in derived classes to control the
+    # _commit_builder_class that the repository objects will have passed to
+    # their constructor.
+    _commit_builder_class = None
+    # Set this attribute in derived classes to control the _serializer that the
+    # repository objects will have passed to their constructor.
+    _serializer = None
+    # External references are not supported in pack repositories yet.
+    supports_external_lookups = False
+
+    def initialize(self, a_bzrdir, shared=False):
+        """Create a pack based repository.
+
+        :param a_bzrdir: bzrdir to contain the new repository; must already
+            be initialized.
+        :param shared: If true the repository will be initialized as a shared
+                       repository.
+        """
+        mutter('creating repository in %s.', a_bzrdir.transport.base)
+        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
+        builder = GraphIndexBuilder()
+        files = [('pack-names', builder.finish())]
+        utf8_files = [('format', self.get_format_string())]
+        
+        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
+        return self.open(a_bzrdir=a_bzrdir, _found=True)
+
+    def open(self, a_bzrdir, _found=False, _override_transport=None):
+        """See RepositoryFormat.open().
+        
+        :param _override_transport: INTERNAL USE ONLY. Allows opening the
+                                    repository at a slightly different url
+                                    than normal. I.e. during 'upgrade'.
+        """
+        if not _found:
+            format = RepositoryFormat.find_format(a_bzrdir)
+        if _override_transport is not None:
+            repo_transport = _override_transport
+        else:
+            repo_transport = a_bzrdir.get_repository_transport(None)
+        control_files = lockable_files.LockableFiles(repo_transport,
+                                'lock', lockdir.LockDir)
+        return self.repository_class(_format=self,
+                              a_bzrdir=a_bzrdir,
+                              control_files=control_files,
+                              _commit_builder_class=self._commit_builder_class,
+                              _serializer=self._serializer)
+
+
+class RepositoryFormatKnitPack1(RepositoryFormatPack):
+    """A no-subtrees parameterized Pack repository.
+
+    This format was introduced in 0.92.
+    """
+
+    repository_class = KnitPackRepository
+    _commit_builder_class = PackCommitBuilder
+    _serializer = xml5.serializer_v5
+
+    def _get_matching_bzrdir(self):
+        return bzrdir.format_registry.make_bzrdir('pack-0.92')
+
+    def _ignore_setting_bzrdir(self, format):
+        pass
+
+    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)
+
+    def get_format_string(self):
+        """See RepositoryFormat.get_format_string()."""
+        return "Bazaar pack repository format 1 (needs bzr 0.92)\n"
+
+    def get_format_description(self):
+        """See RepositoryFormat.get_format_description()."""
+        return "Packs containing knits without subtree support"
+
+    def check_conversion_target(self, target_format):
+        pass
 
 
 class RepositoryFormatPack(MetaDirRepositoryFormat):
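
The locking hunks above keep the bookkeeping balanced: the first write lock
(count 0 -> 1) read-locks every fallback repository, and the unlock that
drops the last lock releases them again. A simplified sketch of the
pattern, without transactions or the read-lock branch:

    class LockCounterSketch(object):
        def __init__(self, fallbacks):
            self._write_lock_count = 0
            self._fallbacks = fallbacks

        def lock_write(self):
            self._write_lock_count += 1
            if self._write_lock_count == 1:
                for repo in self._fallbacks:
                    # Writes don't affect fallback repos, so a read lock
                    # is enough.
                    repo.lock_read()

        def unlock(self):
            self._write_lock_count -= 1
            if self._write_lock_count == 0:
                for repo in self._fallbacks:
                    repo.unlock()
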

=== modified file 'bzrlib/repository.py'
--- a/bzrlib/repository.py	2008-06-25 02:39:45 +0000
+++ b/bzrlib/repository.py	2008-06-25 04:19:19 +0000
@@ -505,6 +505,10 @@
         if not self._add_fallback_repository_check(repository):
             raise errors.IncompatibleRepositories(self, repository)
         self._fallback_repositories.append(repository)
+        self.texts.add_fallback_versioned_files(repository.texts)
+        self.inventories.add_fallback_versioned_files(repository.inventories)
+        self.revisions.add_fallback_versioned_files(repository.revisions)
+        self.signatures.add_fallback_versioned_files(repository.signatures)
 
     def _add_fallback_repository_check(self, repository):
         """Check that this repository can fallback to repository safely.
@@ -712,11 +716,16 @@
         XXX: this docstring is duplicated in many places, e.g. lockable_files.py
         """
         result = self.control_files.lock_write(token=token)
+        for repo in self._fallback_repositories:
+            # Writes don't affect fallback repos
+            repo.lock_read()
         self._refresh_data()
         return result
 
     def lock_read(self):
         self.control_files.lock_read()
+        for repo in self._fallback_repositories:
+            repo.lock_read()
         self._refresh_data()
 
     def get_physical_lock_status(self):
@@ -954,6 +963,8 @@
                 raise errors.BzrError(
                     'Must end write groups before releasing write locks.')
         self.control_files.unlock()
+        for repo in self._fallback_repositories:
+            repo.unlock()
 
     @needs_read_lock
     def clone(self, a_bzrdir, revision_id=None):
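
Net effect across both files: every lock/unlock pair on a stacked
repository is mirrored as a read lock/unlock pair on each fallback. In use
(repo and fallback stand for already-opened repository objects):

    repo.add_fallback_repository(fallback)  # wires texts, inventories, ...
    repo.lock_write()   # also read-locks fallback; writes never touch it
    try:
        pass            # write locally; reads may fall through to fallback
    finally:
        repo.unlock()   # also unlocks fallback
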



