Rev 3896: Cleanup, in preparation for merging to brisbane-core. in http://bzr.arbash-meinel.com/branches/bzr/brisbane/refcycles

John Arbash Meinel john at arbash-meinel.com
Mon Mar 23 20:04:51 GMT 2009


At http://bzr.arbash-meinel.com/branches/bzr/brisbane/refcycles

------------------------------------------------------------
revno: 3896
revision-id: john at arbash-meinel.com-20090323200442-10qwt4ws636wwjwl
parent: john at arbash-meinel.com-20090323032950-lmbrocu79l90dqn5
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: refcycles
timestamp: Mon 2009-03-23 15:04:42 -0500
message:
  Cleanup, in preparation for merging to brisbane-core.
=== modified file 'bzrlib/groupcompress.py'
--- a/bzrlib/groupcompress.py	2009-03-23 03:29:50 +0000
+++ b/bzrlib/groupcompress.py	2009-03-23 20:04:42 +0000
@@ -494,10 +494,11 @@
                 return self._manager._wire_bytes()
             else:
                 return ''
-            self._manager = None # safe?
         if storage_kind in ('fulltext', 'chunked'):
             if self._bytes is None:
-                # Grab the raw bytes for this entry, and break the ref-cycle
+                # Grab and cache the raw bytes for this entry
+                # and break the ref-cycle with _manager since we don't need it
+                # anymore
                 self._manager._prepare_for_extract()
                 block = self._manager._block
                 self._bytes = block.extract(self.key, self._start, self._end)
@@ -507,7 +508,7 @@
             else:
                 return [self._bytes]
         raise errors.UnavailableRepresentation(self.key, storage_kind,
-            self.storage_kind)
+                                               self.storage_kind)
 
 
 class _LazyGroupContentManager(object):
@@ -534,6 +535,7 @@
         """Get a record for all keys added so far."""
         for factory in self._factories:
             yield factory
+            # Break the ref-cycle
             factory._bytes = None
             factory._manager = None
         # TODO: Consider setting self._factories = None after the above loop,
@@ -1286,8 +1288,7 @@
         for key in missing:
             yield AbsentContentFactory(key)
         manager = None
-        last_block = None
-        last_memo = None
+        last_read_memo = None
         # TODO: This works fairly well at batching up existing groups into a
         #       streamable format, and possibly allowing for taking one big
         #       group and splitting it when it isn't fully utilized.
@@ -1302,50 +1303,39 @@
                 for key in keys:
                     if key in self._unadded_refs:
                         if manager is not None:
-                            # Yield everything buffered so far
                             for factory in manager.get_record_stream():
                                 yield factory
-                                # Disable this record, breaks the refcycle, and
-                                # saves memory. But this means clients really
-                                # *cannot* hang on to objects.
-                                factory._bytes = None
-                                factory._manager = None
-                            manager = None
+                            last_read_memo = manager = None
                         bytes, sha1 = self._compressor.extract(key)
                         parents = self._unadded_refs[key]
                         yield FulltextContentFactory(key, parents, sha1, bytes)
                     else:
                         index_memo, _, parents, (method, _) = locations[key]
                         read_memo = index_memo[0:3]
-                        if last_memo == read_memo:
-                            block = last_block
-                        else:
+                        if last_read_memo != read_memo:
+                            # We are starting a new block. If we have a
+                            # manager, we have found everything that fits for
+                            # now, so yield records
+                            if manager is not None:
+                                for factory in manager.get_record_stream():
+                                    yield factory
+                            # Now start a new manager
                             block = self._get_block(index_memo)
-                            last_block = block
-                            last_memo = read_memo
+                            manager = _LazyGroupContentManager(block)
+                            last_read_memo = read_memo
                         start, end = index_memo[3:5]
-                        if manager is None:
-                            manager = _LazyGroupContentManager(block)
-                        elif manager._block is not block:
-                            # Flush and create a new manager
-                            for factory in manager.get_record_stream():
-                                yield factory
-                            manager = _LazyGroupContentManager(block)
                         manager.add_factory(key, parents, start, end)
             else:
                 if manager is not None:
-                    # Yield everything buffered so far
                     for factory in manager.get_record_stream():
                         yield factory
-                    manager = None
+                    last_read_memo = manager = None
                 for record in source.get_record_stream(keys, ordering,
                                                        include_delta_closure):
                     yield record
         if manager is not None:
-            # Yield everything buffered so far
             for factory in manager.get_record_stream():
                 yield factory
-            manager = None
 
     def get_sha1s(self, keys):
         """See VersionedFiles.get_sha1s()."""

=== modified file 'bzrlib/lru_cache.py'
--- a/bzrlib/lru_cache.py	2009-03-20 15:02:05 +0000
+++ b/bzrlib/lru_cache.py	2009-03-23 20:04:42 +0000
@@ -151,12 +151,8 @@
     def clear(self):
         """Clear out all of the cache."""
         # Clean up in LRU order
-        for key in self._cache.keys():
-            self._remove(key)
-        assert not self._cache
-        assert not self._cleanup
-        self._queue = deque()
-        self._refcount = {}
+        while self._cache:
+            self._remove_lru()
 
     def resize(self, max_cache, after_cleanup_count=None):
         """Change the number of entries that will be cached."""
@@ -251,10 +247,6 @@
         val = LRUCache._remove(self, key)
         self._value_size -= self._compute_size(val)
 
-    def clear(self):
-        LRUCache.clear(self)
-        self._value_size = 0
-
     def resize(self, max_size, after_cleanup_size=None):
         """Change the number of bytes that will be cached."""
         self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
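The clear() rewrite above drains the cache through the existing _remove_lru() path instead of hand-resetting the internal structures, which is also why LRUSizeCache no longer needs its own clear() override: its _remove() already keeps _value_size up to date as entries go away. A toy sketch of that idea (not the bzrlib LRUCache; the OrderedDict bookkeeping and string-sized values are illustrative assumptions):

# Toy illustration of draining a cache via its own LRU-removal hook, so
# subclasses that track extra state in _remove() stay consistent.
from collections import OrderedDict

class TinyLRUCache(object):
    def __init__(self):
        self._cache = OrderedDict()   # key -> value, oldest entry first

    def add(self, key, value):
        self._cache[key] = value

    def _remove(self, key):
        return self._cache.pop(key)

    def _remove_lru(self):
        # Remove the least-recently-used entry via the normal removal hook.
        oldest_key = next(iter(self._cache))
        self._remove(oldest_key)

    def clear(self):
        # Clean up in LRU order; per-entry bookkeeping done in _remove()
        # happens automatically, so no manual resets are needed here.
        while self._cache:
            self._remove_lru()


class TinySizeCache(TinyLRUCache):
    """Subclass that tracks total value size, like LRUSizeCache does."""

    def __init__(self):
        TinyLRUCache.__init__(self)
        self._value_size = 0

    def add(self, key, value):
        TinyLRUCache.add(self, key, value)
        self._value_size += len(value)

    def _remove(self, key):
        value = TinyLRUCache._remove(self, key)
        self._value_size -= len(value)
        return value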

=== modified file 'bzrlib/repofmt/groupcompress_repo.py'
--- a/bzrlib/repofmt/groupcompress_repo.py	2009-03-23 03:29:50 +0000
+++ b/bzrlib/repofmt/groupcompress_repo.py	2009-03-23 20:04:42 +0000
@@ -190,8 +190,6 @@
                 if pb is not None:
                     pb.update(message, idx + 1, len(keys))
                 yield record
-                record._manager = None
-                record._bytes = None
         return pb_stream()
 
     def _get_filtered_inv_stream(self, source_vf, keys, message, pb=None):
@@ -218,8 +216,6 @@
                     p_id_roots_set.add(key)
                     self._chk_p_id_roots.append(key)
                 yield record
-                record._manager = None
-                record._bytes = None
             # We have finished processing all of the inventory records, we
             # don't need these sets anymore
             id_roots_set.clear()
@@ -295,8 +291,6 @@
                         if pb is not None:
                             pb.update('chk node', counter[0], total_keys)
                         yield record
-                        record._manager = None
-                        record._bytes = None
                 yield next_stream()
                 # Double check that we won't be emitting any keys twice
                 # If we get rid of the pre-calculation of all keys, we could
@@ -391,17 +385,11 @@
             self.revision_keys = source_vf.keys()
         self._copy_stream(source_vf, target_vf, self.revision_keys,
                           'revisions', self._get_progress_stream, 1)
-        for index in source_vf._index._graph_index._indices:
-            index._leaf_node_cache.clear()
-        # target_vf._index._graph_index._spill_mem_keys_to_disk()
 
     def _copy_inventory_texts(self):
         source_vf, target_vf = self._build_vfs('inventory', True, True)
         self._copy_stream(source_vf, target_vf, self.revision_keys,
                           'inventories', self._get_filtered_inv_stream, 2)
-        for index in source_vf._index._graph_index._indices:
-            index._leaf_node_cache.clear()
-        # target_vf._index._graph_index._spill_mem_keys_to_disk()
 
     def _copy_chk_texts(self):
         source_vf, target_vf = self._build_vfs('chk', False, False)
@@ -423,9 +411,6 @@
                     pass
         finally:
             child_pb.finished()
-        for index in source_vf._index._graph_index._indices:
-            index._leaf_node_cache.clear()
-        # target_vf._index._graph_index._spill_mem_keys_to_disk()
 
     def _copy_text_texts(self):
         source_vf, target_vf = self._build_vfs('text', True, True)
@@ -437,9 +422,6 @@
         text_keys = source_vf.keys()
         self._copy_stream(source_vf, target_vf, text_keys,
                           'text', self._get_progress_stream, 4)
-        for index in source_vf._index._graph_index._indices:
-            index._leaf_node_cache.clear()
-        # target_vf._index._graph_index._spill_mem_keys_to_disk()
 
     def _copy_signature_texts(self):
         source_vf, target_vf = self._build_vfs('signature', False, False)
@@ -447,9 +429,6 @@
         signature_keys.intersection(self.revision_keys)
         self._copy_stream(source_vf, target_vf, signature_keys,
                           'signatures', self._get_progress_stream, 5)
-        for index in source_vf._index._graph_index._indices:
-            index._leaf_node_cache.clear()
-        # target_vf._index._graph_index._spill_mem_keys_to_disk()
 
     def _create_pack_from_packs(self):
         self.pb.update('repacking', 0, 7)
@@ -462,7 +441,6 @@
         self._copy_text_texts()
         self._copy_signature_texts()
         self.new_pack._check_references()
-        trace.debug_memory('after fetch')
         if not self._use_pack(self.new_pack):
             self.new_pack.abort()
             return None
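The groupcompress_repo.py changes above can drop the per-call-site record._manager = None / record._bytes = None assignments because _LazyGroupContentManager.get_record_stream() (in the groupcompress.py hunk earlier in this diff) now breaks the ref-cycle itself right after each factory is yielded. A rough sketch of that pattern, with invented class names (Factory, ContentManager) rather than the actual bzrlib classes:

# Sketch of breaking a parent<->child reference cycle inside a generator,
# immediately after each item has been handed to the consumer.
class Factory(object):
    def __init__(self, key, manager):
        self.key = key
        self._manager = manager   # back-reference: one half of the cycle
        self._bytes = None        # lazily cached content


class ContentManager(object):
    def __init__(self):
        self._factories = []

    def add(self, key):
        self._factories.append(Factory(key, self))

    def get_record_stream(self):
        for factory in self._factories:
            yield factory
            # The consumer has seen this record; break the ref-cycle so the
            # factory and any cached bytes can be freed promptly. Consumers
            # therefore must not hang on to yielded factories.
            factory._bytes = None
            factory._manager = None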


