Rev 5099: Split out the policies for locking from the policies for indexing. in http://bzr.arbash-meinel.com/branches/bzr/lp/2.2.0b2-contained-pack
John Arbash Meinel
john at arbash-meinel.com
Mon Mar 8 20:01:58 GMT 2010
At http://bzr.arbash-meinel.com/branches/bzr/lp/2.2.0b2-contained-pack
------------------------------------------------------------
revno: 5099
revision-id: john at arbash-meinel.com-20100308200153-ob1otaacxdq09gzb
parent: john at arbash-meinel.com-20100308195555-wbu2h575236ltlm5
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: 2.2.0b2-contained-pack
timestamp: Mon 2010-03-08 14:01:53 -0600
message:
Split out the policies for locking from the policies for indexing.
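For illustration, here is a minimal sketch of what a concrete LockingPolicy could look like once this split lands. Only the LockingPolicy base class and its three methods come from the patch below; the LockDirLockingPolicy name, the wrapped `lock` object, and the choice of errors.ObjectNotLocked are assumptions for the sketch, not code from this branch:

    from bzrlib import errors
    from bzrlib.pack_collection import LockingPolicy

    class LockDirLockingPolicy(LockingPolicy):
        """Hypothetical policy that delegates to an external lock object.

        `lock` is assumed to expose is_locked(), lock_write() and unlock(),
        roughly like bzrlib's LockableFiles.
        """

        def __init__(self, lock):
            self._lock = lock

        def safe_to_cache(self):
            # Caching memos in memory is only safe while the lock is held.
            if not self._lock.is_locked():
                raise errors.ObjectNotLocked(self)

        def lock_write(self):
            self._lock.lock_write()

        def unlock(self):
            self._lock.unlock()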
-------------- next part --------------
=== modified file 'bzrlib/pack_collection.py'
--- a/bzrlib/pack_collection.py 2010-03-08 19:55:55 +0000
+++ b/bzrlib/pack_collection.py 2010-03-08 20:01:53 +0000
@@ -153,8 +153,24 @@
# return SectionInfoSerializer().to_bytes(self._sections)
-class MemoTrackerPolicy(object):
- """Policy class determining what to do for specific MemoTracker actions.
+class LockingPolicy(object):
+ """Abstract Base Class that defines interactions with locking."""
+
+ def safe_to_cache(self):
+ """Raise an exception if it is unsafe to cache data in memory."""
+ raise NotImplementedError(self.safe_to_cache)
+
+ def lock_write(self):
+ """We are about to update the file, make sure that is safe to do."""
+ raise NotImplementedError(self.lock_write)
+
+ def unlock(self):
+ """We have finished writing, unlock."""
+ raise NotImplementedError(self.unlock)
+
+
+class IndexPolicy(object):
+ """Policy class determining what to do for reading/writing to indexes.
Rather than having implementations subclass MemoTracker to get appropriate
functionality, we implement it with a Policy object. This is mostly just
@@ -175,28 +191,15 @@
"""
raise NotImplementedError(self.write_index)
- def safe_to_cache(self):
- """Raise an exception if it is unsafe to cache data in memory."""
- raise NotImplementedError(self.safe_to_cache)
-
- def lock_write(self):
- """We are about to update the file, make sure that is safe to do."""
- raise NotImplementedError(self.lock_write)
-
- def unlock(self):
- """We have finished writing, unlock."""
- raise NotImplementedError(self.unlock)
-
-
-class TransportBTreeMemoTrackerPolicy(object):
- """A policy that uses a BTreeIndex at a particular location."""
-
- _index_builder_class = btree_index.BTreeBuilder
- _index_class = btree_index.BTreeGraphIndex
-
- def __init__(self, transport, filename):
+
+class GraphIndexPolicy(IndexPolicy):
+ """An IndexPolicy that uses a GraphIndex at a particular location."""
+
+ def __init__(self, transport, filename, builder_class, index_class):
self.transport = transport
self.filename = filename
+ self._index_builder_class = builder_class
+ self._index_class = index_class
def _do_handle_missing_file(self):
"""What should we do if the file is not present when we want to read?
@@ -252,8 +255,9 @@
not yet written to disk.)
"""
- def __init__(self, policy):
- self.policy = policy
+ def __init__(self, locking_policy, index_policy):
+ self.locking_policy = locking_policy
+ self.index_policy = index_policy
self._memos = None
# The set of memos() that were on disk when we read it last
self._last_read_memos = None
@@ -263,11 +267,11 @@
:return: True if we had to read data from disk
"""
- self.policy.safe_to_cache()
+ self.locking_policy.safe_to_cache()
if self._memos is not None:
result = False
else:
- memos = set(self.policy.read_index())
+ memos = set(self.index_policy.read_index())
self._memos = dict(memos)
self._last_read_memos = memos
result = True
@@ -315,7 +319,7 @@
tracking
mem_added Ones not in _last_read_memos that we have locally added
"""
- disk_memos = set(self.policy.read_index())
+ disk_memos = set(self.index_policy.read_index())
disk_added = disk_memos - self._last_read_memos
disk_removed = self._last_read_memos - disk_memos
mem_memos = set(self._memos.items())
@@ -346,11 +350,11 @@
def save(self):
"""Save the current in-memory content to disk."""
- self.policy.lock_write()
+ self.locking_policy.lock_write()
try:
(on_disk, active, d_removed, d_added, m_removed,
m_added) = self._diff_from_disk()
- self.policy.write_index(active)
+ self.index_policy.write_index(active)
# XXX: There is a bug here, test it. self._memos does not (yet)
# match active
self._last_read_memos = set(self._memos.iteritems())
@@ -365,7 +369,7 @@
for memo in d_added:
self.add_memo(*memo)
finally:
- self.policy.unlock()
+ self.locking_policy.unlock()
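With index I/O decoupled from locking, either side can now vary independently: save() above brackets index_policy.write_index() with locking_policy.lock_write()/unlock(), and any object satisfying the read_index()/write_index() contract will do. As an illustration only (nothing below is part of this patch, and the DictIndexPolicy name is made up), an IndexPolicy that keeps memos in a plain dict would be enough for MemoTracker:

    from bzrlib.pack_collection import IndexPolicy

    class DictIndexPolicy(IndexPolicy):
        """Hypothetical IndexPolicy that 'persists' memos to a plain dict."""

        def __init__(self):
            self._stored = {}

        def read_index(self):
            # MemoTracker expects an iterable of (name, value) pairs.
            return self._stored.items()

        def write_index(self, memos):
            # `memos` is the collection of (name, value) pairs that should
            # remain live after a save().
            self._stored = dict(memos)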
=== modified file 'bzrlib/tests/test_pack_collection.py'
--- a/bzrlib/tests/test_pack_collection.py 2010-03-08 19:55:55 +0000
+++ b/bzrlib/tests/test_pack_collection.py 2010-03-08 20:01:53 +0000
@@ -128,7 +128,7 @@
-class TrivialMemoTrackerPolicy(pack_collection.TransportBTreeMemoTrackerPolicy):
+class TrivialLockingPolicy(pack_collection.LockingPolicy):
"""Stub out the necessary functionality with trivial implementations."""
def safe_to_cache(self):
@@ -155,8 +155,10 @@
self.tracker = self.make_tracker()
def make_tracker(self):
- policy = TrivialMemoTrackerPolicy(self.transport, 'meta')
- tracker = pack_collection.MemoTracker(policy)
+ tracker = pack_collection.MemoTracker(
+ TrivialLockingPolicy(),
+ pack_collection.GraphIndexPolicy(self.transport, 'meta',
+ btree_index.BTreeBuilder, btree_index.BTreeGraphIndex))
return tracker
def assertDiffFromDisk(self, on_disk, active, d_removed, d_added,
@@ -175,8 +177,7 @@
# We should create the file at this time, and it should be a valid
# index file
self.tracker.save()
- btree = self.tracker.policy._index_class(self.transport, 'meta',
- size=None)
+ btree = btree_index.BTreeGraphIndex(self.transport, 'meta', size=None)
self.assertEqual(0, btree.key_count())
def test_add_memo(self):
@@ -188,8 +189,7 @@
self.assertEqual({'name1': 'content1'}, self.tracker._memos)
self.assertEqual([('name1', 'content1')],
sorted(self.tracker._last_read_memos))
- btree = self.tracker.policy._index_class(self.transport, 'meta',
- size=None)
+ btree = btree_index.BTreeGraphIndex(self.transport, 'meta', size=None)
self.assertEqual(1, btree.key_count())
self.assertEqual([(('name1',), 'content1')],
sorted([(n[1], n[2]) for n in btree.iter_all_entries()]))
@@ -210,14 +210,12 @@
], sorted(self.tracker._last_read_memos))
self.assertEqual({'name1': 'content1'}, self.tracker._memos)
# Before saving, we should have both keys
- btree = self.tracker.policy._index_class(self.transport, 'meta',
- size=None)
+ btree = btree_index.BTreeGraphIndex(self.transport, 'meta', size=None)
self.assertEqual(2, btree.key_count())
self.tracker.save()
self.assertEqual([('name1', 'content1')],
sorted(self.tracker._last_read_memos))
- btree = self.tracker.policy._index_class(self.transport, 'meta',
- size=None)
+ btree = btree_index.BTreeGraphIndex(self.transport, 'meta', size=None)
self.assertEqual(1, btree.key_count())
def test_add_with_concurrent_add(self):
@@ -235,8 +233,7 @@
'name3': 'content3',
}, self.tracker._memos)
# And the on-disk record should also have all 3
- btree = self.tracker.policy._index_class(self.transport, 'meta',
- size=None)
+ btree = btree_index.BTreeGraphIndex(self.transport, 'meta', size=None)
self.assertEqual(3, btree.key_count())
def test_concurrent_remove(self):
@@ -252,8 +249,7 @@
self.assertEqual({'name2': 'content2',
}, self.tracker._memos)
# And the on-disk record should only have the remaining memo
- btree = self.tracker.policy._index_class(self.transport, 'meta',
- size=None)
+ btree = btree_index.BTreeGraphIndex(self.transport, 'meta', size=None)
self.assertEqual(1, btree.key_count())
def test__diff_from_disk_no_changes(self):
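Putting both halves together, constructing and using a tracker after this change looks roughly like the updated make_tracker() above. The sketch below only uses names visible in the diff; `transport` is assumed to be any writable bzrlib transport, and the locking policy can be any LockingPolicy implementation (the tests use the TrivialLockingPolicy stub):

    from bzrlib import btree_index, pack_collection

    def make_tracker(transport, locking_policy):
        index_policy = pack_collection.GraphIndexPolicy(
            transport, 'meta',
            btree_index.BTreeBuilder, btree_index.BTreeGraphIndex)
        return pack_collection.MemoTracker(locking_policy, index_policy)

    tracker = make_tracker(transport, TrivialLockingPolicy())
    tracker.add_memo('name1', 'content1')
    tracker.save()
    # The saved state is an ordinary btree index and can be read directly:
    btree = btree_index.BTreeGraphIndex(transport, 'meta', size=None)
    assert btree.key_count() == 1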