Rev 73: All the backends support the current api tests. in http://bazaar.launchpad.net/+branch/u1db

John Arbash Meinel john at arbash-meinel.com
Thu Oct 13 14:01:15 UTC 2011


At http://bazaar.launchpad.net/+branch/u1db

------------------------------------------------------------
revno: 73 [merge]
revision-id: john at arbash-meinel.com-20111013140048-defyihyn7qf6l2f4
parent: john at arbash-meinel.com-20111013135829-50g1u59p3fy1oy0f
parent: john at arbash-meinel.com-20111013124422-e99nnz1jho7mk34j
committer: John Arbash Meinel <john at arbash-meinel.com>
branch nick: u1db
timestamp: Thu 2011-10-13 16:00:48 +0200
message:
  All the backends support the current api tests.
added:
  u1db/compat/                   u1dbcompat-20111013123507-o7klrz137qc7momb-1
  u1db/compat/__init__.py        __init__.py-20111013124213-lgi2rfpoy5z6myyt-1
  u1db/compat/ordered_dict.py    ordered_dict.py-20111013124213-lgi2rfpoy5z6myyt-2
modified:
  u1db/backends/inmemory.py      client.py-20110907100834-dekceojbjm2ken0c-3
  u1db/backends/sqlite_backend.py sqlite_backend.py-20110908122026-u745g3ftpndrgjl0-1
  u1db/tests/test_backends.py    test_client.py-20110907100834-dekceojbjm2ken0c-6
  u1db/tests/test_sqlite_backend.py test_sqlite_backend.-20110908122026-u745g3ftpndrgjl0-2
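
The interesting part of this revision: SQLiteDatabase keeps the shared
revision/transaction logic, but _put_and_update_indexes() now raises
NotImplementedError and each of the three new subclasses supplies its own
storage layout. A stripped-down sketch of that template-method shape (the
names BaseSketch/ExpandedSketch and the rev string are illustrative, not
the real u1db code):

    class BaseSketch(object):
        """Sketch only: stands in for sqlite_backend.SQLiteDatabase."""

        def put_doc(self, doc_id, old_doc_rev, doc):
            # Shared work (conflict checks, revision allocation) lives here.
            new_rev = 'rev-1'
            # The storage layout is the subclass's problem.
            self._put_and_update_indexes(doc_id, None, new_rev, doc)
            return new_rev

        def _put_and_update_indexes(self, doc_id, old_doc, new_rev, doc):
            """Insert a document and update any indexes referring to it."""
            raise NotImplementedError(self._put_and_update_indexes)

    class ExpandedSketch(BaseSketch):

        def _put_and_update_indexes(self, doc_id, old_doc, new_rev, doc):
            # The real SQLiteExpandedDatabase writes the raw JSON to the
            # document table plus one document_fields row per field.
            print 'stored %s at %s: %s' % (doc_id, new_rev, doc)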
-------------- next part --------------
=== modified file 'u1db/backends/inmemory.py'
--- a/u1db/backends/inmemory.py	2011-09-12 14:42:00 +0000
+++ b/u1db/backends/inmemory.py	2011-10-12 13:27:03 +0000
@@ -14,7 +14,6 @@
 
 """The in-memory Database class for U1DB."""
 
-import re
 import simplejson
 
 import u1db

=== modified file 'u1db/backends/sqlite_backend.py'
--- a/u1db/backends/sqlite_backend.py	2011-09-12 14:42:00 +0000
+++ b/u1db/backends/sqlite_backend.py	2011-10-13 12:44:22 +0000
@@ -19,6 +19,7 @@
 
 import u1db
 from u1db.backends import CommonBackend
+from u1db import compat
 
 
 class SQLiteDatabase(CommonBackend):
@@ -88,6 +89,10 @@
                       " PRIMARY KEY (name, offset))")
             c.execute("CREATE TABLE u1db_config (name TEXT, value TEXT)")
             c.execute("INSERT INTO u1db_config VALUES ('sql_schema', '0')")
+            self._extra_schema_init(c)
+
+    def _extra_schema_init(self, c):
+        """Add any extra fields, etc to the basic table definitions."""
 
     def _set_machine_id(self, machine_id):
         """Force the machine_id to be set."""
@@ -161,14 +166,8 @@
         with self._db_handle:
             if self._has_conflicts(doc_id):
                 raise u1db.ConflictedDoc()
-            c = self._db_handle.cursor()
-            c.execute("SELECT doc_rev, doc FROM document WHERE doc_id=?",
-                      (doc_id,))
-            val = c.fetchone()
-            if val is None:
-                old_rev = old_doc = None
-            else:
-                old_rev, old_doc = val
+            old_rev, old_doc = self._get_doc(doc_id)
+            if old_rev is not None:
                 if old_rev != old_doc_rev:
                     raise u1db.InvalidDocRev()
             new_rev = self._allocate_doc_rev(old_doc_rev)
@@ -176,30 +175,12 @@
         return new_rev
 
     def _put_and_update_indexes(self, doc_id, old_doc, new_rev, doc):
-        c = self._db_handle.cursor()
-        # for index in self._indexes.itervalues():
-        #     if old_doc is not None:
-        #         index.remove_json(doc_id, old_doc)
-        #     if doc not in (None, 'null'):
-        #         index.add_json(doc_id, doc)
-        if doc:
-            raw_doc = simplejson.loads(doc)
-        else:
-            raw_doc = {}
-        if old_doc:
-            c.execute("UPDATE document SET doc_rev=?, doc=? WHERE doc_id = ?",
-                      (new_rev, doc, doc_id))
-            c.execute("DELETE FROM document_fields WHERE doc_id = ?",
-                      (doc_id,))
-        else:
-            c.execute("INSERT INTO document VALUES (?, ?, ?)",
-                      (doc_id, new_rev, doc))
-        values = [(doc_id, field_name, value) for field_name, value in
-                  raw_doc.iteritems()]
-        c.executemany("INSERT INTO document_fields VALUES (?, ?, ?)",
-                      values)
-        c.execute("INSERT INTO transaction_log(doc_id) VALUES (?)",
-                  (doc_id,))
+        """Actually insert a document into the database.
+
+        This both updates the existing document's content and any indexes that
+        refer to this document.
+        """
+        raise NotImplementedError(self._put_and_update_indexes)
 
     def whats_changed(self, old_db_rev=0):
         c = self._db_handle.cursor()
@@ -216,13 +197,9 @@
 
     def delete_doc(self, doc_id, doc_rev):
         with self._db_handle:
-            c = self._db_handle.cursor()
-            c.execute("SELECT doc_rev, doc FROM document WHERE doc_id = ?",
-                      (doc_id,))
-            val = c.fetchone()
-            if val is None:
+            old_doc_rev, old_doc = self._get_doc(doc_id)
+            if old_doc_rev is None:
                 raise KeyError
-            old_doc_rev, old_doc = val
             if old_doc_rev != doc_rev:
                 raise u1db.InvalidDocRev()
             if old_doc is None:
@@ -366,3 +343,172 @@
             c = self._db_handle.cursor()
             c.execute("DELETE FROM index_definitions WHERE name = ?",
                       (index_name,))
+
+
+class SQLiteExpandedDatabase(SQLiteDatabase):
+    """An SQLite Backend that expands documents into a document_field table.
+
+    It stores the raw document text in document.doc, but also puts the
+    individual fields into document_fields.
+    """
+
+    def _put_and_update_indexes(self, doc_id, old_doc, new_rev, doc):
+        c = self._db_handle.cursor()
+        if doc:
+            raw_doc = simplejson.loads(doc)
+        else:
+            raw_doc = {}
+        if old_doc:
+            c.execute("UPDATE document SET doc_rev=?, doc=? WHERE doc_id = ?",
+                      (new_rev, doc, doc_id))
+            c.execute("DELETE FROM document_fields WHERE doc_id = ?",
+                      (doc_id,))
+        else:
+            c.execute("INSERT INTO document VALUES (?, ?, ?)",
+                      (doc_id, new_rev, doc))
+        values = [(doc_id, field_name, value) for field_name, value in
+                  raw_doc.iteritems()]
+        c.executemany("INSERT INTO document_fields VALUES (?, ?, ?)",
+                      values)
+        c.execute("INSERT INTO transaction_log(doc_id) VALUES (?)",
+                  (doc_id,))
+
+
+class SQLitePartialExpandDatabase(SQLiteDatabase):
+    """Similar to SQLiteExpandedDatabase, but only indexed fields are expanded.
+    """
+
+    def _get_indexed_fields(self):
+        """Determine what fields are indexed."""
+        c = self._db_handle.cursor()
+        c.execute("SELECT field FROM index_definitions")
+        return set([x[0] for x in c.fetchall()])
+
+    def _put_and_update_indexes(self, doc_id, old_doc, new_rev, doc):
+        c = self._db_handle.cursor()
+        if doc:
+            raw_doc = simplejson.loads(doc)
+        else:
+            raw_doc = {}
+        if old_doc:
+            c.execute("UPDATE document SET doc_rev=?, doc=? WHERE doc_id = ?",
+                      (new_rev, doc, doc_id))
+            c.execute("DELETE FROM document_fields WHERE doc_id = ?",
+                      (doc_id,))
+        else:
+            c.execute("INSERT INTO document VALUES (?, ?, ?)",
+                      (doc_id, new_rev, doc))
+        indexed_fields = self._get_indexed_fields()
+        if indexed_fields:
+            # It is expected that len(indexed_fields) is smaller than
+            # len(raw_doc)
+            values = [(doc_id, field_name, raw_doc[field_name])
+                      for field_name in indexed_fields
+                      if field_name in raw_doc]
+            c.executemany("INSERT INTO document_fields VALUES (?, ?, ?)",
+                          values)
+        c.execute("INSERT INTO transaction_log(doc_id) VALUES (?)",
+                  (doc_id,))
+
+    def create_index(self, index_name, index_expression):
+        with self._db_handle:
+            c = self._db_handle.cursor()
+            cur_fields = self._get_indexed_fields()
+            definition = [(index_name, idx, field)
+                          for idx, field in enumerate(index_expression)]
+            c.executemany("INSERT INTO index_definitions VALUES (?, ?, ?)",
+                          definition)
+            new_fields = set([f for f in index_expression
+                              if f not in cur_fields])
+            if new_fields:
+                self._update_indexes(new_fields)
+
+    def _iter_all_docs(self):
+        c = self._db_handle.cursor()
+        c.execute("SELECT doc_id, doc FROM document")
+        while True:
+            next_rows = c.fetchmany()
+            if not next_rows:
+                break
+            for row in next_rows:
+                yield row
+
+    def _update_indexes(self, new_fields):
+        for doc_id, doc in self._iter_all_docs():
+            raw_doc = simplejson.loads(doc)
+            values = [(doc_id, field_name, raw_doc[field_name])
+                      for field_name in new_fields
+                      if field_name in raw_doc]
+            c = self._db_handle.cursor()
+            c.executemany("INSERT INTO document_fields VALUES (?, ?, ?)",
+                          values)
+
+
+class SQLiteOnlyExpandedDatabase(SQLiteDatabase):
+    """Documents are only stored by their fields.
+
+    Rather than storing the raw content as text, we split it into fields and
+    store it in an indexable table.
+    """
+
+    def _extra_schema_init(self, c):
+        c.execute("ALTER TABLE document_fields ADD COLUMN offset INT")
+
+    def _put_and_update_indexes(self, doc_id, old_doc, new_rev, doc):
+        c = self._db_handle.cursor()
+        if doc:
+            raw_doc = simplejson.loads(doc,
+                object_pairs_hook=compat.OrderedDict)
+            doc_content = None
+        else:
+            raw_doc = {}
+            doc_content = '<deleted>'
+        if old_doc:
+            c.execute("UPDATE document SET doc_rev=?, doc=?"
+                      " WHERE doc_id = ?", (new_rev, doc_content, doc_id))
+            c.execute("DELETE FROM document_fields WHERE doc_id = ?",
+                      (doc_id,))
+        else:
+            c.execute("INSERT INTO document VALUES (?, ?, ?)",
+                      (doc_id, new_rev, doc_content))
+        values = [(doc_id, field_name, value, idx)
+                  for idx, (field_name, value)
+                  in enumerate(raw_doc.iteritems())]
+        c.executemany("INSERT INTO document_fields VALUES (?, ?, ?, ?)",
+                      values)
+        c.execute("INSERT INTO transaction_log(doc_id) VALUES (?)",
+                  (doc_id,))
+
+    def _get_doc(self, doc_id):
+        """Get just the document content, without fancy handling."""
+        c = self._db_handle.cursor()
+        c.execute("SELECT doc_rev, doc FROM document WHERE doc_id = ?",
+                  (doc_id,))
+        val = c.fetchone()
+        if val is None:
+            return None, None
+        # TODO: There is a race condition here, where we select the document
+        #       revision info before we select the actual content fields.
+        #       We probably need a transaction (readonly) to ensure
+        #       consistency.
+        doc_rev, doc_content = val
+        if doc_content == '<deleted>':
+            return doc_rev, None
+        c.execute("SELECT field_name, value FROM document_fields"
+                  " WHERE doc_id = ? ORDER BY offset", (doc_id,))
+        # TODO: What about nested docs?
+        raw_doc = compat.OrderedDict()
+        for field, value in c.fetchall():
+            raw_doc[field] = value
+        doc = simplejson.dumps(raw_doc)
+        return doc_rev, doc
+
+    def get_from_index(self, index_name, key_values):
+        # The base implementation does all the complex index joining. But it
+        # doesn't manage to extract the actual document content correctly.
+        # To do that, we add a loop around self._get_doc.
+        base = super(SQLiteOnlyExpandedDatabase, self).get_from_index(
+            index_name, key_values)
+        result = [(doc_id, doc_rev, self._get_doc(doc_id)[1])
+                  for doc_id, doc_rev, _ in base]
+        return result
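
To see how the three backends differ in what they persist, here is a small
usage sketch (mirroring the calls the tests below make; the 'demo' machine
id and the document content are illustrative):

    from u1db.backends import sqlite_backend

    for klass in [sqlite_backend.SQLiteExpandedDatabase,
                  sqlite_backend.SQLitePartialExpandDatabase,
                  sqlite_backend.SQLiteOnlyExpandedDatabase]:
        db = klass(':memory:')
        db._set_machine_id('demo')
        doc_id, doc_rev = db.create_doc('{"key1": "val1"}')
        # Expanded: raw JSON in document.doc, plus a document_fields row
        #   per field.
        # PartialExpand: raw JSON in document.doc, document_fields rows
        #   only for fields that some index names.
        # OnlyExpanded: document.doc stays NULL; get_doc() reassembles the
        #   JSON from document_fields (ordered by offset).
        print klass.__name__, db.get_doc(doc_id)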

=== added directory 'u1db/compat'
=== added file 'u1db/compat/__init__.py'
--- a/u1db/compat/__init__.py	1970-01-01 00:00:00 +0000
+++ b/u1db/compat/__init__.py	2011-10-13 12:44:22 +0000
@@ -0,0 +1,22 @@
+# Copyright 2011 Canonical Ltd.
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 3, as published
+# by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE.  See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Compatibility code for varios versions of python, etc."""
+
+try:
+    # Added in Python2.7
+    from collections import OrderedDict
+except ImportError:
+    from u1db.compat.ordered_dict import OrderedDict
+
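
The OrderedDict matters because SQLiteOnlyExpandedDatabase round-trips a
document through individual field rows and then re-serializes it; keeping
field order means the reassembled text matches what was stored. A quick
illustration of the decode/encode pair (assuming a simplejson with
object_pairs_hook support, which the backend code above already relies on):

    import simplejson
    from u1db import compat

    raw = '{"b": "2", "a": "1"}'
    # object_pairs_hook preserves the original key order on decode...
    doc = simplejson.loads(raw, object_pairs_hook=compat.OrderedDict)
    # ...so encoding gives back the same text, not alphabetized keys.
    assert simplejson.dumps(doc) == raw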

=== added file 'u1db/compat/ordered_dict.py'
--- a/u1db/compat/ordered_dict.py	1970-01-01 00:00:00 +0000
+++ b/u1db/compat/ordered_dict.py	2011-10-13 12:44:22 +0000
@@ -0,0 +1,258 @@
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+
+try:
+    from thread import get_ident as _get_ident
+except ImportError:
+    from dummy_thread import get_ident as _get_ident
+
+try:
+    from _abcoll import KeysView, ValuesView, ItemsView
+except ImportError:
+    pass
+
+
+class OrderedDict(dict):
+    'Dictionary that remembers insertion order'
+    # An inherited dict maps keys to values.
+    # The inherited dict provides __getitem__, __len__, __contains__, and get.
+    # The remaining methods are order-aware.
+    # Big-O running times for all methods are the same as for regular dictionaries.
+
+    # The internal self.__map dictionary maps keys to links in a doubly linked list.
+    # The circular doubly linked list starts and ends with a sentinel element.
+    # The sentinel element never gets deleted (this simplifies the algorithm).
+    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
+
+    def __init__(self, *args, **kwds):
+        '''Initialize an ordered dictionary.  Signature is the same as for
+        regular dictionaries, but keyword arguments are not recommended
+        because their insertion order is arbitrary.
+
+        '''
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        try:
+            self.__root
+        except AttributeError:
+            self.__root = root = []                     # sentinel node
+            root[:] = [root, root, None]
+            self.__map = {}
+        self.__update(*args, **kwds)
+
+    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+        'od.__setitem__(i, y) <==> od[i]=y'
+        # Setting a new item creates a new link which goes at the end of the linked
+        # list, and the inherited dictionary is updated with the new key/value pair.
+        if key not in self:
+            root = self.__root
+            last = root[0]
+            last[1] = root[0] = self.__map[key] = [last, root, key]
+        dict_setitem(self, key, value)
+
+    def __delitem__(self, key, dict_delitem=dict.__delitem__):
+        'od.__delitem__(y) <==> del od[y]'
+        # Deleting an existing item uses self.__map to find the link which is
+        # then removed by updating the links in the predecessor and successor nodes.
+        dict_delitem(self, key)
+        link_prev, link_next, key = self.__map.pop(key)
+        link_prev[1] = link_next
+        link_next[0] = link_prev
+
+    def __iter__(self):
+        'od.__iter__() <==> iter(od)'
+        root = self.__root
+        curr = root[1]
+        while curr is not root:
+            yield curr[2]
+            curr = curr[1]
+
+    def __reversed__(self):
+        'od.__reversed__() <==> reversed(od)'
+        root = self.__root
+        curr = root[0]
+        while curr is not root:
+            yield curr[2]
+            curr = curr[0]
+
+    def clear(self):
+        'od.clear() -> None.  Remove all items from od.'
+        try:
+            for node in self.__map.itervalues():
+                del node[:]
+            root = self.__root
+            root[:] = [root, root, None]
+            self.__map.clear()
+        except AttributeError:
+            pass
+        dict.clear(self)
+
+    def popitem(self, last=True):
+        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+        Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+        '''
+        if not self:
+            raise KeyError('dictionary is empty')
+        root = self.__root
+        if last:
+            link = root[0]
+            link_prev = link[0]
+            link_prev[1] = root
+            root[0] = link_prev
+        else:
+            link = root[1]
+            link_next = link[1]
+            root[1] = link_next
+            link_next[0] = root
+        key = link[2]
+        del self.__map[key]
+        value = dict.pop(self, key)
+        return key, value
+
+    # -- the following methods do not depend on the internal structure --
+
+    def keys(self):
+        'od.keys() -> list of keys in od'
+        return list(self)
+
+    def values(self):
+        'od.values() -> list of values in od'
+        return [self[key] for key in self]
+
+    def items(self):
+        'od.items() -> list of (key, value) pairs in od'
+        return [(key, self[key]) for key in self]
+
+    def iterkeys(self):
+        'od.iterkeys() -> an iterator over the keys in od'
+        return iter(self)
+
+    def itervalues(self):
+        'od.itervalues -> an iterator over the values in od'
+        for k in self:
+            yield self[k]
+
+    def iteritems(self):
+        'od.iteritems -> an iterator over the (key, value) items in od'
+        for k in self:
+            yield (k, self[k])
+
+    def update(*args, **kwds):
+        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
+
+        If E is a dict instance, does:           for k in E: od[k] = E[k]
+        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
+        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
+        In either case, this is followed by:     for k, v in F.items(): od[k] = v
+
+        '''
+        if len(args) > 2:
+            raise TypeError('update() takes at most 2 positional '
+                            'arguments (%d given)' % (len(args),))
+        elif not args:
+            raise TypeError('update() takes at least 1 argument (0 given)')
+        self = args[0]
+        # Make progressively weaker assumptions about "other"
+        other = ()
+        if len(args) == 2:
+            other = args[1]
+        if isinstance(other, dict):
+            for key in other:
+                self[key] = other[key]
+        elif hasattr(other, 'keys'):
+            for key in other.keys():
+                self[key] = other[key]
+        else:
+            for key, value in other:
+                self[key] = value
+        for key, value in kwds.items():
+            self[key] = value
+
+    __update = update  # let subclasses override update without breaking __init__
+
+    __marker = object()
+
+    def pop(self, key, default=__marker):
+        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+        If key is not found, d is returned if given, otherwise KeyError is raised.
+
+        '''
+        if key in self:
+            result = self[key]
+            del self[key]
+            return result
+        if default is self.__marker:
+            raise KeyError(key)
+        return default
+
+    def setdefault(self, key, default=None):
+        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+        if key in self:
+            return self[key]
+        self[key] = default
+        return default
+
+    def __repr__(self, _repr_running={}):
+        'od.__repr__() <==> repr(od)'
+        call_key = id(self), _get_ident()
+        if call_key in _repr_running:
+            return '...'
+        _repr_running[call_key] = 1
+        try:
+            if not self:
+                return '%s()' % (self.__class__.__name__,)
+            return '%s(%r)' % (self.__class__.__name__, self.items())
+        finally:
+            del _repr_running[call_key]
+
+    def __reduce__(self):
+        'Return state information for pickling'
+        items = [[k, self[k]] for k in self]
+        inst_dict = vars(self).copy()
+        for k in vars(OrderedDict()):
+            inst_dict.pop(k, None)
+        if inst_dict:
+            return (self.__class__, (items,), inst_dict)
+        return self.__class__, (items,)
+
+    def copy(self):
+        'od.copy() -> a shallow copy of od'
+        return self.__class__(self)
+
+    @classmethod
+    def fromkeys(cls, iterable, value=None):
+        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+        and values equal to v (which defaults to None).
+
+        '''
+        d = cls()
+        for key in iterable:
+            d[key] = value
+        return d
+
+    def __eq__(self, other):
+        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
+        while comparison to a regular mapping is order-insensitive.
+
+        '''
+        if isinstance(other, OrderedDict):
+            return len(self)==len(other) and self.items() == other.items()
+        return dict.__eq__(self, other)
+
+    def __ne__(self, other):
+        return not self == other
+
+    # -- the following methods are only used in Python 2.7 --
+
+    def viewkeys(self):
+        "od.viewkeys() -> a set-like object providing a view on od's keys"
+        return KeysView(self)
+
+    def viewvalues(self):
+        "od.viewvalues() -> an object providing a view on od's values"
+        return ValuesView(self)
+
+    def viewitems(self):
+        "od.viewitems() -> a set-like object providing a view on od's items"
+        return ItemsView(self)
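
Besides iteration order, the main behavioural extra over a plain dict is
the LIFO/FIFO switch on popitem(). A quick demo of the backport as imported
through the compat package:

    from u1db.compat import OrderedDict

    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    print od.popitem()             # ('c', 3) -- LIFO by default
    print od.popitem(last=False)   # ('a', 1) -- FIFO when last=False
    print od.items()               # [('b', 2)]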

=== modified file 'u1db/tests/test_backends.py'
--- a/u1db/tests/test_backends.py	2011-10-13 13:58:29 +0000
+++ b/u1db/tests/test_backends.py	2011-10-13 14:00:48 +0000
@@ -32,8 +32,20 @@
     return inmemory.InMemoryDatabase(machine_id)
 
 
-def create_sqlite_database(machine_id):
-    db = sqlite_backend.SQLiteDatabase(':memory:')
+def create_sqlite_expanded(machine_id):
+    db = sqlite_backend.SQLiteExpandedDatabase(':memory:')
+    db._set_machine_id(machine_id)
+    return db
+
+
+def create_sqlite_partial_expanded(machine_id):
+    db = sqlite_backend.SQLitePartialExpandDatabase(':memory:')
+    db._set_machine_id(machine_id)
+    return db
+
+
+def create_sqlite_only_expanded(machine_id):
+    db = sqlite_backend.SQLiteOnlyExpandedDatabase(':memory:')
     db._set_machine_id(machine_id)
     return db
 
@@ -43,7 +55,9 @@
     create_database = None
     scenarios = [
         ('mem', {'create_database': create_memory_database}),
-        ('sqlite', {'create_database': create_sqlite_database}),
+        ('sql_expand', {'create_database': create_sqlite_expanded}),
+        ('sql_partexpand', {'create_database': create_sqlite_partial_expanded}),
+        ('sql_onlyexpand', {'create_database': create_sqlite_only_expanded}),
         ]
 
     def close_database(self, database):
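
The scenarios list is what fans every test method out across the backends:
each (name, attrs) pair runs the whole test class once with create_database
bound to that factory. Adding a fourth backend would just be another
factory plus another tuple, e.g. (hypothetical class name, sketch only):

    def create_sqlite_hypothetical(machine_id):
        # SQLiteHypotheticalDatabase does not exist; it only marks where a
        # new backend would plug in.
        db = sqlite_backend.SQLiteHypotheticalDatabase(':memory:')
        db._set_machine_id(machine_id)
        return db

    scenarios = [
        ('mem', {'create_database': create_memory_database}),
        ('sql_expand', {'create_database': create_sqlite_expanded}),
        ('sql_hypothetical', {'create_database': create_sqlite_hypothetical}),
        ]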

=== modified file 'u1db/tests/test_sqlite_backend.py'
--- a/u1db/tests/test_sqlite_backend.py	2011-09-09 17:48:30 +0000
+++ b/u1db/tests/test_sqlite_backend.py	2011-10-13 12:44:22 +0000
@@ -15,6 +15,7 @@
 """Test sqlite backend internals."""
 
 from sqlite3 import dbapi2
+import simplejson
 
 from u1db import (
     tests,
@@ -25,27 +26,29 @@
 simple_doc = '{"key": "value"}'
 
 
-class TestSQLiteDatabase(tests.TestCase):
+class TestSQLiteExpandedDatabase(tests.TestCase):
+
+    def setUp(self):
+        super(TestSQLiteExpandedDatabase, self).setUp()
+        self.db = sqlite_backend.SQLiteExpandedDatabase(':memory:')
+        self.db._set_machine_id('test')
 
     def test_create_database(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        raw_db = db._get_sqlite_handle()
+        raw_db = self.db._get_sqlite_handle()
         self.assertNotEqual(None, raw_db)
 
     def test__close_sqlite_handle(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        raw_db = db._get_sqlite_handle()
-        db._close_sqlite_handle()
+        raw_db = self.db._get_sqlite_handle()
+        self.db._close_sqlite_handle()
         self.assertRaises(dbapi2.ProgrammingError,
             raw_db.cursor)
 
     def test_create_database_initializes_schema(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        raw_db = db._get_sqlite_handle()
+        raw_db = self.db._get_sqlite_handle()
         c = raw_db.cursor()
         c.execute("SELECT * FROM u1db_config")
         config = dict([(r[0], r[1]) for r in c.fetchall()])
-        self.assertEqual({'sql_schema': '0'}, config)
+        self.assertEqual({'sql_schema': '0', 'machine_id': 'test'}, config)
 
         # These tables must exist, though we don't care what is in them yet
         c.execute("SELECT * FROM transaction_log")
@@ -56,49 +59,45 @@
         c.execute("SELECT * FROM index_definitions")
 
     def test__set_machine_id(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        self.assertEqual(None, db._real_machine_id)
-        self.assertEqual(None, db._machine_id)
-        db._set_machine_id('foo')
-        c = db._get_sqlite_handle().cursor()
+        # Start from scratch, so that machine_id isn't set.
+        self.db = sqlite_backend.SQLiteExpandedDatabase(':memory:')
+        self.assertEqual(None, self.db._real_machine_id)
+        self.assertEqual(None, self.db._machine_id)
+        self.db._set_machine_id('foo')
+        c = self.db._get_sqlite_handle().cursor()
         c.execute("SELECT value FROM u1db_config WHERE name='machine_id'")
         self.assertEqual(('foo',), c.fetchone())
-        self.assertEqual('foo', db._real_machine_id)
-        self.assertEqual('foo', db._machine_id)
-        db._close_sqlite_handle()
-        self.assertEqual('foo', db._machine_id)
+        self.assertEqual('foo', self.db._real_machine_id)
+        self.assertEqual('foo', self.db._machine_id)
+        self.db._close_sqlite_handle()
+        self.assertEqual('foo', self.db._machine_id)
 
     def test__get_db_rev(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        db._set_machine_id('foo')
-        self.assertEqual(0, db._get_db_rev())
+        self.db._set_machine_id('foo')
+        self.assertEqual(0, self.db._get_db_rev())
 
     def test__allocate_doc_id(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        self.assertEqual('doc-0', db._allocate_doc_id())
+        self.assertEqual('doc-0', self.db._allocate_doc_id())
 
     def test_create_index(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        db.create_index('test-idx', ["key"])
-        self.assertEqual([('test-idx', ["key"])], db.list_indexes())
+        self.db.create_index('test-idx', ["key"])
+        self.assertEqual([('test-idx', ["key"])], self.db.list_indexes())
 
     def test_create_index_multiple_fields(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        db.create_index('test-idx', ["key", "key2"])
-        self.assertEqual([('test-idx', ["key", "key2"])], db.list_indexes())
+        self.db.create_index('test-idx', ["key", "key2"])
+        self.assertEqual([('test-idx', ["key", "key2"])],
+                         self.db.list_indexes())
 
     def test__get_index_definition(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        db.create_index('test-idx', ["key", "key2"])
+        self.db.create_index('test-idx', ["key", "key2"])
         # TODO: How would you test that an index is getting used for an SQL
         #       request?
         self.assertEqual(["key", "key2"],
-                         db._get_index_definition('test-idx'))
+                         self.db._get_index_definition('test-idx'))
 
     def test_list_index_mixed(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
         # Make sure that we properly order the output
-        c = db._get_sqlite_handle().cursor()
+        c = self.db._get_sqlite_handle().cursor()
         # We intentionally insert the data in weird ordering, to make sure the
         # query still gets it back correctly.
         c.executemany("INSERT INTO index_definitions VALUES (?, ?, ?)",
@@ -109,13 +108,12 @@
                        ('idx-2', 1, 'key21')])
         self.assertEqual([('idx-1', ['key10', 'key11']),
                           ('idx-2', ['key20', 'key21', 'key22'])],
-                         db.list_indexes())
+                         self.db.list_indexes())
 
     def test_create_extracts_fields(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        doc1_id, doc1_rev = db.create_doc('{"key1": "val1", "key2": "val2"}')
-        doc2_id, doc2_rev = db.create_doc('{"key1": "valx", "key2": "valy"}')
-        c = db._get_sqlite_handle().cursor()
+        doc1_id, doc1_rev = self.db.create_doc('{"key1": "val1", "key2": "val2"}')
+        doc2_id, doc2_rev = self.db.create_doc('{"key1": "valx", "key2": "valy"}')
+        c = self.db._get_sqlite_handle().cursor()
         c.execute("SELECT doc_id, field_name, value FROM document_fields"
                   " ORDER BY doc_id, field_name, value")
         self.assertEqual([(doc1_id, "key1", "val1"),
@@ -125,12 +123,87 @@
                          ], c.fetchall())
 
     def test_put_updates_fields(self):
-        db = sqlite_backend.SQLiteDatabase(':memory:')
-        doc1_id, doc1_rev = db.create_doc('{"key1": "val1", "key2": "val2"}')
-        doc2_rev = db.put_doc(doc1_id, doc1_rev, '{"key1": "val1", "key2": "valy"}')
-        c = db._get_sqlite_handle().cursor()
+        doc1_id, doc1_rev = self.db.create_doc(
+            '{"key1": "val1", "key2": "val2"}')
+        doc2_rev = self.db.put_doc(doc1_id, doc1_rev,
+            '{"key1": "val1", "key2": "valy"}')
+        c = self.db._get_sqlite_handle().cursor()
         c.execute("SELECT doc_id, field_name, value FROM document_fields"
                   " ORDER BY doc_id, field_name, value")
         self.assertEqual([(doc1_id, "key1", "val1"),
                           (doc1_id, "key2", "valy"),
                          ], c.fetchall())
+
+
+class TestSQLitePartialExpandDatabase(tests.TestCase):
+
+    def setUp(self):
+        super(TestSQLitePartialExpandDatabase, self).setUp()
+        self.db = sqlite_backend.SQLitePartialExpandDatabase(':memory:')
+        self.db._set_machine_id('test')
+
+    def test_no_indexes_no_document_fields(self):
+        doc1_id, doc1_rev = self.db.create_doc(
+            '{"key1": "val1", "key2": "val2"}')
+        c = self.db._get_sqlite_handle().cursor()
+        c.execute("SELECT doc_id, field_name, value FROM document_fields"
+                  " ORDER BY doc_id, field_name, value")
+        self.assertEqual([], c.fetchall())
+
+    def test__get_indexed_fields(self):
+        self.db.create_index('idx1', ['a', 'b'])
+        self.assertEqual(set(['a', 'b']), self.db._get_indexed_fields())
+        self.db.create_index('idx2', ['b', 'c'])
+        self.assertEqual(set(['a', 'b', 'c']), self.db._get_indexed_fields())
+
+    def test_indexed_fields_expanded(self):
+        self.db.create_index('idx1', ['key1'])
+        doc1_id, doc1_rev = self.db.create_doc(
+            '{"key1": "val1", "key2": "val2"}')
+        self.assertEqual(set(['key1']), self.db._get_indexed_fields())
+        c = self.db._get_sqlite_handle().cursor()
+        c.execute("SELECT doc_id, field_name, value FROM document_fields"
+                  " ORDER BY doc_id, field_name, value")
+        self.assertEqual([(doc1_id, 'key1', 'val1')], c.fetchall())
+
+    def test_create_index_updates_fields(self):
+        doc1_id, doc1_rev = self.db.create_doc(
+            '{"key1": "val1", "key2": "val2"}')
+        self.db.create_index('idx1', ['key1'])
+        self.assertEqual(set(['key1']), self.db._get_indexed_fields())
+        c = self.db._get_sqlite_handle().cursor()
+        c.execute("SELECT doc_id, field_name, value FROM document_fields"
+                  " ORDER BY doc_id, field_name, value")
+        self.assertEqual([(doc1_id, 'key1', 'val1')], c.fetchall())
+
+
+class TestSQLiteOnlyExpandedDatabase(tests.TestCase):
+
+    def setUp(self):
+        super(TestSQLiteOnlyExpandedDatabase, self).setUp()
+        self.db = sqlite_backend.SQLiteOnlyExpandedDatabase(':memory:')
+        self.db._set_machine_id('test')
+
+    def test_no_document_content(self):
+        doc1_id, doc1_rev = self.db.create_doc(
+            '{"key1": "val1", "key2": "val2"}')
+        c = self.db._get_sqlite_handle().cursor()
+        c.execute("SELECT doc_id, doc FROM document ORDER BY doc_id")
+        self.assertEqual([(doc1_id, None)], c.fetchall())
+        c.execute("SELECT doc_id, field_name, value FROM document_fields"
+                  " ORDER BY doc_id, field_name")
+        self.assertEqual([(doc1_id, 'key1', 'val1'),
+                          (doc1_id, 'key2', 'val2'),
+                         ], c.fetchall())
+
+    def test_get_doc_reassembles_content(self):
+        doc1_id, doc1_rev = self.db.create_doc(
+            '{"key1": "val1", "key2": "val2"}')
+        self.assertEqual((doc1_rev, '{"key1": "val1", "key2": "val2"}', False),
+                         self.db.get_doc(doc1_id))
+
+    def test_distinguish_deleted_from_empty_doc(self):
+        doc1_id, doc1_rev = self.db.create_doc('{}')
+        self.assertEqual((doc1_rev, '{}', False), self.db.get_doc(doc1_id))
+        doc1_rev2 = self.db.delete_doc(doc1_id, doc1_rev)
+        self.assertEqual((doc1_rev2, None, False), self.db.get_doc(doc1_id))
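
The last test above works because of the '<deleted>' sentinel in
SQLiteOnlyExpandedDatabase._get_doc(): an empty document simply has no
field rows, while a deleted one is marked explicitly. In terms of the API
(machine id illustrative):

    db = sqlite_backend.SQLiteOnlyExpandedDatabase(':memory:')
    db._set_machine_id('demo')
    doc_id, rev = db.create_doc('{}')   # empty doc: zero field rows
    print db.get_doc(doc_id)            # (rev, '{}', False)
    rev2 = db.delete_doc(doc_id, rev)   # doc column set to '<deleted>'
    print db.get_doc(doc_id)            # (rev2, None, False)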


