author     Ashley Sommer <ashleysommer@gmail.com>    2020-08-19 13:30:44 +1000
committer  Ashley Sommer <ashleysommer@gmail.com>    2020-08-19 13:30:44 +1000
commit     860628cd156fb88cb8c810e914a883539af6aaa8 (patch)
tree       644077f5f7761cbab21d65c00060199fc842c070
parent     c0c6630c37fc20350f69452cf77fd5642a02e2da (diff)
download   rdflib-860628cd156fb88cb8c810e914a883539af6aaa8.tar.gz

Removed IOMemory store

Renamed Memory2 to Memory
Renamed Memory1 to SimpleMemory
Set default store to new Memory
Fixed tests
Fixed docs
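
For downstream code, the practical effect of this commit is an import-path and plugin-name change. A minimal, hypothetical before/after sketch (mirroring the examples/conjunctive_graphs.py change below), written in the doctest style used elsewhere in this commit:

    Before (the removed store):

    >>> from rdflib.plugins.memory import IOMemory   # module removed by this commit
    >>> store = IOMemory()

    After (Memory, formerly Memory2, is now the default store):

    >>> from rdflib.graph import Graph
    >>> from rdflib.plugins.stores.memory import Memory
    >>> store = Memory()
    >>> g = Graph(store)
    >>> g.store.__class__
    <class 'rdflib.plugins.stores.memory.Memory'>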
-rw-r--r--  docs/persistence.rst                                                            |   4
-rw-r--r--  docs/plugin_stores.rst                                                          |   7
-rw-r--r--  docs/univrdfstore.rst                                                           |   2
-rw-r--r--  examples/conjunctive_graphs.py                                                  |   4
-rw-r--r--  rdflib/collection.py                                                            |   4
-rw-r--r--  rdflib/graph.py                                                                 |  14
-rw-r--r--  rdflib/plugin.py                                                                |   9
-rw-r--r--  rdflib/plugins/stores/memory.py (renamed from rdflib/plugins/memory.py)         | 378
-rw-r--r--  rdflib/plugins/stores/sleepycat.py (renamed from rdflib/plugins/sleepycat.py)   |   0
-rw-r--r--  rdflib/plugins/stores/sparqlstore.py                                            |   2
-rw-r--r--  test/store_performance.py                                                       |   2
-rw-r--r--  test/test_aggregate_graphs.py                                                   |   4
-rw-r--r--  test/test_canonicalization.py                                                   |   4
-rw-r--r--  test/test_dataset.py                                                            |   2
-rw-r--r--  test/test_graph.py                                                              |   6
-rw-r--r--  test/test_graph_context.py                                                      |   2
-rw-r--r--  test/test_iomemory.py                                                           |  67
-rw-r--r--  test/test_memory_store.py                                                       |  28
-rw-r--r--  test/test_namespace.py                                                          |   4
-rw-r--r--  test/test_util.py                                                               |   4

20 files changed, 84 insertions, 463 deletions
diff --git a/docs/persistence.rst b/docs/persistence.rst
index fbddf38f..bd270a14 100644
--- a/docs/persistence.rst
+++ b/docs/persistence.rst
@@ -19,8 +19,8 @@ this API for a different store.
Stores currently shipped with core RDFLib
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-* :class:`Memory <rdflib.plugins.memory.IOMemory>` (not persistent!)
-* :class:`~rdflib.plugins.sleepycat.Sleepycat` (on disk persistence via Python's :ref:`bsddb` or :ref:`bsddb3` packages)
+* :class:`Memory <rdflib.plugins.stores.memory.Memory>` (not persistent!)
+* :class:`~rdflib.plugins.stores.sleepycat.Sleepycat` (on disk persistence via Python's :ref:`bsddb` or :ref:`bsddb3` packages)
* :class:`~rdflib.plugins.stores.sparqlstore.SPARQLStore` - a read-only wrapper around a remote SPARQL Query endpoint.
* :class:`~rdflib.plugins.stores.sparqlstore.SPARQLUpdateStore` - a read-write wrapper around a remote SPARQL query/update endpoint pair.
diff --git a/docs/plugin_stores.rst b/docs/plugin_stores.rst
index 68063577..a936c54e 100644
--- a/docs/plugin_stores.rst
+++ b/docs/plugin_stores.rst
@@ -10,9 +10,10 @@ Name Class
================= ============================================================
Auditable :class:`~rdflib.plugins.stores.auditable.AuditableStore`
Concurrent :class:`~rdflib.plugins.stores.concurrent.ConcurrentStore`
-IOMemory :class:`~rdflib.plugins.memory.IOMemory`
+SimpleMemory :class:`~rdflib.plugins.stores.memory.SimpleMemory`
+Memory :class:`~rdflib.plugins.stores.memory.Memory`
SPARQLStore :class:`~rdflib.plugins.stores.sparqlstore.SPARQLStore`
SPARQLUpdateStore :class:`~rdflib.plugins.stores.sparqlstore.SPARQLUpdateStore`
-Sleepycat :class:`~rdflib.plugins.sleepycat.Sleepycat`
-default :class:`~rdflib.plugins.memory.IOMemory`
+Sleepycat :class:`~rdflib.plugins.stores.sleepycat.Sleepycat`
+default :class:`~rdflib.plugins.stores.memory.Memory`
================= ============================================================
diff --git a/docs/univrdfstore.rst b/docs/univrdfstore.rst
index f6822e5b..dfb96d81 100644
--- a/docs/univrdfstore.rst
+++ b/docs/univrdfstore.rst
@@ -344,7 +344,7 @@ These are a list of additional kinds of RDF terms (all of which are special Lite
Namespace Management Interfaces
===============================
-The following namespace management interfaces (defined in Graph) could be implemented in the RDF store. Currently, they exist as stub methods of :class:`~rdflib.store.Store` and are defined in the store subclasses (e.g. :class:`~rdflib.store.IOMemory`):
+The following namespace management interfaces (defined in Graph) could be implemented in the RDF store. Currently, they exist as stub methods of :class:`~rdflib.store.Store` and are defined in the store subclasses (e.g. :class:`~rdflib.plugins.stores.memory.Memory`):
.. automethod:: rdflib.store.Store.bind
:noindex:
diff --git a/examples/conjunctive_graphs.py b/examples/conjunctive_graphs.py
index a66a3aa8..289046ec 100644
--- a/examples/conjunctive_graphs.py
+++ b/examples/conjunctive_graphs.py
@@ -10,7 +10,7 @@ conjunction (union) of all the graphs.
from rdflib import Namespace, Literal, URIRef
from rdflib.graph import Graph, ConjunctiveGraph
-from rdflib.plugins.memory import IOMemory
+from rdflib.plugins.stores.memory import Memory
if __name__ == "__main__":
@@ -22,7 +22,7 @@ if __name__ == "__main__":
cmary = URIRef("http://love.com/lovers/mary")
cjohn = URIRef("http://love.com/lovers/john")
- store = IOMemory()
+ store = Memory()
g = ConjunctiveGraph(store=store)
g.bind("love", ns)
diff --git a/rdflib/collection.py b/rdflib/collection.py
index 8b667a23..b4bf9f38 100644
--- a/rdflib/collection.py
+++ b/rdflib/collection.py
@@ -18,7 +18,7 @@ class Collection(object):
>>> from rdflib.graph import Graph
>>> from pprint import pprint
>>> listName = BNode()
- >>> g = Graph('IOMemory')
+ >>> g = Graph('Memory')
>>> listItem1 = BNode()
>>> listItem2 = BNode()
>>> g.add((listName, RDF.first, Literal(1)))
@@ -52,7 +52,7 @@ class Collection(object):
"""
>>> from rdflib.graph import Graph
>>> listName = BNode()
- >>> g = Graph('IOMemory')
+ >>> g = Graph('Memory')
>>> listItem1 = BNode()
>>> listItem2 = BNode()
>>> g.add((listName, RDF.first, Literal(1)))
diff --git a/rdflib/graph.py b/rdflib/graph.py
index 145224b8..2afcdccd 100644
--- a/rdflib/graph.py
+++ b/rdflib/graph.py
@@ -104,31 +104,31 @@ see :class:`~rdflib.graph.Dataset`
Working with graphs
===================
-Instantiating Graphs with default store (IOMemory) and default identifier
+Instantiating Graphs with default store (Memory) and default identifier
(a BNode):
>>> g = Graph()
>>> g.store.__class__
- <class 'rdflib.plugins.memory.IOMemory'>
+ <class 'rdflib.plugins.stores.memory.Memory'>
>>> g.identifier.__class__
<class 'rdflib.term.BNode'>
-Instantiating Graphs with a IOMemory store and an identifier -
+Instantiating Graphs with a Memory store and an identifier -
<http://rdflib.net>:
- >>> g = Graph('IOMemory', URIRef("http://rdflib.net"))
+ >>> g = Graph('Memory', URIRef("http://rdflib.net"))
>>> g.identifier
rdflib.term.URIRef('http://rdflib.net')
>>> str(g) # doctest: +NORMALIZE_WHITESPACE
"<http://rdflib.net> a rdfg:Graph;rdflib:storage
- [a rdflib:Store;rdfs:label 'IOMemory']."
+ [a rdflib:Store;rdfs:label 'Memory']."
Creating a ConjunctiveGraph - The top level container for all named Graphs
in a "database":
>>> g = ConjunctiveGraph()
>>> str(g.default_context)
- "[a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'IOMemory']]."
+ "[a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']]."
Adding / removing reified triples to Graph and iterating over it directly or
via triple pattern:
@@ -192,7 +192,7 @@ by RDFLib they are UUIDs and unique.
Graph Aggregation - ConjunctiveGraphs and ReadOnlyGraphAggregate within
the same store:
- >>> store = plugin.get("IOMemory", Store)()
+ >>> store = plugin.get("Memory", Store)()
>>> g1 = Graph(store)
>>> g2 = Graph(store)
>>> g3 = Graph(store)
diff --git a/rdflib/plugin.py b/rdflib/plugin.py
index a15767e1..174dbfb8 100644
--- a/rdflib/plugin.py
+++ b/rdflib/plugin.py
@@ -131,13 +131,12 @@ def plugins(name=None, kind=None):
yield p
-register("default", Store, "rdflib.plugins.memory", "Memory2")
-register("Memory2", Store, "rdflib.plugins.memory", "Memory2")
-register("Memory1", Store, "rdflib.plugins.memory", "Memory1")
-register("IOMemory", Store, "rdflib.plugins.memory", "IOMemory")
+register("default", Store, "rdflib.plugins.stores.memory", "Memory")
+register("Memory", Store, "rdflib.plugins.stores.memory", "Memory")
+register("SimpleMemory", Store, "rdflib.plugins.stores.memory", "SimpleMemory")
register("Auditable", Store, "rdflib.plugins.stores.auditable", "AuditableStore")
register("Concurrent", Store, "rdflib.plugins.stores.concurrent", "ConcurrentStore")
-register("Sleepycat", Store, "rdflib.plugins.sleepycat", "Sleepycat")
+register("Sleepycat", Store, "rdflib.plugins.stores.sleepycat", "Sleepycat")
register("SPARQLStore", Store, "rdflib.plugins.stores.sparqlstore", "SPARQLStore")
register(
"SPARQLUpdateStore", Store, "rdflib.plugins.stores.sparqlstore", "SPARQLUpdateStore"
diff --git a/rdflib/plugins/memory.py b/rdflib/plugins/stores/memory.py
index 73763fdd..93b6ec25 100644
--- a/rdflib/plugins/memory.py
+++ b/rdflib/plugins/stores/memory.py
@@ -1,19 +1,15 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import random
-
+#
+#
from rdflib.store import Store
-__all__ = ["Memory1", "Memory2", "IOMemory"]
+__all__ = ["SimpleMemory", "Memory"]
-ANY = Any = None
+ANY = None
-class Memory1(Store):
+class SimpleMemory(Store):
"""\
- An in memory implementation of a triple store.
+ A fast naive in memory implementation of a triple store.
This triple store uses nested dictionaries to store triples. Each
triple is stored in two such indices as follows spo[s][p][o] = 1 and
@@ -23,7 +19,7 @@ class Memory1(Store):
"""
def __init__(self, configuration=None, identifier=None):
- super(Memory1, self).__init__(configuration)
+ super(SimpleMemory, self).__init__(configuration)
self.identifier = identifier
# indexed by [subject][predicate][object]
@@ -169,12 +165,18 @@ class Memory1(Store):
def __contexts(self):
return (c for c in []) # TODO: best way to return empty generator
+ def query(self, query, initNs, initBindings, queryGraph, **kwargs):
+ super(SimpleMemory, self).query(query, initNs, initBindings, queryGraph, **kwargs)
+
+ def update(self, update, initNs, initBindings, queryGraph, **kwargs):
+ super(SimpleMemory, self).update(update, initNs, initBindings, queryGraph, **kwargs)
+
-class Memory2(Store):
+class Memory(Store):
"""\
An in memory implementation of a triple store.
- Same as Memory1 above, but is Context-aware, Graph-aware, and Formula-aware
+ Same as SimpleMemory above, but is Context-aware, Graph-aware, and Formula-aware
Authors: Ashley Sommer
"""
@@ -183,7 +185,7 @@ class Memory2(Store):
graph_aware = True
def __init__(self, configuration=None, identifier=None):
- super(Memory2, self).__init__(configuration)
+ super(Memory, self).__init__(configuration)
self.identifier = identifier
# indexed by [subject][predicate][object]
@@ -524,348 +526,8 @@ class Memory2(Store):
if ctx_str is not None
)
+ def query(self, query, initNs, initBindings, queryGraph, **kwargs):
+ super(Memory, self).query(query, initNs, initBindings, queryGraph, **kwargs)
-class IOMemory(Store):
- """\
- An integer-key-optimized context-aware in-memory store.
-
- Uses three dict indices (for subjects, objects and predicates) holding
- sets of triples. Context information is tracked in a separate dict, with
- the triple as key and a dict of {context: quoted} items as value. The
- context information is used to filter triple query results.
-
- Memory usage is low due to several optimizations. RDF nodes are not
- stored directly in the indices; instead, the indices hold integer keys
- and the actual nodes are only stored once in int-to-object and
- object-to-int mapping dictionaries. A default context is determined
- based on the first triple that is added to the store, and no context
- information is actually stored for subsequent other triples with the
- same context information.
-
- Most operations should be quite fast, but a triples() query with two
- bound parts requires a set intersection operation, which may be slow in
- some cases. When multiple contexts are used in the same store, filtering
- based on context has to be done after each query, which may also be
- slow.
-
- """
-
- context_aware = True
- formula_aware = True
- graph_aware = True
-
- # The following variable name conventions are used in this class:
- #
- # subject, predicate, object unencoded triple parts
- # triple = (subject, predicate, object) unencoded triple
- # context: unencoded context
- #
- # sid, pid, oid integer-encoded triple parts
- # enctriple = (sid, pid, oid) integer-encoded triple
- # cid integer-encoded context
-
- def __init__(self, configuration=None, identifier=None):
- super(IOMemory, self).__init__()
- self.__namespace = {}
- self.__prefix = {}
-
- # Mappings for encoding RDF nodes using integer keys, to save memory
- # in the indexes Note that None is always mapped to itself, to make
- # it easy to test for it in either encoded or unencoded form.
- self.__int2obj = {None: None} # maps integer keys to objects
- self.__obj2int = {None: None} # maps objects to integer keys
-
- # Indexes for each triple part, and a list of contexts for each triple
- self.__subjectIndex = {} # key: sid val: set(enctriples)
- self.__predicateIndex = {} # key: pid val: set(enctriples)
- self.__objectIndex = {} # key: oid val: set(enctriples)
- self.__tripleContexts = (
- {}
- ) # key: enctriple val: {cid1: quoted, cid2: quoted ...}
- self.__contextTriples = {None: set()} # key: cid val: set(enctriples)
-
- # all contexts used in store (unencoded)
- self.__all_contexts = set()
- # default context information for triples
- self.__defaultContexts = None
-
- def bind(self, prefix, namespace):
- self.__prefix[namespace] = prefix
- self.__namespace[prefix] = namespace
-
- def namespace(self, prefix):
- return self.__namespace.get(prefix, None)
-
- def prefix(self, namespace):
- return self.__prefix.get(namespace, None)
-
- def namespaces(self):
- for prefix, namespace in self.__namespace.items():
- yield prefix, namespace
-
- def add(self, triple, context, quoted=False):
- Store.add(self, triple, context, quoted)
-
- if context is not None:
- self.__all_contexts.add(context)
-
- enctriple = self.__encodeTriple(triple)
- sid, pid, oid = enctriple
-
- self.__addTripleContext(enctriple, context, quoted)
-
- if sid in self.__subjectIndex:
- self.__subjectIndex[sid].add(enctriple)
- else:
- self.__subjectIndex[sid] = set([enctriple])
-
- if pid in self.__predicateIndex:
- self.__predicateIndex[pid].add(enctriple)
- else:
- self.__predicateIndex[pid] = set([enctriple])
-
- if oid in self.__objectIndex:
- self.__objectIndex[oid].add(enctriple)
- else:
- self.__objectIndex[oid] = set([enctriple])
-
- def remove(self, triplepat, context=None):
- req_cid = self.__obj2id(context)
- for triple, contexts in self.triples(triplepat, context):
- enctriple = self.__encodeTriple(triple)
- for cid in self.__getTripleContexts(enctriple):
- if context is not None and req_cid != cid:
- continue
- self.__removeTripleContext(enctriple, cid)
- ctxs = self.__getTripleContexts(enctriple, skipQuoted=True)
- if None in ctxs and (context is None or len(ctxs) == 1):
- self.__removeTripleContext(enctriple, None)
- if len(self.__getTripleContexts(enctriple)) == 0:
- # triple has been removed from all contexts
- sid, pid, oid = enctriple
- self.__subjectIndex[sid].remove(enctriple)
- self.__predicateIndex[pid].remove(enctriple)
- self.__objectIndex[oid].remove(enctriple)
-
- del self.__tripleContexts[enctriple]
-
- if (
- req_cid is not None
- and req_cid in self.__contextTriples
- and len(self.__contextTriples[req_cid]) == 0
- ):
- # all triples are removed out of this context
- # and it's not the default context so delete it
- del self.__contextTriples[req_cid]
-
- if (
- triplepat == (None, None, None)
- and context in self.__all_contexts
- and not self.graph_aware
- ):
- # remove the whole context
- self.__all_contexts.remove(context)
-
- def triples(self, triplein, context=None):
- if context is not None:
- if context == self: # hmm...does this really ever happen?
- context = None
-
- cid = self.__obj2id(context)
- enctriple = self.__encodeTriple(triplein)
- sid, pid, oid = enctriple
-
- # all triples case (no triple parts given as pattern)
- if sid is None and pid is None and oid is None:
- return self.__all_triples(cid)
-
- # optimize "triple in graph" case (all parts given)
- if sid is not None and pid is not None and oid is not None:
- if (
- sid in self.__subjectIndex
- and enctriple in self.__subjectIndex[sid]
- and self.__tripleHasContext(enctriple, cid)
- ):
- return ((triplein, self.__contexts(enctriple)) for i in [0])
- else:
- return self.__emptygen()
-
- # remaining cases: one or two out of three given
- sets = []
- if sid is not None:
- if sid in self.__subjectIndex:
- sets.append(self.__subjectIndex[sid])
- else:
- return self.__emptygen()
- if pid is not None:
- if pid in self.__predicateIndex:
- sets.append(self.__predicateIndex[pid])
- else:
- return self.__emptygen()
- if oid is not None:
- if oid in self.__objectIndex:
- sets.append(self.__objectIndex[oid])
- else:
- return self.__emptygen()
-
- # to get the result, do an intersection of the sets (if necessary)
- if len(sets) > 1:
- enctriples = sets[0].intersection(*sets[1:])
- else:
- enctriples = sets[0].copy()
-
- return (
- (self.__decodeTriple(enctriple), self.__contexts(enctriple))
- for enctriple in enctriples
- if self.__tripleHasContext(enctriple, cid)
- )
-
- def contexts(self, triple=None):
- if triple is None or triple == (None, None, None):
- return (context for context in self.__all_contexts)
-
- enctriple = self.__encodeTriple(triple)
- sid, pid, oid = enctriple
- if sid in self.__subjectIndex and enctriple in self.__subjectIndex[sid]:
- return self.__contexts(enctriple)
- else:
- return self.__emptygen()
-
- def __len__(self, context=None):
- cid = self.__obj2id(context)
- if cid not in self.__contextTriples:
- return 0
- return len(self.__contextTriples[cid])
-
- def add_graph(self, graph):
- if not self.graph_aware:
- Store.add_graph(self, graph)
- else:
- self.__all_contexts.add(graph)
-
- def remove_graph(self, graph):
- if not self.graph_aware:
- Store.remove_graph(self, graph)
- else:
- self.remove((None, None, None), graph)
- try:
- self.__all_contexts.remove(graph)
- except KeyError:
- pass # we didn't know this graph, no problem
-
- # internal utility methods below
-
- def __addTripleContext(self, enctriple, context, quoted):
- """add the given context to the set of contexts for the triple"""
- cid = self.__obj2id(context)
-
- sid, pid, oid = enctriple
- if sid in self.__subjectIndex and enctriple in self.__subjectIndex[sid]:
- # we know the triple exists somewhere in the store
- if enctriple not in self.__tripleContexts:
- # triple exists with default ctx info
- # start with a copy of the default ctx info
- self.__tripleContexts[enctriple] = self.__defaultContexts.copy()
-
- self.__tripleContexts[enctriple][cid] = quoted
- if not quoted:
- self.__tripleContexts[enctriple][None] = quoted
- else:
- # the triple didn't exist before in the store
- if quoted: # this context only
- self.__tripleContexts[enctriple] = {cid: quoted}
- else: # default context as well
- self.__tripleContexts[enctriple] = {cid: quoted, None: quoted}
-
- # if the triple is not quoted add it to the default context
- if not quoted:
- self.__contextTriples[None].add(enctriple)
-
- # always add the triple to given context, making sure it's initialized
- if cid not in self.__contextTriples:
- self.__contextTriples[cid] = set()
- self.__contextTriples[cid].add(enctriple)
-
- # if this is the first ever triple in the store, set default ctx info
- if self.__defaultContexts is None:
- self.__defaultContexts = self.__tripleContexts[enctriple]
-
- # if the context info is the same as default, no need to store it
- if self.__tripleContexts[enctriple] == self.__defaultContexts:
- del self.__tripleContexts[enctriple]
-
- def __getTripleContexts(self, enctriple, skipQuoted=False):
- """return a list of (encoded) contexts for the triple, skipping
- quoted contexts if skipQuoted==True"""
-
- ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts)
-
- if not skipQuoted:
- return ctxs.keys()
-
- return [cid for cid, quoted in ctxs.items() if not quoted]
-
- def __tripleHasContext(self, enctriple, cid):
- """return True iff the triple exists in the given context"""
- ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts)
- return cid in ctxs
-
- def __removeTripleContext(self, enctriple, cid):
- """remove the context from the triple"""
- ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts).copy()
- del ctxs[cid]
- if ctxs == self.__defaultContexts:
- del self.__tripleContexts[enctriple]
- else:
- self.__tripleContexts[enctriple] = ctxs
- self.__contextTriples[cid].remove(enctriple)
-
- def __obj2id(self, obj):
- """encode object, storing it in the encoding map if necessary,
- and return the integer key"""
- if obj not in self.__obj2int:
- id = randid()
- while id in self.__int2obj:
- id = randid()
- self.__obj2int[obj] = id
- self.__int2obj[id] = obj
- return id
- return self.__obj2int[obj]
-
- def __encodeTriple(self, triple):
- """encode a whole triple, returning the encoded triple"""
- return tuple(map(self.__obj2id, triple))
-
- def __decodeTriple(self, enctriple):
- """decode a whole encoded triple, returning the original
- triple"""
- return tuple(map(self.__int2obj.get, enctriple))
-
- def __all_triples(self, cid):
- """return a generator which yields all the triples (unencoded)
- of the given context"""
- if cid not in self.__contextTriples:
- return
- for enctriple in self.__contextTriples[cid].copy():
- yield self.__decodeTriple(enctriple), self.__contexts(enctriple)
-
- def __contexts(self, enctriple):
- """return a generator for all the non-quoted contexts
- (unencoded) the encoded triple appears in"""
- return (
- self.__int2obj.get(cid)
- for cid in self.__getTripleContexts(enctriple, skipQuoted=True)
- if cid is not None
- )
-
- def __emptygen(self):
- """return an empty generator"""
- if False:
- yield
-
-
-def randid(randint=random.randint, choice=random.choice, signs=(-1, 1)):
- return choice(signs) * randint(1, 2000000000)
-
-
-del random
+ def update(self, update, initNs, initBindings, queryGraph, **kwargs):
+ super(Memory, self).update(update, initNs, initBindings, queryGraph, **kwargs)
diff --git a/rdflib/plugins/sleepycat.py b/rdflib/plugins/stores/sleepycat.py
index 735d3c3a..735d3c3a 100644
--- a/rdflib/plugins/sleepycat.py
+++ b/rdflib/plugins/stores/sleepycat.py
diff --git a/rdflib/plugins/stores/sparqlstore.py b/rdflib/plugins/stores/sparqlstore.py
index 1bdf2d32..39f9790f 100644
--- a/rdflib/plugins/stores/sparqlstore.py
+++ b/rdflib/plugins/stores/sparqlstore.py
@@ -646,7 +646,7 @@ class SPARQLUpdateStore(SPARQLStore):
.. admonition:: Context-aware query rewriting
- **When:** If context-awareness is enabled and the graph is not the default graph of the store.
- - **Why:** To ensure consistency with the :class:`~rdflib.plugins.memory.IOMemory` store.
+ - **Why:** To ensure consistency with the :class:`~rdflib.plugins.stores.memory.Memory` store.
The graph must accept "local" SPARQL requests (requests with no GRAPH keyword)
as if it were the default graph.
- **What is done:** These "local" queries are rewritten by this store.
diff --git a/test/store_performance.py b/test/store_performance.py
index 9e55d654..399ac7cb 100644
--- a/test/store_performance.py
+++ b/test/store_performance.py
@@ -98,7 +98,7 @@ class StoreTestCase(unittest.TestCase):
class MemoryStoreTestCase(StoreTestCase):
- store = "IOMemory"
+ store = "Memory"
if __name__ == "__main__":
diff --git a/test/test_aggregate_graphs.py b/test/test_aggregate_graphs.py
index 5d58f4d3..efe684d3 100644
--- a/test/test_aggregate_graphs.py
+++ b/test/test_aggregate_graphs.py
@@ -60,7 +60,7 @@ WHERE {?n3Doc a log:N3Document }"""
class GraphAggregates1(unittest.TestCase):
def setUp(self):
- memStore = plugin.get("IOMemory", Store)()
+ memStore = plugin.get("Memory", Store)()
self.graph1 = Graph(memStore)
self.graph2 = Graph(memStore)
self.graph3 = Graph(memStore)
@@ -109,7 +109,7 @@ class GraphAggregates2(unittest.TestCase):
sparql = True
def setUp(self):
- memStore = plugin.get("IOMemory", Store)()
+ memStore = plugin.get("Memory", Store)()
self.graph1 = Graph(memStore, URIRef("http://example.com/graph1"))
self.graph2 = Graph(memStore, URIRef("http://example.com/graph2"))
self.graph3 = Graph(memStore, URIRef("http://example.com/graph3"))
diff --git a/test/test_canonicalization.py b/test/test_canonicalization.py
index 12dd657f..75abff74 100644
--- a/test/test_canonicalization.py
+++ b/test/test_canonicalization.py
@@ -3,7 +3,7 @@ from rdflib import Graph, RDF, BNode, URIRef, Namespace, ConjunctiveGraph, Liter
from rdflib.compare import to_isomorphic, to_canonical_graph
import rdflib
-from rdflib.plugins.memory import IOMemory
+from rdflib.plugins.stores.memory import Memory
from io import StringIO
@@ -287,7 +287,7 @@ def test_issue682_signing_named_graphs():
cmary = URIRef("http://love.com/lovers/mary#")
cjohn = URIRef("http://love.com/lovers/john#")
- store = IOMemory()
+ store = Memory()
g = ConjunctiveGraph(store=store)
g.bind("love", ns)
diff --git a/test/test_dataset.py b/test/test_dataset.py
index ef7eda76..ea56ef49 100644
--- a/test/test_dataset.py
+++ b/test/test_dataset.py
@@ -178,7 +178,7 @@ if __name__ == "__main__":
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ("default", "IOMemory", "Auditable", "Concurrent", "SPARQLStore"):
+ if s.name in ("default", "Memory", "Auditable", "Concurrent", "SPARQLStore"):
continue # these are tested by default
if not s.getClass().graph_aware:
diff --git a/test/test_graph.py b/test/test_graph.py
index 0032213e..560c1a43 100644
--- a/test/test_graph.py
+++ b/test/test_graph.py
@@ -260,7 +260,7 @@ tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
if s.name in (
"default",
- "IOMemory",
+ "Memory",
"Auditable",
"Concurrent",
"SPARQLStore",
@@ -268,6 +268,10 @@ for s in plugin.plugins(pluginname, plugin.Store):
):
continue # these are tested by default
+ if s.name in ("SimpleMemory",):
+ # these (by design) won't pass some of the tests (like Intersection)
+ continue
+
locals()["t%d" % tests] = type(
"%sGraphTestCase" % s.name, (GraphTestCase,), {"store": s.name}
)
diff --git a/test/test_graph_context.py b/test/test_graph_context.py
index 0a7ac8a3..f6c6713e 100644
--- a/test/test_graph_context.py
+++ b/test/test_graph_context.py
@@ -370,7 +370,7 @@ tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
if s.name in (
"default",
- "IOMemory",
+ "Memory",
"Auditable",
"Concurrent",
"SPARQLStore",
diff --git a/test/test_iomemory.py b/test/test_iomemory.py
deleted file mode 100644
index 4239fc3c..00000000
--- a/test/test_iomemory.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-
-Iteration and update conflict with set based IOMemory store
-
-https://github.com/RDFLib/rdflib/issues/286
-
-"""
-
-from rdflib.store import Store
-from rdflib import plugin
-
-from rdflib import Graph, Literal, Namespace
-
-
-def test_concurrent1():
- dns = Namespace(u"http://www.example.com/")
-
- store = plugin.get("IOMemory", Store)()
- g1 = Graph(store=store)
-
- g1.add((dns.Name, dns.prop, Literal(u"test")))
- g1.add((dns.Name, dns.prop, Literal(u"test2")))
- g1.add((dns.Name, dns.prop, Literal(u"test3")))
-
- n = len(g1)
- i = 0
-
- for t in g1.triples((None, None, None)):
- i += 1
- # next line causes problems because it adds a new Subject that needs
- # to be indexed in __subjectIndex dictionary in IOMemory Store.
- # which invalidates the iterator used to iterate over g1
- g1.add(t)
-
- assert i == n
-
-
-def test_concurrent2():
- dns = Namespace(u"http://www.example.com/")
-
- store = plugin.get("IOMemory", Store)()
- g1 = Graph(store=store)
- g2 = Graph(store=store)
-
- g1.add((dns.Name, dns.prop, Literal(u"test")))
- g1.add((dns.Name, dns.prop, Literal(u"test2")))
- g1.add((dns.Name, dns.prop, Literal(u"test3")))
-
- n = len(g1)
- i = 0
-
- for t in g1.triples((None, None, None)):
- i += 1
- g2.add(t)
- # next line causes problems because it adds a new Subject that needs
- # to be indexed in __subjectIndex dictionary in IOMemory Store.
- # which invalidates the iterator used to iterate over g1
- g2.add((dns.Name1, dns.prop1, Literal(u"test")))
- g2.add((dns.Name1, dns.prop, Literal(u"test")))
- g2.add((dns.Name, dns.prop, Literal(u"test4")))
-
- assert i == n
-
-
-if __name__ == "__main__":
- test_concurrent1()
- test_concurrent2()
diff --git a/test/test_memory_store.py b/test/test_memory_store.py
index 0f5771db..ad46d6c0 100644
--- a/test/test_memory_store.py
+++ b/test/test_memory_store.py
@@ -1,12 +1,34 @@
import unittest
import rdflib
-rdflib.plugin.register("Memory", rdflib.store.Store, "rdflib.plugins.memory", "Memory")
+rdflib.plugin.register("SimpleMemory", rdflib.store.Store, "rdflib.plugins.stores.memory", "SimpleMemory")
+rdflib.plugin.register("Memory", rdflib.store.Store, "rdflib.plugins.stores.memory", "Memory")
+class SimpleStoreTestCase(unittest.TestCase):
+ def test_memory_store(self):
+ g = rdflib.Graph("SimpleMemory")
+ subj1 = rdflib.URIRef("http://example.org/foo#bar1")
+ pred1 = rdflib.URIRef("http://example.org/foo#bar2")
+ obj1 = rdflib.URIRef("http://example.org/foo#bar3")
+ triple1 = (subj1, pred1, obj1)
+ triple2 = (
+ subj1,
+ rdflib.URIRef("http://example.org/foo#bar4"),
+ rdflib.URIRef("http://example.org/foo#bar5"),
+ )
+ g.add(triple1)
+ self.assertTrue(len(g) == 1)
+ g.add(triple2)
+ self.assertTrue(len(list(g.triples((subj1, None, None)))) == 2)
+ self.assertTrue(len(list(g.triples((None, pred1, None)))) == 1)
+ self.assertTrue(len(list(g.triples((None, None, obj1)))) == 1)
+ g.remove(triple1)
+ self.assertTrue(len(g) == 1)
+ g.serialize()
-class StoreTestCase(unittest.TestCase):
+class MemoryStoreTestCase(unittest.TestCase):
def test_memory_store(self):
- g = rdflib.Graph("Memory1")
+ g = rdflib.Graph("Memory")
subj1 = rdflib.URIRef("http://example.org/foo#bar1")
pred1 = rdflib.URIRef("http://example.org/foo#bar2")
obj1 = rdflib.URIRef("http://example.org/foo#bar3")
diff --git a/test/test_namespace.py b/test/test_namespace.py
index 48896fdc..510d8515 100644
--- a/test/test_namespace.py
+++ b/test/test_namespace.py
@@ -39,8 +39,8 @@ class NamespacePrefixTest(unittest.TestCase):
)
graph = Graph().parse(data=data, format="turtle")
for p, n in tuple(graph.namespaces()):
- graph.store._IOMemory__namespace.pop(p)
- graph.store._IOMemory__prefix.pop(n)
+ graph.store._Memory__namespace.pop(p)
+ graph.store._Memory__prefix.pop(n)
graph.namespace_manager.reset()
self.assertFalse(tuple(graph.namespaces()))
u = URIRef("http://example.org/a")
diff --git a/test/test_util.py b/test/test_util.py
index 89890c8d..d8f0a5d1 100644
--- a/test/test_util.py
+++ b/test/test_util.py
@@ -293,12 +293,12 @@ class TestUtilTermConvert(unittest.TestCase):
def test_util_from_n3_expectquotedgraph(self):
s = "{<http://example.com/schema>}"
- res = util.from_n3(s, default=None, backend="IOMemory")
+ res = util.from_n3(s, default=None, backend="Memory")
self.assertTrue(isinstance(res, QuotedGraph))
def test_util_from_n3_expectgraph(self):
s = "[<http://example.com/schema>]"
- res = util.from_n3(s, default=None, backend="IOMemory")
+ res = util.from_n3(s, default=None, backend="Memory")
self.assertTrue(isinstance(res, Graph))
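
A short, hypothetical snippet showing how the renamed plugins resolve after this commit (names taken from the rdflib/plugin.py registrations above; output formatting assumed):

    >>> from rdflib import plugin, Graph
    >>> from rdflib.store import Store
    >>> plugin.get("default", Store)
    <class 'rdflib.plugins.stores.memory.Memory'>
    >>> plugin.get("SimpleMemory", Store)
    <class 'rdflib.plugins.stores.memory.SimpleMemory'>
    >>> Graph("SimpleMemory").store.__class__
    <class 'rdflib.plugins.stores.memory.SimpleMemory'>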