author     Joern Hees <dev@joernhees.de>   2015-03-04 18:29:12 +0100
committer  Joern Hees <dev@joernhees.de>   2015-03-04 18:29:12 +0100
commit     b75c875a47ea9a2eee8601e2e110bd06d5cb99f1 (patch)
tree       4c7d62fa70069534f1d8ef2d9531db598a5d451a
parent     3e7b7937453d4abb7fffca792facedf17a8a4199 (diff)
run two fuseki servers with different default graph behaviors

On a SPARQLUpdateStore we need two different default graph behaviors:
- Dataset tests: need an empty default graph.
- ConjunctiveGraph tests: need the default graph to be the union of all graphs.
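For orientation, a minimal sketch (not part of this patch) of how the two endpoints are meant to be used from rdflib, assuming both fuseki instances configured in .travis.yml below are running locally on ports 3030 and 3031:

from rdflib import ConjunctiveGraph, Dataset

# Dataset tests need an empty, writable default graph, so they talk to the
# plain in-memory endpoint on port 3030.
ds = Dataset('SPARQLUpdateStore')
root = 'http://localhost:3030/db/'
ds.open((root + 'sparql', root + 'update'))

# ConjunctiveGraph tests need the default graph to be the union of all named
# graphs, so they talk to the TDB endpoint on port 3031 that is started with
# tdb:unionDefaultGraph=true.
cg = ConjunctiveGraph('SPARQLUpdateStore')
root = 'http://localhost:3031/db/'
cg.open((root + 'sparql', root + 'update'))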
-rw-r--r--  .travis.yml                      5
-rw-r--r--  test/test_dataset.py            44
-rw-r--r--  test/test_sparqlupdatestore.py  22
3 files changed, 49 insertions, 22 deletions
diff --git a/.travis.yml b/.travis.yml
index d8ded45d..91f95ff1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,7 +18,10 @@ before_install:
- tar -zxf jena-fuseki-1.1.1-distribution.tar.gz
- mv jena-fuseki-1.1.1 fuseki
- cd fuseki
- - bash fuseki-server --debug --update --memTDB --set tdb:unionDefaultGraph=true /db &>fuseki.log &
+ # normal SPARQLStore & Dataset tests:
+ - bash fuseki-server --port 3030 --debug --update --mem /db &>fuseki.log &
+ # SPARQLUpdateStore tests & ConjunctiveGraph endpoint behavior:
+ - bash fuseki-server --port 3031 --debug --update --memTDB --set tdb:unionDefaultGraph=true /db &>fuseki.log &
- sleep 2
- cd ..
diff --git a/test/test_dataset.py b/test/test_dataset.py
index 68726736..4d39c3d6 100644
--- a/test/test_dataset.py
+++ b/test/test_dataset.py
@@ -10,6 +10,22 @@ from rdflib.graph import DATASET_DEFAULT_GRAPH_ID
from nose.exc import SkipTest
+# Will also run SPARQLUpdateStore tests against local SPARQL1.1 endpoint if
+# available. This assumes SPARQL1.1 query/update endpoints running locally at
+# http://localhost:3030/db/
+#
+# Testing SPARQLUpdateStore Dataset behavior needs a different endpoint behavior
+# than our ConjunctiveGraph tests in test_sparqlupdatestore.py!
+#
+# For the tests here to run, you can for example start fuseki with:
+# ./fuseki-server --mem --update /db
+
+# THIS WILL DELETE ALL DATA IN THE /db dataset
+
+HOST = 'http://localhost:3030'
+DB = '/db/'
+
+
class DatasetTestCase(unittest.TestCase):
store = 'default'
slow = True
@@ -25,7 +41,7 @@ class DatasetTestCase(unittest.TestCase):
_, self.tmppath = mkstemp(
prefix='test', dir='/tmp', suffix='.sqlite')
elif self.store == "SPARQLUpdateStore":
- root = "http://localhost:3030/db/"
+ root = HOST + DB
self.graph.open((root + "sparql", root + "update"))
else:
self.tmppath = mkdtemp()
@@ -64,8 +80,8 @@ class DatasetTestCase(unittest.TestCase):
def testGraphAware(self):
- if not self.graph.store.graph_aware: return
-
+ if not self.graph.store.graph_aware: return
+
g = self.graph
g1 = g.graph(self.c1)
@@ -76,9 +92,9 @@ class DatasetTestCase(unittest.TestCase):
self.assertEquals(set(x.identifier for x in self.graph.contexts()),
set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
- # added graph is empty
+ # added graph is empty
self.assertEquals(len(g1), 0)
-
+
g1.add( (self.tarek, self.likes, self.pizza) )
# added graph still exists
@@ -90,7 +106,7 @@ class DatasetTestCase(unittest.TestCase):
g1.remove( (self.tarek, self.likes, self.pizza) )
- # added graph is empty
+ # added graph is empty
self.assertEquals(len(g1), 0)
# Some SPARQL endpoint backends (e.g. TDB) do not consider
@@ -101,21 +117,21 @@ class DatasetTestCase(unittest.TestCase):
set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
g.remove_graph(self.c1)
-
+
# graph is gone
- self.assertEquals(set(x.identifier for x in self.graph.contexts()),
+ self.assertEquals(set(x.identifier for x in self.graph.contexts()),
set([DATASET_DEFAULT_GRAPH_ID]))
-
+
def testDefaultGraph(self):
        # Sometimes the default graph is read-only (e.g. TDB in union mode)
if self.store == "SPARQLUpdateStore":
print "Please make sure updating the default graph " \
"is supported by your SPARQL endpoint"
-
+
self.graph.add(( self.tarek, self.likes, self.pizza))
self.assertEquals(len(self.graph), 1)
# only default exists
- self.assertEquals(set(x.identifier for x in self.graph.contexts()),
+ self.assertEquals(set(x.identifier for x in self.graph.contexts()),
set([DATASET_DEFAULT_GRAPH_ID]))
# removing default graph removes triples but not actual graph
@@ -123,7 +139,7 @@ class DatasetTestCase(unittest.TestCase):
self.assertEquals(len(self.graph), 0)
# default still exists
- self.assertEquals(set(x.identifier for x in self.graph.contexts()),
+ self.assertEquals(set(x.identifier for x in self.graph.contexts()),
set([DATASET_DEFAULT_GRAPH_ID]))
def testNotUnion(self):
@@ -134,7 +150,7 @@ class DatasetTestCase(unittest.TestCase):
g1 = self.graph.graph(self.c1)
g1.add((self.tarek, self.likes, self.pizza))
- self.assertEqual(list(self.graph.objects(self.tarek, None)),
+ self.assertEqual(list(self.graph.objects(self.tarek, None)),
[])
self.assertEqual(list(g1.objects(self.tarek, None)), [self.pizza])
@@ -159,7 +175,7 @@ for s in plugin.plugins(pluginname, plugin.Store):
if s.name == "SPARQLUpdateStore":
import urllib2
try:
- assert len(urllib2.urlopen("http://localhost:3030/").read()) > 0
+ assert len(urllib2.urlopen(HOST).read()) > 0
except:
sys.stderr.write("No SPARQL endpoint for %s (tests skipped)\n" % s.name)
continue
diff --git a/test/test_sparqlupdatestore.py b/test/test_sparqlupdatestore.py
index 1bd276cd..39678cc6 100644
--- a/test/test_sparqlupdatestore.py
+++ b/test/test_sparqlupdatestore.py
@@ -5,10 +5,18 @@ import re
from rdflib import ConjunctiveGraph, URIRef, Literal
from rdflib.util import from_n3
-# this assumed SPARQL1.1 query/update endpoints
-# running locally at http://localhost:3030/db/
-# for instance fuseki started with
-# ./fuseki-server --memTDB --update --set tdb:unionDefaultGraph=true /db
+HOST = 'http://localhost:3031'
+DB = '/db/'
+
+# this assumes SPARQL1.1 query/update endpoints running locally at
+# http://localhost:3031/db/
+#
+# The ConjunctiveGraph tests below require that the SPARQL endpoint renders its
+# default graph as the union of all known graphs! This is incompatible with the
+# endpoint behavior required by our Dataset tests in test_dataset.py, so you
+# need to run a second SPARQL endpoint on a non standard port,
+# e.g. fuseki started with:
+# ./fuseki-server --port 3031 --memTDB --update --set tdb:unionDefaultGraph=true /db
# THIS WILL DELETE ALL DATA IN THE /db dataset
@@ -30,7 +38,7 @@ class TestSparql11(unittest.TestCase):
self.longMessage = True
self.graph = ConjunctiveGraph('SPARQLUpdateStore')
- root = "http://localhost:3030/db/"
+ root = HOST + DB
self.graph.open((root + "sparql", root + "update"))
# clean out the store
@@ -297,9 +305,9 @@ class TestSparql11(unittest.TestCase):
from nose import SkipTest
import urllib2
try:
- assert len(urllib2.urlopen("http://localhost:3030/").read()) > 0
+ assert len(urllib2.urlopen(HOST).read()) > 0
except:
- raise SkipTest("http://localhost:3030/ is unavailable.")
+ raise SkipTest(HOST + " is unavailable.")
if __name__ == '__main__':