author     Nicholas Car <nicholas.car@surroundaustralia.com>   2020-05-16 21:00:24 +1000
committer  Nicholas Car <nicholas.car@surroundaustralia.com>   2020-05-16 21:00:24 +1000
commit     0be6f6039479ce29cf71b11e76be54e186130036 (patch)
tree       897d208525a7e8952fb496151db074d49dcdeb3a /test
parent     2a8d70824e1b4caf0c606074a44ac3a15fa72718 (diff)
download   rdflib-0be6f6039479ce29cf71b11e76be54e186130036.tar.gz
blacked all python files
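The hunks below are the output of running the Black code formatter over the repository's Python files: single quotes become double quotes, long calls are wrapped one argument per line, and trailing commas are added. As a minimal sketch of how such a commit is produced (the exact command and Black version are assumptions, not recorded here), the reformatting can be reproduced with:

    # Assumed reproduction of this commit: run Black over the package and
    # test trees. A 2020-era release (e.g. black==19.10b0) matches the
    # quote and trailing-comma style seen in these hunks.
    import subprocess

    subprocess.run(["python", "-m", "black", "rdflib", "test"], check=True)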
Diffstat (limited to 'test')
-rw-r--r--  test/earl.py | 12
-rw-r--r--  test/manifest.py | 102
-rw-r--r--  test/store_performance.py | 14
-rw-r--r--  test/test_aggregate_graphs.py | 46
-rw-r--r--  test/test_auditable.py | 244
-rw-r--r--  test/test_batch_add.py | 33
-rw-r--r--  test/test_bnode_ncname.py | 16
-rw-r--r--  test/test_canonicalization.py | 435
-rw-r--r--  test/test_comparison.py | 7
-rw-r--r--  test/test_conjunctive_graph.py | 15
-rw-r--r--  test/test_conneg.py | 8
-rw-r--r--  test/test_conventions.py | 13
-rw-r--r--  test/test_core_sparqlstore.py | 5
-rw-r--r--  test/test_dataset.py | 94
-rw-r--r--  test/test_datetime.py | 38
-rw-r--r--  test/test_dawg.py | 206
-rw-r--r--  test/test_diff.py | 4
-rw-r--r--  test/test_duration.py | 8
-rw-r--r--  test/test_empty_xml_base.py | 30
-rw-r--r--  test/test_evaluate_bind.py | 28
-rw-r--r--  test/test_events.py | 15
-rw-r--r--  test/test_expressions.py | 156
-rw-r--r--  test/test_extras_external_graph_libs.py | 42
-rw-r--r--  test/test_finalnewline.py | 14
-rw-r--r--  test/test_graph.py | 42
-rw-r--r--  test/test_graph_context.py | 115
-rw-r--r--  test/test_graph_formula.py | 32
-rw-r--r--  test/test_graph_items.py | 7
-rw-r--r--  test/test_hex_binary.py | 11
-rw-r--r--  test/test_initbindings.py | 275
-rw-r--r--  test/test_iomemory.py | 3
-rw-r--r--  test/test_issue084.py | 91
-rw-r--r--  test/test_issue1003.py | 60
-rw-r--r--  test/test_issue160.py | 17
-rw-r--r--  test/test_issue161.py | 10
-rw-r--r--  test/test_issue184.py | 4
-rw-r--r--  test/test_issue190.py | 26
-rw-r--r--  test/test_issue200.py | 13
-rw-r--r--  test/test_issue209.py | 1
-rw-r--r--  test/test_issue223.py | 6
-rw-r--r--  test/test_issue247.py | 1
-rw-r--r--  test/test_issue248.py | 38
-rw-r--r--  test/test_issue274.py | 78
-rw-r--r--  test/test_issue363.py | 14
-rw-r--r--  test/test_issue379.py | 4
-rw-r--r--  test/test_issue381.py | 53
-rw-r--r--  test/test_issue432.py | 2
-rw-r--r--  test/test_issue446.py | 11
-rw-r--r--  test/test_issue492.py | 4
-rw-r--r--  test/test_issue523.py | 8
-rw-r--r--  test/test_issue532.py | 2
-rw-r--r--  test/test_issue545.py | 4
-rw-r--r--  test/test_issue554.py | 7
-rw-r--r--  test/test_issue563.py | 24
-rw-r--r--  test/test_issue579.py | 4
-rw-r--r--  test/test_issue604.py | 2
-rw-r--r--  test/test_issue655.py | 52
-rw-r--r--  test/test_issue715.py | 13
-rw-r--r--  test/test_issue733.py | 19
-rw-r--r--  test/test_issue920.py | 19
-rw-r--r--  test/test_issue923.py | 5
-rw-r--r--  test/test_issue953.py | 6
-rw-r--r--  test/test_issue_git_200.py | 3
-rw-r--r--  test/test_issue_git_336.py | 26
-rw-r--r--  test/test_literal.py | 56
-rw-r--r--  test/test_memory_store.py | 16
-rw-r--r--  test/test_mulpath_n3.py | 4
-rw-r--r--  test/test_n3.py | 84
-rw-r--r--  test/test_n3_suite.py | 22
-rw-r--r--  test/test_namespace.py | 85
-rw-r--r--  test/test_nodepickler.py | 13
-rw-r--r--  test/test_nquads.py | 23
-rw-r--r--  test/test_nquads_w3c.py | 13
-rw-r--r--  test/test_nt_misc.py | 19
-rw-r--r--  test/test_nt_suite.py | 52
-rw-r--r--  test/test_nt_w3c.py | 13
-rw-r--r--  test/test_parser.py | 11
-rw-r--r--  test/test_parser_helpers.py | 1
-rw-r--r--  test/test_prefixTypes.py | 11
-rw-r--r--  test/test_preflabel.py | 75
-rw-r--r--  test/test_prettyxml.py | 95
-rw-r--r--  test/test_rdf_lists.py | 20
-rw-r--r--  test/test_rdfxml.py | 32
-rw-r--r--  test/test_roundtrip.py | 37
-rw-r--r--  test/test_rules.py | 11
-rw-r--r--  test/test_seq.py | 8
-rw-r--r--  test/test_serializexml.py | 66
-rw-r--r--  test/test_slice.py | 38
-rw-r--r--  test/test_sparql.py | 41
-rw-r--r--  test/test_sparql_agg_distinct.py | 49
-rw-r--r--  test/test_sparql_agg_undef.py | 24
-rw-r--r--  test/test_sparql_construct_bindings.py | 21
-rw-r--r--  test/test_sparql_service.py | 32
-rw-r--r--  test/test_sparqlstore.py | 41
-rw-r--r--  test/test_sparqlupdatestore.py | 173
-rw-r--r--  test/test_swap_n3.py | 61
-rw-r--r--  test/test_term.py | 208
-rw-r--r--  test/test_trig.py | 113
-rw-r--r--  test/test_trig_w3c.py | 32
-rw-r--r--  test/test_trix_parse.py | 3
-rw-r--r--  test/test_trix_serialize.py | 27
-rw-r--r--  test/test_tsvresults.py | 1
-rw-r--r--  test/test_turtle_serialize.py | 55
-rw-r--r--  test/test_turtle_sort_issue613.py | 10
-rw-r--r--  test/test_turtle_w3c.py | 19
-rw-r--r--  test/test_util.py | 133
-rw-r--r--  test/test_wide_python.py | 13
-rw-r--r--  test/test_xmlliterals.py | 48
-rw-r--r--  test/testutils.py | 16
-rw-r--r--  test/triple_store.py | 4
-rw-r--r--  test/type_check.py | 17
111 files changed, 2631 insertions(+), 2012 deletions(-)
diff --git a/test/earl.py b/test/earl.py
index 9e4d0413..54df7d3e 100644
--- a/test/earl.py
+++ b/test/earl.py
@@ -9,17 +9,17 @@ EARL = Namespace("http://www.w3.org/ns/earl#")
report = Graph()
-report.bind('foaf', FOAF)
-report.bind('earl', EARL)
-report.bind('doap', DOAP)
-report.bind('dc', DC)
+report.bind("foaf", FOAF)
+report.bind("earl", EARL)
+report.bind("doap", DOAP)
+report.bind("dc", DC)
-me = URIRef('http://gromgull.net/me')
+me = URIRef("http://gromgull.net/me")
report.add((me, RDF.type, FOAF.Person))
report.add((me, FOAF.homepage, URIRef("http://gromgull.net")))
report.add((me, FOAF.name, Literal("Gunnar Aastrand Grimnes")))
-rdflib = URIRef('https://github.com/RDFLib/rdflib')
+rdflib = URIRef("https://github.com/RDFLib/rdflib")
report.add((rdflib, DOAP.homepage, rdflib))
report.add((rdflib, DOAP.name, Literal("rdflib")))
diff --git a/test/manifest.py b/test/manifest.py
index 1a8d774d..107b9422 100644
--- a/test/manifest.py
+++ b/test/manifest.py
@@ -5,26 +5,27 @@ from nose.tools import nottest
from rdflib import Graph, RDF, RDFS, Namespace
-MF = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#')
-QT = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-query#')
-UP = Namespace('http://www.w3.org/2009/sparql/tests/test-update#')
-RDFT = Namespace('http://www.w3.org/ns/rdftest#')
+MF = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#")
+QT = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-query#")
+UP = Namespace("http://www.w3.org/2009/sparql/tests/test-update#")
+RDFT = Namespace("http://www.w3.org/ns/rdftest#")
-DAWG = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-dawg#')
+DAWG = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-dawg#")
-RDFTest = namedtuple('RDFTest', ['uri', 'name', 'comment', 'data',
- 'graphdata', 'action', 'result', 'syntax'])
+RDFTest = namedtuple(
+ "RDFTest",
+ ["uri", "name", "comment", "data", "graphdata", "action", "result", "syntax"],
+)
def read_manifest(f, base=None, legacy=False):
-
def _str(x):
if x is not None:
return str(x)
return None
g = Graph()
- g.load(f, publicID=base, format='turtle')
+ g.load(f, publicID=base, format="turtle")
for m in g.subjects(RDF.type, MF.Manifest):
@@ -36,17 +37,22 @@ def read_manifest(f, base=None, legacy=False):
for col in g.objects(m, MF.entries):
for e in g.items(col):
- approved = ((e, DAWG.approval, DAWG.Approved) in g or
- (e, DAWG.approval, DAWG.NotClassified) in g or
- (e, RDFT.approval, RDFT.Approved) in g)
+ approved = (
+ (e, DAWG.approval, DAWG.Approved) in g
+ or (e, DAWG.approval, DAWG.NotClassified) in g
+ or (e, RDFT.approval, RDFT.Approved) in g
+ )
# run proposed tests
# approved |= (e, RDFT.approval, RDFT.Proposed) in g
# run legacy tests with no approval set
if legacy:
- approved |= ((e, DAWG.approval, None) not in g and
- (e, RDFT.approval, None) not in g)
+ approved |= (e, DAWG.approval, None) not in g and (
+ e,
+ RDFT.approval,
+ None,
+ ) not in g
if not approved:
continue
@@ -75,15 +81,17 @@ def read_manifest(f, base=None, legacy=False):
data = g.value(a, UP.data)
graphdata = []
for gd in g.objects(a, UP.graphData):
- graphdata.append((g.value(gd, UP.graph),
- g.value(gd, RDFS.label)))
+ graphdata.append(
+ (g.value(gd, UP.graph), g.value(gd, RDFS.label))
+ )
r = g.value(e, MF.result)
resdata = g.value(r, UP.data)
resgraphdata = []
for gd in g.objects(r, UP.graphData):
- resgraphdata.append((g.value(gd, UP.graph),
- g.value(gd, RDFS.label)))
+ resgraphdata.append(
+ (g.value(gd, UP.graph), g.value(gd, RDFS.label))
+ )
res = resdata, resgraphdata
@@ -91,28 +99,37 @@ def read_manifest(f, base=None, legacy=False):
query = g.value(e, MF.action)
syntax = _type == MF.PositiveSyntaxTest11
- elif _type in (MF.PositiveUpdateSyntaxTest11,
- MF.NegativeUpdateSyntaxTest11):
+ elif _type in (
+ MF.PositiveUpdateSyntaxTest11,
+ MF.NegativeUpdateSyntaxTest11,
+ ):
query = g.value(e, MF.action)
syntax = _type == MF.PositiveUpdateSyntaxTest11
- elif _type in (RDFT.TestNQuadsPositiveSyntax,
- RDFT.TestNQuadsNegativeSyntax,
- RDFT.TestTrigPositiveSyntax,
- RDFT.TestTrigNegativeSyntax,
- RDFT.TestNTriplesPositiveSyntax,
- RDFT.TestNTriplesNegativeSyntax,
- RDFT.TestTurtlePositiveSyntax,
- RDFT.TestTurtleNegativeSyntax,
- ):
+ elif _type in (
+ RDFT.TestNQuadsPositiveSyntax,
+ RDFT.TestNQuadsNegativeSyntax,
+ RDFT.TestTrigPositiveSyntax,
+ RDFT.TestTrigNegativeSyntax,
+ RDFT.TestNTriplesPositiveSyntax,
+ RDFT.TestNTriplesNegativeSyntax,
+ RDFT.TestTurtlePositiveSyntax,
+ RDFT.TestTurtleNegativeSyntax,
+ ):
query = g.value(e, MF.action)
- syntax = _type in (RDFT.TestNQuadsPositiveSyntax,
- RDFT.TestNTriplesPositiveSyntax,
- RDFT.TestTrigPositiveSyntax,
- RDFT.TestTurtlePositiveSyntax)
-
- elif _type in (RDFT.TestTurtleEval, RDFT.TestTurtleNegativeEval,
- RDFT.TestTrigEval, RDFT.TestTrigNegativeEval):
+ syntax = _type in (
+ RDFT.TestNQuadsPositiveSyntax,
+ RDFT.TestNTriplesPositiveSyntax,
+ RDFT.TestTrigPositiveSyntax,
+ RDFT.TestTurtlePositiveSyntax,
+ )
+
+ elif _type in (
+ RDFT.TestTurtleEval,
+ RDFT.TestTurtleNegativeEval,
+ RDFT.TestTrigEval,
+ RDFT.TestTrigNegativeEval,
+ ):
query = g.value(e, MF.action)
res = g.value(e, MF.result)
syntax = _type in (RDFT.TestTurtleEval, RDFT.TestTrigEval)
@@ -122,9 +139,16 @@ def read_manifest(f, base=None, legacy=False):
print("I dont know DAWG Test Type %s" % _type)
continue
- yield _type, RDFTest(e, _str(name), _str(comment),
- _str(data), graphdata, _str(query),
- res, syntax)
+ yield _type, RDFTest(
+ e,
+ _str(name),
+ _str(comment),
+ _str(data),
+ graphdata,
+ _str(query),
+ res,
+ syntax,
+ )
@nottest
diff --git a/test/store_performance.py b/test/store_performance.py
index 578a51e5..9e55d654 100644
--- a/test/store_performance.py
+++ b/test/store_performance.py
@@ -24,7 +24,8 @@ class StoreTestCase(unittest.TestCase):
something other than a unit test... but for now we'll add it as a
unit test.
"""
- store = 'default'
+
+ store = "default"
tmppath = None
configString = os.environ.get("DBURI", "dburi")
@@ -36,6 +37,7 @@ class StoreTestCase(unittest.TestCase):
if self.store == "MySQL":
# from test.mysql import configString
from rdflib.store.MySQL import MySQL
+
path = self.configString
MySQL().destroy(path)
else:
@@ -54,10 +56,10 @@ class StoreTestCase(unittest.TestCase):
def testTime(self):
number = 1
print(self.store)
- print("input:", end=' ')
+ print("input:", end=" ")
for i in itertools.repeat(None, number):
self._testInput()
- print("random:", end=' ')
+ print("random:", end=" ")
for i in itertools.repeat(None, number):
self._testRandom()
print(".")
@@ -77,7 +79,7 @@ class StoreTestCase(unittest.TestCase):
for _i in it:
add_random()
t1 = time()
- print("%.3g" % (t1 - t0), end=' ')
+ print("%.3g" % (t1 - t0), end=" ")
def _testInput(self):
number = 1
@@ -92,12 +94,12 @@ class StoreTestCase(unittest.TestCase):
for _i in it:
add_from_input()
t1 = time()
- print("%.3g" % (t1 - t0), end=' ')
+ print("%.3g" % (t1 - t0), end=" ")
class MemoryStoreTestCase(StoreTestCase):
store = "IOMemory"
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_aggregate_graphs.py b/test/test_aggregate_graphs.py
index e8e40e81..5d58f4d3 100644
--- a/test/test_aggregate_graphs.py
+++ b/test/test_aggregate_graphs.py
@@ -36,8 +36,7 @@ testGraph3N3 = """
<> a log:N3Document.
"""
-sparqlQ = \
- """
+sparqlQ = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT *
FROM NAMED <http://example.com/graph1>
@@ -47,14 +46,12 @@ FROM <http://www.w3.org/2000/01/rdf-schema#>
WHERE {?sub ?pred rdfs:Class }"""
-sparqlQ2 =\
- """
+sparqlQ2 = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?class
WHERE { GRAPH ?graph { ?member a ?class } }"""
-sparqlQ3 =\
- """
+sparqlQ3 = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX log: <http://www.w3.org/2000/10/swap/log#>
SELECT ?n3Doc
@@ -63,15 +60,17 @@ WHERE {?n3Doc a log:N3Document }"""
class GraphAggregates1(unittest.TestCase):
def setUp(self):
- memStore = plugin.get('IOMemory', Store)()
+ memStore = plugin.get("IOMemory", Store)()
self.graph1 = Graph(memStore)
self.graph2 = Graph(memStore)
self.graph3 = Graph(memStore)
- for n3Str, graph in [(testGraph1N3, self.graph1),
- (testGraph2N3, self.graph2),
- (testGraph3N3, self.graph3)]:
- graph.parse(StringIO(n3Str), format='n3')
+ for n3Str, graph in [
+ (testGraph1N3, self.graph1),
+ (testGraph2N3, self.graph2),
+ (testGraph3N3, self.graph3),
+ ]:
+ graph.parse(StringIO(n3Str), format="n3")
self.G = ReadOnlyGraphAggregate([self.graph1, self.graph2, self.graph3])
@@ -92,7 +91,16 @@ class GraphAggregates1(unittest.TestCase):
assert (URIRef("http://test/foo"), RDF.type, RDFS.Resource) in self.G
barPredicates = [URIRef("http://test/d"), RDFS.isDefinedBy]
- assert len(list(self.G.triples_choices((URIRef("http://test/bar"), barPredicates, None)))) == 2
+ assert (
+ len(
+ list(
+ self.G.triples_choices(
+ (URIRef("http://test/bar"), barPredicates, None)
+ )
+ )
+ )
+ == 2
+ )
class GraphAggregates2(unittest.TestCase):
@@ -101,20 +109,22 @@ class GraphAggregates2(unittest.TestCase):
sparql = True
def setUp(self):
- memStore = plugin.get('IOMemory', Store)()
+ memStore = plugin.get("IOMemory", Store)()
self.graph1 = Graph(memStore, URIRef("http://example.com/graph1"))
self.graph2 = Graph(memStore, URIRef("http://example.com/graph2"))
self.graph3 = Graph(memStore, URIRef("http://example.com/graph3"))
- for n3Str, graph in [(testGraph1N3, self.graph1),
- (testGraph2N3, self.graph2),
- (testGraph3N3, self.graph3)]:
- graph.parse(StringIO(n3Str), format='n3')
+ for n3Str, graph in [
+ (testGraph1N3, self.graph1),
+ (testGraph2N3, self.graph2),
+ (testGraph3N3, self.graph3),
+ ]:
+ graph.parse(StringIO(n3Str), format="n3")
self.graph4 = Graph(memStore, RDFS)
self.graph4.parse(RDFS.uri)
self.G = ConjunctiveGraph(memStore)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_auditable.py b/test/test_auditable.py
index 63c7c5e5..e5aff715 100644
--- a/test/test_auditable.py
+++ b/test/test_auditable.py
@@ -9,7 +9,6 @@ EX = Namespace("http://example.org/")
class BaseTestAuditableStore(unittest.TestCase):
-
def assert_graph_equal(self, g1, g2):
try:
return self.assertSetEqual(set(g1), set(g2))
@@ -19,192 +18,157 @@ class BaseTestAuditableStore(unittest.TestCase):
class TestAuditableStore(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
self.g.add((EX.s0, EX.p0, EX.o0))
self.g.add((EX.s0, EX.p0, EX.o0bis))
- self.t = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t = Graph(AuditableStore(self.g.store), self.g.identifier)
def test_add_commit(self):
self.t.add((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.t,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.g,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
def test_remove_commit(self):
self.t.remove((EX.s0, EX.p0, EX.o0))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(self.t, [(EX.s0, EX.p0, EX.o0bis),])
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0bis),])
def test_multiple_remove_commit(self):
self.t.remove((EX.s0, EX.p0, None))
- self.assert_graph_equal(self.t, [
- ])
+ self.assert_graph_equal(self.t, [])
self.t.commit()
- self.assert_graph_equal(self.g, [
- ])
+ self.assert_graph_equal(self.g, [])
def test_noop_add_commit(self):
self.t.add((EX.s0, EX.p0, EX.o0))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.t, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_noop_remove_commit(self):
self.t.add((EX.s0, EX.p0, EX.o0))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.t, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_add_remove_commit(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.remove((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.t, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_remove_add_commit(self):
self.t.remove((EX.s1, EX.p1, EX.o1))
self.t.add((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.t,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.g,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
def test_add_rollback(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_remove_rollback(self):
self.t.remove((EX.s0, EX.p0, EX.o0))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_multiple_remove_rollback(self):
self.t.remove((EX.s0, EX.p0, None))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_noop_add_rollback(self):
self.t.add((EX.s0, EX.p0, EX.o0))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_noop_remove_rollback(self):
self.t.add((EX.s0, EX.p0, EX.o0))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_add_remove_rollback(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.remove((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_remove_add_rollback(self):
self.t.remove((EX.s1, EX.p1, EX.o1))
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
class TestAuditableStoreEmptyGraph(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
- self.t = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t = Graph(AuditableStore(self.g.store), self.g.identifier)
def test_add_commit(self):
self.t.add((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(self.t, [(EX.s1, EX.p1, EX.o1),])
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(self.g, [(EX.s1, EX.p1, EX.o1),])
def test_add_rollback(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- ])
+ self.assert_graph_equal(self.g, [])
class TestAuditableStoreConccurent(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
self.g.add((EX.s0, EX.p0, EX.o0))
self.g.add((EX.s0, EX.p0, EX.o0bis))
- self.t1 = Graph(AuditableStore(self.g.store),
- self.g.identifier)
- self.t2 = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t1 = Graph(AuditableStore(self.g.store), self.g.identifier)
+ self.t2 = Graph(AuditableStore(self.g.store), self.g.identifier)
self.t1.add((EX.s1, EX.p1, EX.o1))
self.t2.add((EX.s2, EX.p2, EX.o2))
self.t1.remove((EX.s0, EX.p0, EX.o0))
@@ -213,93 +177,71 @@ class TestAuditableStoreConccurent(BaseTestAuditableStore):
def test_commit_commit(self):
self.t1.commit()
self.t2.commit()
- self.assert_graph_equal(self.g, [
- (EX.s1, EX.p1, EX.o1),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(self.g, [(EX.s1, EX.p1, EX.o1), (EX.s2, EX.p2, EX.o2),])
def test_commit_rollback(self):
self.t1.commit()
self.t2.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s1, EX.p1, EX.o1),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s1, EX.p1, EX.o1), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_rollback_commit(self):
self.t1.rollback()
self.t2.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),])
def test_rollback_rollback(self):
self.t1.rollback()
self.t2.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
class TestAuditableStoreEmbeded(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
self.g.add((EX.s0, EX.p0, EX.o0))
self.g.add((EX.s0, EX.p0, EX.o0bis))
- self.t1 = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t1 = Graph(AuditableStore(self.g.store), self.g.identifier)
self.t1.add((EX.s1, EX.p1, EX.o1))
self.t1.remove((EX.s0, EX.p0, EX.o0bis))
- self.t2 = Graph(AuditableStore(self.t1.store),
- self.t1.identifier)
+ self.t2 = Graph(AuditableStore(self.t1.store), self.t1.identifier)
self.t2.add((EX.s2, EX.p2, EX.o2))
self.t2.remove((EX.s1, EX.p1, EX.o1))
def test_commit_commit(self):
- self.assert_graph_equal(self.t2, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(
+ self.t2, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),]
+ )
self.t2.commit()
- self.assert_graph_equal(self.t1, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(
+ self.t1, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),]
+ )
self.t1.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),])
def test_commit_rollback(self):
self.t2.commit()
self.t1.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_rollback_commit(self):
self.t2.rollback()
- self.assert_graph_equal(self.t1, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.t1, [(EX.s0, EX.p0, EX.o0), (EX.s1, EX.p1, EX.o1),]
+ )
self.t1.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0), (EX.s1, EX.p1, EX.o1),])
def test_rollback_rollback(self):
self.t2.rollback()
self.t1.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
diff --git a/test/test_batch_add.py b/test/test_batch_add.py
index 1747100c..43457e5e 100644
--- a/test/test_batch_add.py
+++ b/test/test_batch_add.py
@@ -21,15 +21,14 @@ class TestBatchAddGraph(unittest.TestCase):
BatchAddGraph(Graph(), batch_size=-12)
def test_exit_submits_partial_batch(self):
- trip = (URIRef('a'), URIRef('b'), URIRef('c'))
+ trip = (URIRef("a"), URIRef("b"), URIRef("c"))
g = Graph()
with BatchAddGraph(g, batch_size=10) as cut:
cut.add(trip)
self.assertIn(trip, g)
def test_add_more_than_batch_size(self):
- trips = [(URIRef('a'), URIRef('b%d' % i), URIRef('c%d' % i))
- for i in range(12)]
+ trips = [(URIRef("a"), URIRef("b%d" % i), URIRef("c%d" % i)) for i in range(12)]
g = Graph()
with BatchAddGraph(g, batch_size=10) as cut:
for trip in trips:
@@ -37,38 +36,37 @@ class TestBatchAddGraph(unittest.TestCase):
self.assertEqual(12, len(g))
def test_add_quad_for_non_conjunctive_empty(self):
- '''
+ """
Graph drops quads that don't match our graph. Make sure we do the same
- '''
- g = Graph(identifier='http://example.org/g')
- badg = Graph(identifier='http://example.org/badness')
+ """
+ g = Graph(identifier="http://example.org/g")
+ badg = Graph(identifier="http://example.org/badness")
with BatchAddGraph(g) as cut:
- cut.add((URIRef('a'), URIRef('b'), URIRef('c'), badg))
+ cut.add((URIRef("a"), URIRef("b"), URIRef("c"), badg))
self.assertEqual(0, len(g))
def test_add_quad_for_non_conjunctive_pass_on_context_matches(self):
g = Graph()
with BatchAddGraph(g) as cut:
- cut.add((URIRef('a'), URIRef('b'), URIRef('c'), g))
+ cut.add((URIRef("a"), URIRef("b"), URIRef("c"), g))
self.assertEqual(1, len(g))
def test_no_addN_on_exception(self):
- '''
+ """
Even if we've added triples so far, it may be that attempting to add the last
batch is the cause of our exception, so we don't want to attempt again
- '''
+ """
g = Graph()
- trips = [(URIRef('a'), URIRef('b%d' % i), URIRef('c%d' % i))
- for i in range(12)]
+ trips = [(URIRef("a"), URIRef("b%d" % i), URIRef("c%d" % i)) for i in range(12)]
try:
with BatchAddGraph(g, batch_size=10) as cut:
for i, trip in enumerate(trips):
cut.add(trip)
if i == 11:
- raise Exception('myexc')
+ raise Exception("myexc")
except Exception as e:
- if str(e) != 'myexc':
+ if str(e) != "myexc":
pass
self.assertEqual(10, len(g))
@@ -81,8 +79,9 @@ class TestBatchAddGraph(unittest.TestCase):
self.counts.append(sum(1 for _ in quads))
g = MockGraph()
- quads = [(URIRef('a'), URIRef('b%d' % i), URIRef('c%d' % i), g)
- for i in range(12)]
+ quads = [
+ (URIRef("a"), URIRef("b%d" % i), URIRef("c%d" % i), g) for i in range(12)
+ ]
with BatchAddGraph(g, batch_size=10, batch_addn=True) as cut:
cut.addN(quads)
diff --git a/test/test_bnode_ncname.py b/test/test_bnode_ncname.py
index 7017ef09..3e621579 100644
--- a/test/test_bnode_ncname.py
+++ b/test/test_bnode_ncname.py
@@ -6,6 +6,7 @@ from hashlib import md5
try:
from uuid import uuid4
except ImportError:
+
def uuid4():
"""
Generates a uuid on behalf of Python 2.4
@@ -14,12 +15,13 @@ except ImportError:
import os
import time
import socket
+
try:
preseed = os.urandom(16)
except NotImplementedError:
- preseed = ''
+ preseed = ""
# Have doubts about this. random.seed will just hash the string
- random.seed('%s%s%s' % (preseed, os.getpid(), time.time()))
+ random.seed("%s%s%s" % (preseed, os.getpid(), time.time()))
del preseed
t = int(time.time() * 1000.0)
r = int(random.random() * 100000000000000000)
@@ -28,10 +30,11 @@ except ImportError:
except:
# if we can't get a network address, just imagine one
a = random.random() * 100000000000000000
- strdata = str(t) + ' ' + str(r) + ' ' + str(a)
- data = md5(strdata.encode('ascii')).hexdigest()
+ strdata = str(t) + " " + str(r) + " " + str(a)
+ data = md5(strdata.encode("ascii")).hexdigest()
yield data
+
# Adapted from http://icodesnip.com/snippet/python/simple-universally-unique-id-uuid-or-guid
@@ -69,13 +72,14 @@ def is_ncname(value):
>>> from rdflib import BNode
>>> assert is_ncname(BNode(_sn_gen=bnode_uuid, _prefix="urn:uuid:")) == True
"""
- ncnameexp = re.compile('[A-Za-z][A-Za-z0-9]*')
+ ncnameexp = re.compile("[A-Za-z][A-Za-z0-9]*")
if ncnameexp.match(value):
return True
else:
return False
-if __name__ == '__main__':
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
diff --git a/test/test_canonicalization.py b/test/test_canonicalization.py
index df010432..12dd657f 100644
--- a/test/test_canonicalization.py
+++ b/test/test_canonicalization.py
@@ -19,39 +19,56 @@ def get_digest_value(rdf, mimetype):
def negative_graph_match_test():
- '''Test of FRIR identifiers against tricky RDF graphs with blank nodes.'''
+ """Test of FRIR identifiers against tricky RDF graphs with blank nodes."""
testInputs = [
- [str('''@prefix : <http://example.org/ns#> .
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
[ :label "Same" ].
- '''),
- str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
[ :label "Same" ],
[ :label "Same" ].
- '''),
- False
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ False,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
<http://example.org/a>.
- '''),
- str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
<http://example.org/a>,
<http://example.org/a>.
- '''),
- True
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ True,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
:linear_two_step_symmetry_start :related [ :related [ :related :linear_two_step_symmatry_end]],
- [ :related [ :related :linear_two_step_symmatry_end]].'''),
- str('''@prefix : <http://example.org/ns#> .
+ [ :related [ :related :linear_two_step_symmatry_end]]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
:linear_two_step_symmetry_start :related [ :related [ :related :linear_two_step_symmatry_end]],
- [ :related [ :related :linear_two_step_symmatry_end]].'''),
- True
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ [ :related [ :related :linear_two_step_symmatry_end]]."""
+ ),
+ True,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -60,8 +77,10 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -72,11 +91,14 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- False
- ],
+ ]."""
+ ),
+ False,
+ ],
# This test fails because the algorithm purposefully breaks the symmetry of symetric
- [str('''@prefix : <http://example.org/ns#> .
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -85,8 +107,10 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -95,10 +119,13 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- True
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ True,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:label "foo";
@@ -108,8 +135,10 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -118,10 +147,13 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- False
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ False,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:0001 :rel _:0003, _:0004.
_:0002 :rel _:0005, _:0006.
_:0003 :rel _:0001, _:0007, _:0010.
@@ -132,8 +164,10 @@ def negative_graph_match_test():
_:0008 :rel _:0004, _:0006, _:0010.
_:0009 :rel _:0004, _:0005, _:0007.
_:0010 :rel _:0003, _:0006, _:0008.
- '''),
- str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:0001 :rel _:0003, _:0004.
_:0002 :rel _:0005, _:0006.
_:0003 :rel _:0001, _:0007, _:0010.
@@ -144,9 +178,10 @@ def negative_graph_match_test():
_:0005 :rel _:0002, _:0007, _:0009.
_:0006 :rel _:0002, _:0008, _:0010.
_:0007 :rel _:0003, _:0005, _:0009.
- '''),
- True
- ],
+ """
+ ),
+ True,
+ ],
]
def fn(rdf1, rdf2, identical):
@@ -157,6 +192,7 @@ def negative_graph_match_test():
print(rdf2)
print(digest2)
assert (digest1 == digest2) == identical
+
for inputs in testInputs:
yield fn, inputs[0], inputs[1], inputs[2]
@@ -165,66 +201,30 @@ def test_issue494_collapsing_bnodes():
"""Test for https://github.com/RDFLib/rdflib/issues/494 collapsing BNodes"""
g = Graph()
g += [
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['object'],
- URIRef(u'source')),
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['predicate'],
- BNode('vcb3')),
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['subject'],
- BNode('vcb2')),
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['object'],
- URIRef(u'target')),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['predicate'],
- BNode('vcb0')),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['subject'],
- URIRef(u'source')),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['object'],
- BNode('vr0KcS4')),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['predicate'],
- BNode('vrby3JV')),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['subject'],
- URIRef(u'source')),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['object'],
- URIRef(u'source')),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['predicate'],
- BNode('vcb5')),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['subject'],
- URIRef(u'target')),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['object'],
- URIRef(u'source')),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['predicate'],
- BNode('vcb4')),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['subject'],
- URIRef(u'source')),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['type'],
- RDF['Statement']),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["object"], URIRef(u"source")),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["predicate"], BNode("vcb3")),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["subject"], BNode("vcb2")),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["type"], RDF["Statement"]),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["object"], URIRef(u"target")),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["predicate"], BNode("vcb0")),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["subject"], URIRef(u"source")),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["type"], RDF["Statement"]),
+ (BNode("Ndb804ba690a64b3dbb9063c68d5e3550"), RDF["object"], BNode("vr0KcS4")),
+ (
+ BNode("Ndb804ba690a64b3dbb9063c68d5e3550"),
+ RDF["predicate"],
+ BNode("vrby3JV"),
+ ),
+ (BNode("Ndb804ba690a64b3dbb9063c68d5e3550"), RDF["subject"], URIRef(u"source")),
+ (BNode("Ndb804ba690a64b3dbb9063c68d5e3550"), RDF["type"], RDF["Statement"]),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["object"], URIRef(u"source")),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["predicate"], BNode("vcb5")),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["subject"], URIRef(u"target")),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["type"], RDF["Statement"]),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["object"], URIRef(u"source")),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["predicate"], BNode("vcb4")),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["subject"], URIRef(u"source")),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["type"], RDF["Statement"]),
]
# print('graph length: %d, nodes: %d' % (len(g), len(g.all_nodes())))
@@ -232,10 +232,10 @@ def test_issue494_collapsing_bnodes():
# for triple_bnode in g.subjects(RDF['type'], RDF['Statement']):
# print(len(list(g.triples([triple_bnode, None, None]))))
# print('all node degrees:')
- g_node_degs = sorted([
- len(list(g.triples([node, None, None])))
- for node in g.all_nodes()
- ], reverse=True)
+ g_node_degs = sorted(
+ [len(list(g.triples([node, None, None]))) for node in g.all_nodes()],
+ reverse=True,
+ )
# print(g_node_degs)
cg = to_canonical_graph(g)
@@ -244,21 +244,20 @@ def test_issue494_collapsing_bnodes():
# for triple_bnode in cg.subjects(RDF['type'], RDF['Statement']):
# print(len(list(cg.triples([triple_bnode, None, None]))))
# print('all node degrees:')
- cg_node_degs = sorted([
- len(list(cg.triples([node, None, None])))
- for node in cg.all_nodes()
- ], reverse=True)
+ cg_node_degs = sorted(
+ [len(list(cg.triples([node, None, None]))) for node in cg.all_nodes()],
+ reverse=True,
+ )
# print(cg_node_degs)
- assert len(g) == len(cg), \
- 'canonicalization changed number of triples in graph'
- assert len(g.all_nodes()) == len(cg.all_nodes()), \
- 'canonicalization changed number of nodes in graph'
- assert len(list(g.subjects(RDF['type'], RDF['Statement']))) == \
- len(list(cg.subjects(RDF['type'], RDF['Statement']))), \
- 'canonicalization changed number of statements'
- assert g_node_degs == cg_node_degs, \
- 'canonicalization changed node degrees'
+ assert len(g) == len(cg), "canonicalization changed number of triples in graph"
+ assert len(g.all_nodes()) == len(
+ cg.all_nodes()
+ ), "canonicalization changed number of nodes in graph"
+ assert len(list(g.subjects(RDF["type"], RDF["Statement"]))) == len(
+ list(cg.subjects(RDF["type"], RDF["Statement"]))
+ ), "canonicalization changed number of statements"
+ assert g_node_degs == cg_node_degs, "canonicalization changed node degrees"
# counter for subject, predicate and object nodes
g_pos_counts = Counter(), Counter(), Counter()
@@ -274,8 +273,9 @@ def test_issue494_collapsing_bnodes():
cg_pos_counts[i][t] += 1
cg_count_signature = [sorted(c.values()) for c in cg_pos_counts]
- assert g_count_signature == cg_count_signature, \
- 'canonicalization changed node position counts'
+ assert (
+ g_count_signature == cg_count_signature
+ ), "canonicalization changed node position counts"
def test_issue682_signing_named_graphs():
@@ -294,11 +294,11 @@ def test_issue682_signing_named_graphs():
gmary = Graph(store=store, identifier=cmary)
- gmary.add((mary, ns['hasName'], Literal("Mary")))
- gmary.add((mary, ns['loves'], john))
+ gmary.add((mary, ns["hasName"], Literal("Mary")))
+ gmary.add((mary, ns["loves"], john))
gjohn = Graph(store=store, identifier=cjohn)
- gjohn.add((john, ns['hasName'], Literal("John")))
+ gjohn.add((john, ns["hasName"], Literal("John")))
ig = to_isomorphic(g)
igmary = to_isomorphic(gmary)
@@ -312,69 +312,109 @@ def test_issue682_signing_named_graphs():
def test_issue725_collapsing_bnodes_2():
g = Graph()
g += [
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v2')),
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v0')),
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v1')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v0')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v5')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v4')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- URIRef(u'urn:gp_learner:fixed_var:source')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v0')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v1')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v3')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement'))
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v2"),
+ ),
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v0"),
+ ),
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v1"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v0"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v5"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v4"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ URIRef(u"urn:gp_learner:fixed_var:source"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v0"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v1"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v3"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
]
- turtle = '''
+ turtle = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@@ -403,7 +443,7 @@ def test_issue725_collapsing_bnodes_2():
[] a rdf:Statement ;
rdf:object _:v1 ;
rdf:predicate [ ] ;
- rdf:subject <urn:gp_learner:fixed_var:target> .'''
+ rdf:subject <urn:gp_learner:fixed_var:target> ."""
# g = Graph()
# g.parse(data=turtle, format='turtle')
@@ -436,16 +476,16 @@ def test_issue725_collapsing_bnodes_2():
# [len(list(cg.triples([None, None, node]))) for node in cg.all_nodes()]))
# print(cg.serialize(format='n3'))
- assert (len(g.all_nodes()) == len(cg.all_nodes()))
+ assert len(g.all_nodes()) == len(cg.all_nodes())
cg = to_canonical_graph(g)
- assert len(g) == len(cg), \
- 'canonicalization changed number of triples in graph'
- assert len(g.all_nodes()) == len(cg.all_nodes()), \
- 'canonicalization changed number of nodes in graph'
- assert len(list(g.subjects(RDF['type'], RDF['Statement']))) == \
- len(list(cg.subjects(RDF['type'], RDF['Statement']))), \
- 'canonicalization changed number of statements'
+ assert len(g) == len(cg), "canonicalization changed number of triples in graph"
+ assert len(g.all_nodes()) == len(
+ cg.all_nodes()
+ ), "canonicalization changed number of nodes in graph"
+ assert len(list(g.subjects(RDF["type"], RDF["Statement"]))) == len(
+ list(cg.subjects(RDF["type"], RDF["Statement"]))
+ ), "canonicalization changed number of statements"
# counter for subject, predicate and object nodes
g_pos_counts = Counter(), Counter(), Counter()
@@ -460,5 +500,6 @@ def test_issue725_collapsing_bnodes_2():
cg_pos_counts[i][t] += 1
cg_count_signature = [sorted(c.values()) for c in cg_pos_counts]
- assert g_count_signature == cg_count_signature, \
- 'canonicalization changed node position counts'
+ assert (
+ g_count_signature == cg_count_signature
+ ), "canonicalization changed node position counts"
diff --git a/test/test_comparison.py b/test/test_comparison.py
index 3c8e50d4..8455598c 100644
--- a/test/test_comparison.py
+++ b/test/test_comparison.py
@@ -33,7 +33,6 @@ Ah... it's coming back to me...
class IdentifierEquality(unittest.TestCase):
-
def setUp(self):
self.uriref = URIRef("http://example.org/")
self.bnode = BNode()
@@ -66,7 +65,11 @@ class IdentifierEquality(unittest.TestCase):
self.assertEqual("foo" in CORE_SYNTAX_TERMS, False)
def testH(self):
- self.assertEqual(URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF") in CORE_SYNTAX_TERMS, True)
+ self.assertEqual(
+ URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF")
+ in CORE_SYNTAX_TERMS,
+ True,
+ )
def testI(self):
g = Graph()
diff --git a/test/test_conjunctive_graph.py b/test/test_conjunctive_graph.py
index 5c686027..41bf432f 100644
--- a/test/test_conjunctive_graph.py
+++ b/test/test_conjunctive_graph.py
@@ -19,9 +19,9 @@ def test_bnode_publicid():
g = ConjunctiveGraph()
b = BNode()
- data = '<d:d> <e:e> <f:f> .'
+ data = "<d:d> <e:e> <f:f> ."
print("Parsing %r into %r" % (data, b))
- g.parse(data=data, format='turtle', publicID=b)
+ g.parse(data=data, format="turtle", publicID=b)
triples = list(g.get_context(b).triples((None, None, None)))
if not triples:
@@ -36,8 +36,8 @@ def test_bnode_publicid():
def test_quad_contexts():
g = ConjunctiveGraph()
- a = URIRef('urn:a')
- b = URIRef('urn:b')
+ a = URIRef("urn:a")
+ b = URIRef("urn:b")
g.get_context(a).add((a, a, a))
g.addN([(b, b, b, b)])
@@ -57,11 +57,12 @@ def test_graph_ids():
yield check, dict(data=DATA, publicID=PUBLIC_ID, format="turtle")
- source = StringInputSource(DATA.encode('utf8'))
+ source = StringInputSource(DATA.encode("utf8"))
source.setPublicId(PUBLIC_ID)
- yield check, dict(source=source, format='turtle')
+ yield check, dict(source=source, format="turtle")
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
+
nose.main(defaultTest=__name__)
diff --git a/test/test_conneg.py b/test/test_conneg.py
index 04b63d53..b8eee3bc 100644
--- a/test/test_conneg.py
+++ b/test/test_conneg.py
@@ -56,16 +56,15 @@ class TestHTTPHandler(BaseHTTPRequestHandler):
self.send_header("Content-type", rct)
self.end_headers()
- self.wfile.write(content.encode('utf-8'))
+ self.wfile.write(content.encode("utf-8"))
def log_message(self, *args):
pass
-def runHttpServer(server_class=HTTPServer,
- handler_class=TestHTTPHandler):
+def runHttpServer(server_class=HTTPServer, handler_class=TestHTTPHandler):
"""Start a server than can handle 3 requests :)"""
- server_address = ('localhost', 12345)
+ server_address = ("localhost", 12345)
httpd = server_class(server_address, handler_class)
httpd.handle_request()
@@ -87,5 +86,6 @@ if __name__ == "__main__":
import sys
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_conventions.py b/test/test_conventions.py
index 268047d7..11d7636a 100644
--- a/test/test_conventions.py
+++ b/test/test_conventions.py
@@ -12,10 +12,9 @@ modules should all be lower-case initial
class A(unittest.TestCase):
-
def module_names(self, path=None, names=None):
- skip_as_ignorably_private = ['embeddedRDF', 'OpenID', 'DublinCore']
+ skip_as_ignorably_private = ["embeddedRDF", "OpenID", "DublinCore"]
if path is None:
path = rdflib.__path__
@@ -23,13 +22,14 @@ class A(unittest.TestCase):
names = set()
# TODO: handle cases where len(path) is not 1
- assert len(path) == 1, "We're assuming the path has exactly one item in it for now"
+ assert (
+ len(path) == 1
+ ), "We're assuming the path has exactly one item in it for now"
path = path[0]
for importer, name, ispkg in pkgutil.iter_modules([path]):
if ispkg:
- result = self.module_names(path=os.path.join(path, name),
- names=names)
+ result = self.module_names(path=os.path.join(path, name), names=names)
names.union(result)
else:
if name != name.lower() and name not in skip_as_ignorably_private:
@@ -38,8 +38,7 @@ class A(unittest.TestCase):
def test_module_names(self):
names = self.module_names()
- self.assertTrue(
- names == set(), "module names '%s' are not lower case" % names)
+ self.assertTrue(names == set(), "module names '%s' are not lower case" % names)
if __name__ == "__main__":
diff --git a/test/test_core_sparqlstore.py b/test/test_core_sparqlstore.py
index 26c7554d..622e4a24 100644
--- a/test/test_core_sparqlstore.py
+++ b/test/test_core_sparqlstore.py
@@ -1,9 +1,10 @@
import unittest
from rdflib.graph import Graph
+
class TestSPARQLStoreGraphCore(unittest.TestCase):
- store_name = 'SPARQLStore'
+ store_name = "SPARQLStore"
path = "http://dbpedia.org/sparql"
storetest = True
create = False
@@ -21,5 +22,5 @@ class TestSPARQLStoreGraphCore(unittest.TestCase):
print("Done")
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_dataset.py b/test/test_dataset.py
index 9fcf424a..ef7eda76 100644
--- a/test/test_dataset.py
+++ b/test/test_dataset.py
@@ -22,12 +22,12 @@ from nose.exc import SkipTest
# THIS WILL DELETE ALL DATA IN THE /db dataset
-HOST = 'http://localhost:3030'
-DB = '/db/'
+HOST = "http://localhost:3030"
+DB = "/db/"
class DatasetTestCase(unittest.TestCase):
- store = 'default'
+ store = "default"
slow = True
tmppath = None
@@ -35,11 +35,9 @@ class DatasetTestCase(unittest.TestCase):
try:
self.graph = Dataset(store=self.store)
except ImportError:
- raise SkipTest(
- "Dependencies for store '%s' not available!" % self.store)
+ raise SkipTest("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
- _, self.tmppath = mkstemp(
- prefix='test', dir='/tmp', suffix='.sqlite')
+ _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
elif self.store == "SPARQLUpdateStore":
root = HOST + DB
self.graph.open((root + "sparql", root + "update"))
@@ -48,17 +46,17 @@ class DatasetTestCase(unittest.TestCase):
if self.store != "SPARQLUpdateStore":
self.graph.open(self.tmppath, create=True)
- self.michel = URIRef(u'urn:michel')
- self.tarek = URIRef(u'urn:tarek')
- self.bob = URIRef(u'urn:bob')
- self.likes = URIRef(u'urn:likes')
- self.hates = URIRef(u'urn:hates')
- self.pizza = URIRef(u'urn:pizza')
- self.cheese = URIRef(u'urn:cheese')
+ self.michel = URIRef(u"urn:michel")
+ self.tarek = URIRef(u"urn:tarek")
+ self.bob = URIRef(u"urn:bob")
+ self.likes = URIRef(u"urn:likes")
+ self.hates = URIRef(u"urn:hates")
+ self.pizza = URIRef(u"urn:pizza")
+ self.cheese = URIRef(u"urn:cheese")
# Use regular URIs because SPARQL endpoints like Fuseki alter short names
- self.c1 = URIRef(u'urn:context-1')
- self.c2 = URIRef(u'urn:context-2')
+ self.c1 = URIRef(u"urn:context-1")
+ self.c2 = URIRef(u"urn:context-2")
# delete the graph for each test!
self.graph.remove((None, None, None))
@@ -89,8 +87,10 @@ class DatasetTestCase(unittest.TestCase):
# empty named graphs
if self.store != "SPARQLUpdateStore":
# added graph exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([self.c1, DATASET_DEFAULT_GRAPH_ID]),
+ )
# added graph is empty
self.assertEqual(len(g1), 0)
@@ -98,8 +98,10 @@ class DatasetTestCase(unittest.TestCase):
g1.add((self.tarek, self.likes, self.pizza))
# added graph still exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([self.c1, DATASET_DEFAULT_GRAPH_ID]),
+ )
# added graph contains one triple
self.assertEqual(len(g1), 1)
@@ -113,60 +115,70 @@ class DatasetTestCase(unittest.TestCase):
# empty named graphs
if self.store != "SPARQLUpdateStore":
# graph still exists, although empty
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([self.c1, DATASET_DEFAULT_GRAPH_ID]),
+ )
g.remove_graph(self.c1)
# graph is gone
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([DATASET_DEFAULT_GRAPH_ID]),
+ )
def testDefaultGraph(self):
# Something the default graph is read-only (e.g. TDB in union mode)
if self.store == "SPARQLUpdateStore":
- print("Please make sure updating the default graph "
- "is supported by your SPARQL endpoint")
+ print(
+ "Please make sure updating the default graph "
+ "is supported by your SPARQL endpoint"
+ )
self.graph.add((self.tarek, self.likes, self.pizza))
self.assertEqual(len(self.graph), 1)
# only default exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([DATASET_DEFAULT_GRAPH_ID]),
+ )
# removing default graph removes triples but not actual graph
self.graph.remove_graph(DATASET_DEFAULT_GRAPH_ID)
self.assertEqual(len(self.graph), 0)
# default still exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([DATASET_DEFAULT_GRAPH_ID]),
+ )
def testNotUnion(self):
# Union depends on the SPARQL endpoint configuration
if self.store == "SPARQLUpdateStore":
- print("Please make sure your SPARQL endpoint has not configured "
- "its default graph as the union of the named graphs")
+ print(
+ "Please make sure your SPARQL endpoint has not configured "
+ "its default graph as the union of the named graphs"
+ )
g1 = self.graph.graph(self.c1)
g1.add((self.tarek, self.likes, self.pizza))
- self.assertEqual(list(self.graph.objects(self.tarek, None)),
- [])
+ self.assertEqual(list(self.graph.objects(self.tarek, None)), [])
self.assertEqual(list(g1.objects(self.tarek, None)), [self.pizza])
# dynamically create classes for each registered Store
pluginname = None
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ('default', 'IOMemory', 'Auditable',
- 'Concurrent', 'SPARQLStore'):
+ if s.name in ("default", "IOMemory", "Auditable", "Concurrent", "SPARQLStore"):
continue # these are tested by default
if not s.getClass().graph_aware:
@@ -174,16 +186,18 @@ for s in plugin.plugins(pluginname, plugin.Store):
if s.name == "SPARQLUpdateStore":
from urllib.request import urlopen
+
try:
assert len(urlopen(HOST).read()) > 0
except:
sys.stderr.write("No SPARQL endpoint for %s (tests skipped)\n" % s.name)
continue
- locals()["t%d" % tests] = type("%sContextTestCase" % s.name, (
- DatasetTestCase,), {"store": s.name})
+ locals()["t%d" % tests] = type(
+ "%sContextTestCase" % s.name, (DatasetTestCase,), {"store": s.name}
+ )
tests += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
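
For reference, the Dataset behaviour exercised in test_dataset.py above amounts to a few calls; a minimal sketch against the default in-memory store (graph identifiers are illustrative):

    from rdflib import Dataset, URIRef
    from rdflib.graph import DATASET_DEFAULT_GRAPH_ID

    ds = Dataset()
    c1 = URIRef("urn:context-1")          # illustrative graph name
    g1 = ds.graph(c1)                     # create/look up a named graph
    g1.add((URIRef("urn:tarek"), URIRef("urn:likes"), URIRef("urn:pizza")))

    # both the named graph and the default graph are now contexts
    assert set(x.identifier for x in ds.contexts()) == {c1, DATASET_DEFAULT_GRAPH_ID}

    ds.remove_graph(c1)                   # drop the named graph again
    assert set(x.identifier for x in ds.contexts()) == {DATASET_DEFAULT_GRAPH_ID}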
diff --git a/test/test_datetime.py b/test/test_datetime.py
index a8b68995..d71fc392 100644
--- a/test/test_datetime.py
+++ b/test/test_datetime.py
@@ -13,8 +13,10 @@ from rdflib.namespace import XSD
class TestRelativeBase(unittest.TestCase):
def test_equality(self):
- x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
self.assertEqual(x == x, True)
def test_microseconds(self):
@@ -23,7 +25,7 @@ class TestRelativeBase(unittest.TestCase):
 # datetime with microseconds should be cast as a literal using
# XML Schema dateTime as the literal datatype
- self.assertEqual(str(l), '2009-06-15T23:37:06.522630')
+ self.assertEqual(str(l), "2009-06-15T23:37:06.522630")
self.assertEqual(l.datatype, XSD.dateTime)
dt2 = l.toPython()
@@ -31,45 +33,41 @@ class TestRelativeBase(unittest.TestCase):
def test_to_python(self):
dt = "2008-12-01T18:02:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
self.assertEqual(l.toPython().isoformat(), dt)
def test_timezone_z(self):
dt = "2008-12-01T18:02:00.522630Z"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
- self.assertEqual(datetime_isoformat(l.toPython(),
- DATE_EXT_COMPLETE + 'T' + '%H:%M:%S.%f' + TZ_EXT),
- dt)
- self.assertEqual(l.toPython().isoformat(),
- "2008-12-01T18:02:00.522630+00:00")
+ self.assertEqual(
+ datetime_isoformat(
+ l.toPython(), DATE_EXT_COMPLETE + "T" + "%H:%M:%S.%f" + TZ_EXT
+ ),
+ dt,
+ )
+ self.assertEqual(l.toPython().isoformat(), "2008-12-01T18:02:00.522630+00:00")
def test_timezone_offset(self):
dt = "2010-02-10T12:36:00+03:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
self.assertEqual(l.toPython().isoformat(), dt)
def test_timezone_offset_to_utc(self):
dt = "2010-02-10T12:36:00+03:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
utc_dt = l.toPython().astimezone(UTC)
- self.assertEqual(datetime_isoformat(utc_dt),
- "2010-02-10T09:36:00Z")
+ self.assertEqual(datetime_isoformat(utc_dt), "2010-02-10T09:36:00Z")
def test_timezone_offset_millisecond(self):
dt = "2011-01-16T19:39:18.239743+01:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
self.assertEqual(l.toPython().isoformat(), dt)
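
The xsd:dateTime round-tripping tested above boils down to the following; a small sketch (values are arbitrary):

    from datetime import datetime
    from rdflib import Literal
    from rdflib.namespace import XSD

    l = Literal("2008-12-01T18:02:00", datatype=XSD.dateTime)
    dt = l.toPython()                       # parsed into a datetime instance
    assert isinstance(dt, datetime)
    assert dt.isoformat() == "2008-12-01T18:02:00"

    # going the other way, a datetime value is typed as xsd:dateTime automatically
    l2 = Literal(datetime(2009, 6, 15, 23, 37, 6, 522630))
    assert l2.datatype == XSD.dateTime
    assert str(l2) == "2009-06-15T23:37:06.522630"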
diff --git a/test/test_dawg.py b/test/test_dawg.py
index b7343426..80289738 100644
--- a/test/test_dawg.py
+++ b/test/test_dawg.py
@@ -21,23 +21,21 @@ except:
defaultdict.__init__(self, int)
def most_common(self, N):
- return [x[0] for x in sorted(self.items(),
- key=itemgetter(1),
- reverse=True)[:10]]
+ return [
+ x[0] for x in sorted(self.items(), key=itemgetter(1), reverse=True)[:10]
+ ]
import datetime
import isodate
-from rdflib import (
- Dataset, Graph, URIRef, BNode)
+from rdflib import Dataset, Graph, URIRef, BNode
from rdflib.query import Result
from rdflib.compare import isomorphic
from rdflib.plugins import sparql as rdflib_sparql_module
-from rdflib.plugins.sparql.algebra import (
- pprintAlgebra, translateQuery, translateUpdate)
+from rdflib.plugins.sparql.algebra import pprintAlgebra, translateQuery, translateUpdate
from rdflib.plugins.sparql.parser import parseQuery, parseUpdate
from rdflib.plugins.sparql.results.rdfresults import RDFResultParser
from rdflib.plugins.sparql.update import evalUpdate
@@ -52,12 +50,15 @@ from nose import SkipTest
from .manifest import nose_tests, MF, UP
from .earl import report, add_test
+
+
def eq(a, b, msg):
- return eq_(a, b, msg + ': (%r!=%r)' % (a, b))
+ return eq_(a, b, msg + ": (%r!=%r)" % (a, b))
def setFlags():
import rdflib
+
# Several tests rely on lexical form of literals being kept!
rdflib.NORMALIZE_LITERALS = False
@@ -70,6 +71,7 @@ def setFlags():
def resetFlags():
import rdflib
+
# Several tests rely on lexical form of literals being kept!
rdflib.NORMALIZE_LITERALS = True
@@ -114,8 +116,12 @@ def bopen_read_close(fn):
try:
with open("skiptests.list") as skip_tests_f:
- skiptests = dict([(URIRef(x.strip().split(
- "\t")[0]), x.strip().split("\t")[1]) for x in skip_tests_f])
+ skiptests = dict(
+ [
+ (URIRef(x.strip().split("\t")[0]), x.strip().split("\t")[1])
+ for x in skip_tests_f
+ ]
+ )
except IOError:
skiptests = set()
@@ -163,8 +169,8 @@ def bindingsCompatible(a, b):
else:
m[b1] = y[v1]
else:
- # if y[v1]!=b1:
- # return False
+ # if y[v1]!=b1:
+ # return False
try:
if y[v1].neq(b1):
return False
@@ -191,9 +197,14 @@ def pp_binding(solutions):
"""
Pretty print a single binding - for less eye-strain when debugging
"""
- return "\n[" + ",\n\t".join("{" + ", ".join("%s:%s" % (
- x[0], x[1].n3()) for x in bindings.items()) + "}"
- for bindings in solutions) + "]\n"
+ return (
+ "\n["
+ + ",\n\t".join(
+ "{" + ", ".join("%s:%s" % (x[0], x[1].n3()) for x in bindings.items()) + "}"
+ for bindings in solutions
+ )
+ + "]\n"
+ )
@nottest
@@ -246,17 +257,21 @@ def update_test(t):
for x, l in resgraphdata:
resg.load(x, publicID=URIRef(l), format=_fmt(x))
- eq(set(x.identifier for x in g.contexts() if x != g.default_context),
- set(x.identifier for x in resg.contexts()
- if x != resg.default_context), 'named graphs in datasets do not match')
- assert isomorphic(g.default_context, resg.default_context), \
- 'Default graphs are not isomorphic'
+ eq(
+ set(x.identifier for x in g.contexts() if x != g.default_context),
+ set(x.identifier for x in resg.contexts() if x != resg.default_context),
+ "named graphs in datasets do not match",
+ )
+ assert isomorphic(
+ g.default_context, resg.default_context
+ ), "Default graphs are not isomorphic"
for x in g.contexts():
if x == g.default_context:
continue
- assert isomorphic(x, resg.get_context(x.identifier)), \
+ assert isomorphic(x, resg.get_context(x.identifier)), (
"Graphs with ID %s are not isomorphic" % x.identifier
+ )
except Exception as e:
@@ -305,7 +320,7 @@ def update_test(t):
print(bopen_read_close(x[7:]))
print("------------- MY RESULT ----------")
- print(g.serialize(format='trig'))
+ print(g.serialize(format="trig"))
try:
pq = translateUpdate(parseUpdate(bopen_read_close(query[7:])))
@@ -318,6 +333,7 @@ def update_test(t):
print(decodeStringEscape(str(e)))
import pdb
+
pdb.post_mortem(sys.exc_info()[2])
raise
@@ -332,7 +348,7 @@ def query_test(t):
if uri in skiptests:
raise SkipTest()
- def skip(reason='(none)'):
+ def skip(reason="(none)"):
print("Skipping %s from now on." % uri)
with bopen("skiptests.list", "a") as f:
f.write("%s\t%s\n" % (uri, reason))
@@ -350,91 +366,102 @@ def query_test(t):
# no result - syntax test
if syntax:
- translateQuery(parseQuery(
- bopen_read_close(query[7:])), base=urljoin(query, '.'))
+ translateQuery(
+ parseQuery(bopen_read_close(query[7:])), base=urljoin(query, ".")
+ )
else:
# negative syntax test
try:
- translateQuery(parseQuery(
- bopen_read_close(query[7:])), base=urljoin(query, '.'))
+ translateQuery(
+ parseQuery(bopen_read_close(query[7:])),
+ base=urljoin(query, "."),
+ )
- assert False, 'Query should not have parsed!'
+ assert False, "Query should not have parsed!"
except:
pass # it's fine - the query should not parse
return
# eval test - carry out query
- res2 = g.query(bopen_read_close(query[7:]), base=urljoin(query, '.'))
+ res2 = g.query(bopen_read_close(query[7:]), base=urljoin(query, "."))
- if resfile.endswith('ttl'):
+ if resfile.endswith("ttl"):
resg = Graph()
- resg.load(resfile, format='turtle', publicID=resfile)
+ resg.load(resfile, format="turtle", publicID=resfile)
res = RDFResultParser().parse(resg)
- elif resfile.endswith('rdf'):
+ elif resfile.endswith("rdf"):
resg = Graph()
resg.load(resfile, publicID=resfile)
res = RDFResultParser().parse(resg)
else:
with bopen(resfile[7:]) as f:
- if resfile.endswith('srj'):
- res = Result.parse(f, format='json')
- elif resfile.endswith('tsv'):
- res = Result.parse(f, format='tsv')
+ if resfile.endswith("srj"):
+ res = Result.parse(f, format="json")
+ elif resfile.endswith("tsv"):
+ res = Result.parse(f, format="tsv")
- elif resfile.endswith('csv'):
- res = Result.parse(f, format='csv')
+ elif resfile.endswith("csv"):
+ res = Result.parse(f, format="csv")
# CSV is lossy, round-trip our own resultset to
# lose the same info :)
# write bytes, read strings...
s = BytesIO()
- res2.serialize(s, format='csv')
+ res2.serialize(s, format="csv")
s.seek(0)
- res2 = Result.parse(s, format='csv')
+ res2 = Result.parse(s, format="csv")
s.close()
else:
- res = Result.parse(f, format='xml')
+ res = Result.parse(f, format="xml")
if not DETAILEDASSERT:
- eq(res.type, res2.type, 'Types do not match')
- if res.type == 'SELECT':
- eq(set(res.vars), set(res2.vars), 'Vars do not match')
- comp = bindingsCompatible(
- set(res),
- set(res2)
- )
- assert comp, 'Bindings do not match'
- elif res.type == 'ASK':
- eq(res.askAnswer, res2.askAnswer, 'Ask answer does not match')
- elif res.type in ('DESCRIBE', 'CONSTRUCT'):
- assert isomorphic(
- res.graph, res2.graph), 'graphs are not isomorphic!'
+ eq(res.type, res2.type, "Types do not match")
+ if res.type == "SELECT":
+ eq(set(res.vars), set(res2.vars), "Vars do not match")
+ comp = bindingsCompatible(set(res), set(res2))
+ assert comp, "Bindings do not match"
+ elif res.type == "ASK":
+ eq(res.askAnswer, res2.askAnswer, "Ask answer does not match")
+ elif res.type in ("DESCRIBE", "CONSTRUCT"):
+ assert isomorphic(res.graph, res2.graph), "graphs are not isomorphic!"
else:
- raise Exception('Unknown result type: %s' % res.type)
+ raise Exception("Unknown result type: %s" % res.type)
else:
- eq(res.type, res2.type,
- 'Types do not match: %r != %r' % (res.type, res2.type))
- if res.type == 'SELECT':
- eq(set(res.vars),
- set(res2.vars), 'Vars do not match: %r != %r' % (
- set(res.vars), set(res2.vars)))
- assert bindingsCompatible(
- set(res),
- set(res2)
- ), 'Bindings do not match: \nexpected:\n%s\n!=\ngot:\n%s' % (
- res.serialize(format='txt', namespace_manager=g.namespace_manager),
- res2.serialize(format='txt', namespace_manager=g.namespace_manager))
- elif res.type == 'ASK':
- eq(res.askAnswer,
- res2.askAnswer, "Ask answer does not match: %r != %r" % (
- res.askAnswer, res2.askAnswer))
- elif res.type in ('DESCRIBE', 'CONSTRUCT'):
- assert isomorphic(
- res.graph, res2.graph), 'graphs are not isomorphic!'
+ eq(
+ res.type,
+ res2.type,
+ "Types do not match: %r != %r" % (res.type, res2.type),
+ )
+ if res.type == "SELECT":
+ eq(
+ set(res.vars),
+ set(res2.vars),
+ "Vars do not match: %r != %r" % (set(res.vars), set(res2.vars)),
+ )
+ assert bindingsCompatible(set(res), set(res2)), (
+ "Bindings do not match: \nexpected:\n%s\n!=\ngot:\n%s"
+ % (
+ res.serialize(
+ format="txt", namespace_manager=g.namespace_manager
+ ),
+ res2.serialize(
+ format="txt", namespace_manager=g.namespace_manager
+ ),
+ )
+ )
+ elif res.type == "ASK":
+ eq(
+ res.askAnswer,
+ res2.askAnswer,
+ "Ask answer does not match: %r != %r"
+ % (res.askAnswer, res2.askAnswer),
+ )
+ elif res.type in ("DESCRIBE", "CONSTRUCT"):
+ assert isomorphic(res.graph, res2.graph), "graphs are not isomorphic!"
else:
- raise Exception('Unknown result type: %s' % res.type)
+ raise Exception("Unknown result type: %s" % res.type)
except Exception as e:
@@ -478,13 +505,14 @@ def query_test(t):
try:
pq = parseQuery(bopen_read_close(query[7:]))
print("----------------- Parsed ------------------")
- pprintAlgebra(translateQuery(pq, base=urljoin(query, '.')))
+ pprintAlgebra(translateQuery(pq, base=urljoin(query, ".")))
except:
print("(parser error)")
print(decodeStringEscape(str(e)))
import pdb
+
pdb.post_mortem(sys.exc_info()[2])
# pdb.set_trace()
# nose.tools.set_trace()
@@ -496,7 +524,6 @@ testers = {
MF.UpdateEvaluationTest: update_test,
MF.PositiveUpdateSyntaxTest11: update_test,
MF.NegativeUpdateSyntaxTest11: update_test,
-
MF.QueryEvaluationTest: query_test,
MF.NegativeSyntaxTest11: query_test,
MF.PositiveSyntaxTest11: query_test,
@@ -523,10 +550,11 @@ def test_dawg():
resetFlags()
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
import time
+
start = time.time()
if len(sys.argv) > 1:
NAME = sys.argv[1]
@@ -561,6 +589,7 @@ if __name__ == '__main__':
except:
add_test(t[0], "failed", "error")
import traceback
+
traceback.print_exc()
sys.stderr.write("%s\n" % t[0])
@@ -594,12 +623,13 @@ if __name__ == '__main__':
e_sum = sum(errors.values())
if success + f_sum + e_sum + skip != i:
- print("(Something is wrong, %d!=%d)" % (
- success + f_sum + e_sum + skip, i))
+ print("(Something is wrong, %d!=%d)" % (success + f_sum + e_sum + skip, i))
- print("\n%d tests, %d passed, %d failed, %d errors, \
- %d skipped (%.2f%% success)" % (
- i, success, f_sum, e_sum, skip, 100. * success / i))
+ print(
+ "\n%d tests, %d passed, %d failed, %d errors, \
+ %d skipped (%.2f%% success)"
+ % (i, success, f_sum, e_sum, skip, 100.0 * success / i)
+ )
print("Took %.2fs" % (time.time() - start))
if not NAME:
@@ -609,12 +639,12 @@ if __name__ == '__main__':
with open("testruns.txt", "a") as tf:
tf.write(
"%s\n%d tests, %d passed, %d failed, %d errors, %d "
- "skipped (%.2f%% success)\n\n" % (
- now, i, success, f_sum, e_sum, skip, 100. * success / i)
+ "skipped (%.2f%% success)\n\n"
+ % (now, i, success, f_sum, e_sum, skip, 100.0 * success / i)
)
- earl_report = 'test_reports/rdflib_sparql-%s.ttl' % now.replace(":", "")
+ earl_report = "test_reports/rdflib_sparql-%s.ttl" % now.replace(":", "")
- report.serialize(earl_report, format='n3')
- report.serialize('test_reports/rdflib_sparql-latest.ttl', format='n3')
+ report.serialize(earl_report, format="n3")
+ report.serialize("test_reports/rdflib_sparql-latest.ttl", format="n3")
print("Wrote EARL-report to '%s'" % earl_report)
diff --git a/test/test_diff.py b/test/test_diff.py
index bf49dd9d..7e4db728 100644
--- a/test/test_diff.py
+++ b/test/test_diff.py
@@ -13,7 +13,7 @@ class TestDiff(unittest.TestCase):
def testA(self):
"""with bnode"""
g = rdflib.Graph()
- g.add((rdflib.BNode(), rdflib.URIRef("urn:p"), rdflib.Literal(u'\xe9')))
+ g.add((rdflib.BNode(), rdflib.URIRef("urn:p"), rdflib.Literal(u"\xe9")))
diff = graph_diff(g, g)
@@ -21,7 +21,7 @@ class TestDiff(unittest.TestCase):
"""Curiously, this one passes, even before the fix in issue 151"""
g = rdflib.Graph()
- g.add((rdflib.URIRef("urn:a"), rdflib.URIRef("urn:p"), rdflib.Literal(u'\xe9')))
+ g.add((rdflib.URIRef("urn:a"), rdflib.URIRef("urn:p"), rdflib.Literal(u"\xe9")))
diff = graph_diff(g, g)
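
graph_diff itself returns three graphs (shared, only-in-first, only-in-second); a minimal sketch of what the tests above rely on:

    import rdflib
    from rdflib.compare import graph_diff

    g = rdflib.Graph()
    g.add((rdflib.URIRef("urn:a"), rdflib.URIRef("urn:p"), rdflib.Literal(u"\xe9")))

    in_both, in_first, in_second = graph_diff(g, g)
    assert len(in_first) == 0 and len(in_second) == 0   # identical graphs: no one-sided triples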
diff --git a/test/test_duration.py b/test/test_duration.py
index 07542a45..cdea7ab7 100644
--- a/test/test_duration.py
+++ b/test/test_duration.py
@@ -30,13 +30,15 @@ class TestDuration(unittest.TestCase):
def test_duration_le(self):
self.assertTrue(
- Literal("P4DT5H6M7S", datatype=XSD.duration) < Literal("P8DT10H12M14S", datatype=XSD.duration)
+ Literal("P4DT5H6M7S", datatype=XSD.duration)
+ < Literal("P8DT10H12M14S", datatype=XSD.duration)
)
def test_duration_sum(self):
self.assertEqual(
- Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration) + Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration).toPython(),
- Literal("P2Y4M8DT10H12M14S", datatype=XSD.duration)
+ Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration)
+ + Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration).toPython(),
+ Literal("P2Y4M8DT10H12M14S", datatype=XSD.duration),
)
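
xsd:duration literals are handled via the isodate package; a small sketch of the comparison case above (assuming isodate is installed, as rdflib requires):

    from rdflib import Literal
    from rdflib.namespace import XSD

    short = Literal("P4DT5H6M7S", datatype=XSD.duration)
    long_ = Literal("P8DT10H12M14S", datatype=XSD.duration)

    assert short < long_                    # compared via the parsed duration values
    print(short.toPython())                 # typically an isodate duration / timedelta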
diff --git a/test/test_empty_xml_base.py b/test/test_empty_xml_base.py
index 75e564a7..2f3364b8 100644
--- a/test/test_empty_xml_base.py
+++ b/test/test_empty_xml_base.py
@@ -13,7 +13,7 @@ from io import StringIO
import unittest
-FOAF = Namespace('http://xmlns.com/foaf/0.1/')
+FOAF = Namespace("http://xmlns.com/foaf/0.1/")
test_data = """
<rdf:RDF
@@ -36,8 +36,8 @@ test_data2 = """
</rdf:RDF>"""
-baseUri = URIRef('http://example.com/')
-baseUri2 = URIRef('http://example.com/foo/bar')
+baseUri = URIRef("http://example.com/")
+baseUri2 = URIRef("http://example.com/foo/bar")
class TestEmptyBase(unittest.TestCase):
@@ -46,10 +46,13 @@ class TestEmptyBase(unittest.TestCase):
self.graph.parse(StringIO(test_data), publicID=baseUri)
def test_base_ref(self):
- self.assertTrue(len(self.graph) == 1,
- "There should be at least one statement in the graph")
- self.assertTrue((baseUri, RDF.type, FOAF.Document) in self.graph,
- "There should be a triple with %s as the subject" % baseUri)
+ self.assertTrue(
+ len(self.graph) == 1, "There should be at least one statement in the graph"
+ )
+ self.assertTrue(
+ (baseUri, RDF.type, FOAF.Document) in self.graph,
+ "There should be a triple with %s as the subject" % baseUri,
+ )
class TestRelativeBase(unittest.TestCase):
@@ -58,11 +61,14 @@ class TestRelativeBase(unittest.TestCase):
self.graph.parse(StringIO(test_data2), publicID=baseUri2)
def test_base_ref(self):
- self.assertTrue(len(self.graph) == 1,
- "There should be at least one statement in the graph")
- resolvedBase = URIRef('http://example.com/baz')
- self.assertTrue((resolvedBase, RDF.type, FOAF.Document) in self.graph,
- "There should be a triple with %s as the subject" % resolvedBase)
+ self.assertTrue(
+ len(self.graph) == 1, "There should be at least one statement in the graph"
+ )
+ resolvedBase = URIRef("http://example.com/baz")
+ self.assertTrue(
+ (resolvedBase, RDF.type, FOAF.Document) in self.graph,
+ "There should be a triple with %s as the subject" % resolvedBase,
+ )
if __name__ == "__main__":
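
Both cases above check relative-URI resolution against the publicID; a stand-alone sketch mirroring the empty-xml:base case with a hypothetical document:

    from io import StringIO
    from rdflib import Graph, URIRef, RDF, Namespace

    FOAF = Namespace("http://xmlns.com/foaf/0.1/")
    data = """<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
                       xmlns:foaf="http://xmlns.com/foaf/0.1/" xml:base="">
      <foaf:Document rdf:about=""/>
    </rdf:RDF>"""

    g = Graph()
    g.parse(StringIO(data), publicID=URIRef("http://example.com/"), format="xml")
    # the empty base falls back to the publicID, so rdf:about="" resolves to it
    assert (URIRef("http://example.com/"), RDF.type, FOAF.Document) in g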
diff --git a/test/test_evaluate_bind.py b/test/test_evaluate_bind.py
index bd4ea440..382b4ed5 100644
--- a/test/test_evaluate_bind.py
+++ b/test/test_evaluate_bind.py
@@ -8,19 +8,29 @@ from rdflib import Graph, URIRef, Literal, Variable
def test_bind():
base = "http://example.org/"
g = Graph()
- g.add((URIRef(
- base + "thing"), URIRef(base + "ns#comment"), Literal("anything")))
+ g.add((URIRef(base + "thing"), URIRef(base + "ns#comment"), Literal("anything")))
def check(expr, var, obj):
- r = g.query("""
+ r = g.query(
+ """
prefix : <http://example.org/ns#>
- select * where { ?s ?p ?o . %s } """ % expr)
+ select * where { ?s ?p ?o . %s } """
+ % expr
+ )
assert r.bindings[0][Variable(var)] == obj
- yield (check, 'bind("thing" as ?name)', 'name', Literal("thing"))
+ yield (check, 'bind("thing" as ?name)', "name", Literal("thing"))
- yield (check, 'bind(<http://example.org/other> as ?other)', 'other',
- URIRef("http://example.org/other"))
+ yield (
+ check,
+ "bind(<http://example.org/other> as ?other)",
+ "other",
+ URIRef("http://example.org/other"),
+ )
- yield (check, "bind(:Thing as ?type)", 'type',
- URIRef("http://example.org/ns#Thing"))
+ yield (
+ check,
+ "bind(:Thing as ?type)",
+ "type",
+ URIRef("http://example.org/ns#Thing"),
+ )
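
Each yielded check above runs a query of this shape; a minimal standalone version using the same example URIs:

    from rdflib import Graph, URIRef, Literal, Variable

    base = "http://example.org/"
    g = Graph()
    g.add((URIRef(base + "thing"), URIRef(base + "ns#comment"), Literal("anything")))

    r = g.query(
        """
        prefix : <http://example.org/ns#>
        select * where { ?s ?p ?o . bind(:Thing as ?type) }"""
    )
    assert r.bindings[0][Variable("type")] == URIRef("http://example.org/ns#Thing")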
diff --git a/test/test_events.py b/test/test_events.py
index f7f706a9..6b413781 100644
--- a/test/test_events.py
+++ b/test/test_events.py
@@ -1,4 +1,3 @@
-
import unittest
from rdflib import events
@@ -24,7 +23,6 @@ def subscribe_all(caches):
class Cache(events.Dispatcher):
-
def __init__(self, data=None):
if data is None:
data = {}
@@ -54,18 +52,17 @@ class Cache(events.Dispatcher):
class EventTestCase(unittest.TestCase):
-
def testEvents(self):
c1 = Cache()
c2 = Cache()
c3 = Cache()
subscribe_all([c1, c2, c3])
- c1['bob'] = 'uncle'
- assert c2['bob'] == 'uncle'
- assert c3['bob'] == 'uncle'
- del c3['bob']
- assert ('bob' in c1) == False
- assert ('bob' in c2) == False
+ c1["bob"] = "uncle"
+ assert c2["bob"] == "uncle"
+ assert c3["bob"] == "uncle"
+ del c3["bob"]
+ assert ("bob" in c1) == False
+ assert ("bob" in c2) == False
if __name__ == "__main__":
diff --git a/test/test_expressions.py b/test/test_expressions.py
index d88d7766..1323e4fc 100644
--- a/test/test_expressions.py
+++ b/test/test_expressions.py
@@ -24,88 +24,89 @@ def _eval(e, ctx=None):
def _translate(e):
- return simplify(traverse(
- e, visitPost=partial(translatePName, prologue=Prologue())))
+ return simplify(traverse(e, visitPost=partial(translatePName, prologue=Prologue())))
def testRegex():
- assert _eval(
- _translate((p.Expression.parseString('REGEX("zxcabczxc","abc")')[0])))
+ assert _eval(_translate((p.Expression.parseString('REGEX("zxcabczxc","abc")')[0])))
- eq(bool(_eval(_translate(
- (p.Expression.parseString('REGEX("zxczxc","abc")')[0])))), False)
+ eq(
+ bool(_eval(_translate((p.Expression.parseString('REGEX("zxczxc","abc")')[0])))),
+ False,
+ )
- assert _eval(_translate(
- (p.Expression.parseString('REGEX("bbbaaaaabbb","ba*b")')[0])))
+ assert _eval(
+ _translate((p.Expression.parseString('REGEX("bbbaaaaabbb","ba*b")')[0]))
+ )
def test_arithmetic():
- eq(_eval(_translate((p.Expression.parseString('2+3')[0]))).value, 5)
- eq(_eval(_translate((p.Expression.parseString('3-2')[0]))).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("2+3")[0]))).value, 5)
+ eq(_eval(_translate((p.Expression.parseString("3-2")[0]))).value, 1)
- eq(_eval(_translate((p.Expression.parseString('2*3')[0]))).value, 6)
- eq(_eval(_translate((p.Expression.parseString('4/2')[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("2*3")[0]))).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("4/2")[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('2+2+2')[0]))).value, 6)
- eq(_eval(_translate((p.Expression.parseString('2-2+2')[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('(2-2)+2')[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('2-(2+2)')[0]))).value, -2)
+ eq(_eval(_translate((p.Expression.parseString("2+2+2")[0]))).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("2-2+2")[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("(2-2)+2")[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("2-(2+2)")[0]))).value, -2)
- eq(_eval(_translate((p.Expression.parseString('2*2*2')[0]))).value, 8)
- eq(_eval(_translate((p.Expression.parseString('4/2*2')[0]))).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/4*2')[0]))).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/(4*2)')[0]))).value, 1)
- eq(_eval(_translate((p.Expression.parseString('(2/2)*2')[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('4/(2*2)')[0]))).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("2*2*2")[0]))).value, 8)
+ eq(_eval(_translate((p.Expression.parseString("4/2*2")[0]))).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/4*2")[0]))).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/(4*2)")[0]))).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("(2/2)*2")[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("4/(2*2)")[0]))).value, 1)
- eq(_eval(_translate((p.Expression.parseString('2+3*2')[0]))).value, 8)
- eq(_eval(_translate((p.Expression.parseString('(2+3)*2')[0]))).value, 10)
- eq(_eval(_translate((p.Expression.parseString('2+4/2')[0]))).value, 4)
- eq(_eval(_translate((p.Expression.parseString('(2+4)/2')[0]))).value, 3)
+ eq(_eval(_translate((p.Expression.parseString("2+3*2")[0]))).value, 8)
+ eq(_eval(_translate((p.Expression.parseString("(2+3)*2")[0]))).value, 10)
+ eq(_eval(_translate((p.Expression.parseString("2+4/2")[0]))).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("(2+4)/2")[0]))).value, 3)
def test_arithmetic_var():
ctx = QueryContext()
- ctx[Variable('x')] = Literal(2)
+ ctx[Variable("x")] = Literal(2)
- eq(_eval(_translate((p.Expression.parseString('2+?x')[0])), ctx).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("2+?x")[0])), ctx).value, 4)
- eq(_eval(_translate((p.Expression.parseString('?x+3')[0])), ctx).value, 5)
- eq(_eval(_translate((p.Expression.parseString('3-?x')[0])), ctx).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("?x+3")[0])), ctx).value, 5)
+ eq(_eval(_translate((p.Expression.parseString("3-?x")[0])), ctx).value, 1)
- eq(_eval(_translate((p.Expression.parseString('?x*3')[0])), ctx).value, 6)
- eq(_eval(_translate((p.Expression.parseString('4/?x')[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("?x*3")[0])), ctx).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("4/?x")[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('?x+?x+?x')[0])), ctx).value, 6)
- eq(_eval(_translate((p.Expression.parseString('?x-?x+?x')[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('(?x-?x)+?x')[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('?x-(?x+?x)')[0])), ctx).value, -2)
+ eq(_eval(_translate((p.Expression.parseString("?x+?x+?x")[0])), ctx).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("?x-?x+?x")[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("(?x-?x)+?x")[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("?x-(?x+?x)")[0])), ctx).value, -2)
- eq(_eval(_translate((p.Expression.parseString('?x*?x*?x')[0])), ctx).value, 8)
- eq(_eval(_translate((p.Expression.parseString('4/?x*?x')[0])), ctx).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/4*?x')[0])), ctx).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/(4*?x)')[0])), ctx).value, 1)
- eq(_eval(_translate((p.Expression.parseString('(?x/?x)*?x')[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('4/(?x*?x)')[0])), ctx).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("?x*?x*?x")[0])), ctx).value, 8)
+ eq(_eval(_translate((p.Expression.parseString("4/?x*?x")[0])), ctx).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/4*?x")[0])), ctx).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/(4*?x)")[0])), ctx).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("(?x/?x)*?x")[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("4/(?x*?x)")[0])), ctx).value, 1)
def test_comparisons():
- eq(bool(_eval(_translate((p.Expression.parseString('2<3')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2<3.0')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2<3e0')[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<3")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<3.0")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<3e0")[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('4<3')[0])))), False)
- eq(bool(_eval(_translate((p.Expression.parseString('4<3.0')[0])))), False)
- eq(bool(_eval(_translate((p.Expression.parseString('4<3e0')[0])))), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("4<3")[0])))), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("4<3.0")[0])))), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("4<3e0")[0])))), False)
- eq(bool(_eval(_translate((p.Expression.parseString('2<2.1')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2<21e-1')[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<2.1")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<21e-1")[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2=2.0')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2=2e0')[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2=2.0")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2=2e0")[0])))), True)
eq(bool(_eval(_translate((p.Expression.parseString('2="cake"')[0])))), False)
@@ -113,39 +114,46 @@ def test_comparisons():
def test_comparisons_var():
ctx = QueryContext()
- ctx[Variable('x')] = Literal(2)
+ ctx[Variable("x")] = Literal(2)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3.0')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3e0')[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3.0")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3e0")[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<2.1')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<21e-1')[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<2.1")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<21e-1")[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x=2.0')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x=2e0')[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x=2.0")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x=2e0")[0])), ctx)), True)
eq(bool(_eval(_translate((p.Expression.parseString('?x="cake"')[0])), ctx)), False)
ctx = QueryContext()
- ctx[Variable('x')] = Literal(4)
+ ctx[Variable("x")] = Literal(4)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3')[0])), ctx)), False)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3.0')[0])), ctx)), False)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3e0')[0])), ctx)), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3")[0])), ctx)), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3.0")[0])), ctx)), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3e0")[0])), ctx)), False)
def test_and_or():
- eq(bool(_eval(_translate((p.Expression.parseString('3>2 && 3>1')[0])))), True)
- eq(bool(_eval(
- _translate((p.Expression.parseString('3>2 && 3>4 || 2>1')[0])))), True)
- eq(bool(_eval(
- _translate((p.Expression.parseString('2>1 || 3>2 && 3>4')[0])))), True)
- eq(bool(_eval(_translate(
- (p.Expression.parseString('(2>1 || 3>2) && 3>4')[0])))), False)
-
-
-if __name__ == '__main__':
+ eq(bool(_eval(_translate((p.Expression.parseString("3>2 && 3>1")[0])))), True)
+ eq(
+ bool(_eval(_translate((p.Expression.parseString("3>2 && 3>4 || 2>1")[0])))),
+ True,
+ )
+ eq(
+ bool(_eval(_translate((p.Expression.parseString("2>1 || 3>2 && 3>4")[0])))),
+ True,
+ )
+ eq(
+ bool(_eval(_translate((p.Expression.parseString("(2>1 || 3>2) && 3>4")[0])))),
+ False,
+ )
+
+
+if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
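
The same operator-precedence behaviour checked above at the parser level can also be verified end-to-end through a full query; a small sketch:

    from rdflib import Graph

    g = Graph()
    row = next(iter(g.query("SELECT ((2 + 3) * 2 AS ?r) WHERE { }")))
    assert row[0].value == 10            # parentheses before multiplication, as in the tests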
diff --git a/test/test_extras_external_graph_libs.py b/test/test_extras_external_graph_libs.py
index d3dc45c8..25b69298 100644
--- a/test/test_extras_external_graph_libs.py
+++ b/test/test_extras_external_graph_libs.py
@@ -10,9 +10,10 @@ def test_rdflib_to_networkx():
from rdflib.extras.external_graph_libs import rdflib_to_networkx_multidigraph
from rdflib.extras.external_graph_libs import rdflib_to_networkx_digraph
from rdflib.extras.external_graph_libs import rdflib_to_networkx_graph
+
g = Graph()
- a, b, l = URIRef('a'), URIRef('b'), Literal('l')
- p, q = URIRef('p'), URIRef('q')
+ a, b, l = URIRef("a"), URIRef("b"), Literal("l")
+ p, q = URIRef("p"), URIRef("q")
edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
for t in edges:
g.add(t)
@@ -28,26 +29,26 @@ def test_rdflib_to_networkx():
assert mdg.has_edge(a, b, key=1)
dg = rdflib_to_networkx_digraph(g)
- assert dg[a][b]['weight'] == 2
- assert sorted(dg[a][b]['triples']) == [(a, p, b), (a, q, b)]
+ assert dg[a][b]["weight"] == 2
+ assert sorted(dg[a][b]["triples"]) == [(a, p, b), (a, q, b)]
assert len(dg.edges()) == 3
assert dg.size() == 3
- assert dg.size(weight='weight') == 4.0
+ assert dg.size(weight="weight") == 4.0
dg = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s, p, o: {})
- assert 'weight' not in dg[a][b]
- assert 'triples' not in dg[a][b]
+ assert "weight" not in dg[a][b]
+ assert "triples" not in dg[a][b]
ug = rdflib_to_networkx_graph(g)
- assert ug[a][b]['weight'] == 3
- assert sorted(ug[a][b]['triples']) == [(a, p, b), (a, q, b), (b, p, a)]
+ assert ug[a][b]["weight"] == 3
+ assert sorted(ug[a][b]["triples"]) == [(a, p, b), (a, q, b), (b, p, a)]
assert len(ug.edges()) == 2
assert ug.size() == 2
- assert ug.size(weight='weight') == 4.0
+ assert ug.size(weight="weight") == 4.0
ug = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s, p, o: {})
- assert 'weight' not in ug[a][b]
- assert 'triples' not in ug[a][b]
+ assert "weight" not in ug[a][b]
+ assert "triples" not in ug[a][b]
def test_rdflib_to_graphtool():
@@ -56,9 +57,10 @@ def test_rdflib_to_graphtool():
except ImportError:
raise SkipTest("couldn't find graph_tool")
from rdflib.extras.external_graph_libs import rdflib_to_graphtool
+
g = Graph()
- a, b, l = URIRef('a'), URIRef('b'), Literal('l')
- p, q = URIRef('p'), URIRef('q')
+ a, b, l = URIRef("a"), URIRef("b"), Literal("l")
+ p, q = URIRef("p"), URIRef("q")
edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
for t in edges:
g.add(t)
@@ -66,21 +68,20 @@ def test_rdflib_to_graphtool():
mdg = rdflib_to_graphtool(g)
assert len(list(mdg.edges())) == 4
- vpterm = mdg.vertex_properties['term']
+ vpterm = mdg.vertex_properties["term"]
va = gt_util.find_vertex(mdg, vpterm, a)[0]
vb = gt_util.find_vertex(mdg, vpterm, b)[0]
vl = gt_util.find_vertex(mdg, vpterm, l)[0]
assert (va, vb) in [(e.source(), e.target()) for e in list(mdg.edges())]
- epterm = mdg.edge_properties['term']
+ epterm = mdg.edge_properties["term"]
assert len(list(gt_util.find_edge(mdg, epterm, p))) == 3
assert len(list(gt_util.find_edge(mdg, epterm, q))) == 1
mdg = rdflib_to_graphtool(
- g,
- e_prop_names=[str('name')],
- transform_p=lambda s, p, o: {str('name'): str(p)})
- epterm = mdg.edge_properties['name']
+ g, e_prop_names=[str("name")], transform_p=lambda s, p, o: {str("name"): str(p)}
+ )
+ epterm = mdg.edge_properties["name"]
assert len(list(gt_util.find_edge(mdg, epterm, str(p)))) == 3
assert len(list(gt_util.find_edge(mdg, epterm, str(q)))) == 1
@@ -88,4 +89,5 @@ def test_rdflib_to_graphtool():
if __name__ == "__main__":
import sys
import nose
+
nose.main(defaultTest=sys.argv[0])
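
The conversion helpers above need networkx at runtime; a trimmed sketch of the digraph case (skip it if networkx is not installed):

    from rdflib import Graph, URIRef, Literal
    from rdflib.extras.external_graph_libs import rdflib_to_networkx_digraph  # needs networkx

    g = Graph()
    a, b = URIRef("a"), URIRef("b")
    p, q = URIRef("p"), URIRef("q")
    for t in [(a, p, b), (a, q, b), (b, p, a), (b, p, Literal("l"))]:
        g.add(t)

    dg = rdflib_to_networkx_digraph(g)
    assert dg[a][b]["weight"] == 2                          # two parallel triples collapsed
    assert sorted(dg[a][b]["triples"]) == [(a, p, b), (a, q, b)]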
diff --git a/test/test_finalnewline.py b/test/test_finalnewline.py
index 8cfdcab3..c78ac247 100644
--- a/test/test_finalnewline.py
+++ b/test/test_finalnewline.py
@@ -1,4 +1,3 @@
-
from rdflib import ConjunctiveGraph, URIRef
import rdflib.plugin
@@ -10,15 +9,19 @@ def testFinalNewline():
import sys
graph = ConjunctiveGraph()
- graph.add((URIRef("http://ex.org/a"),
- URIRef("http://ex.org/b"),
- URIRef("http://ex.org/c")))
+ graph.add(
+ (
+ URIRef("http://ex.org/a"),
+ URIRef("http://ex.org/b"),
+ URIRef("http://ex.org/c"),
+ )
+ )
failed = set()
for p in rdflib.plugin.plugins(None, rdflib.plugin.Serializer):
v = graph.serialize(format=p.name)
lines = v.split("\n".encode("latin-1"))
- if "\n".encode("latin-1") not in v or (lines[-1] != ''.encode("latin-1")):
+ if "\n".encode("latin-1") not in v or (lines[-1] != "".encode("latin-1")):
failed.add(p.name)
assert len(failed) == 0, "No final newline for formats: '%s'" % failed
@@ -27,5 +30,6 @@ if __name__ == "__main__":
import sys
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_graph.py b/test/test_graph.py
index 228550ec..0032213e 100644
--- a/test/test_graph.py
+++ b/test/test_graph.py
@@ -11,29 +11,27 @@ from nose.exc import SkipTest
class GraphTestCase(unittest.TestCase):
- store = 'default'
+ store = "default"
tmppath = None
def setUp(self):
try:
self.graph = Graph(store=self.store)
except ImportError:
- raise SkipTest(
- "Dependencies for store '%s' not available!" % self.store)
+ raise SkipTest("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
- _, self.tmppath = mkstemp(
- prefix='test', dir='/tmp', suffix='.sqlite')
+ _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
else:
self.tmppath = mkdtemp()
self.graph.open(self.tmppath, create=True)
- self.michel = URIRef(u'michel')
- self.tarek = URIRef(u'tarek')
- self.bob = URIRef(u'bob')
- self.likes = URIRef(u'likes')
- self.hates = URIRef(u'hates')
- self.pizza = URIRef(u'pizza')
- self.cheese = URIRef(u'cheese')
+ self.michel = URIRef(u"michel")
+ self.tarek = URIRef(u"tarek")
+ self.bob = URIRef(u"bob")
+ self.likes = URIRef(u"likes")
+ self.hates = URIRef(u"hates")
+ self.pizza = URIRef(u"pizza")
+ self.cheese = URIRef(u"cheese")
def tearDown(self):
self.graph.close()
@@ -254,21 +252,27 @@ class GraphTestCase(unittest.TestCase):
# dynamically create classes for each registered Store
pluginname = None
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ('default', 'IOMemory', 'Auditable',
- 'Concurrent', 'SPARQLStore',
- 'SPARQLUpdateStore'):
+ if s.name in (
+ "default",
+ "IOMemory",
+ "Auditable",
+ "Concurrent",
+ "SPARQLStore",
+ "SPARQLUpdateStore",
+ ):
continue # these are tested by default
- locals()["t%d" % tests] = type("%sGraphTestCase" %
- s.name, (GraphTestCase,), {"store": s.name})
+ locals()["t%d" % tests] = type(
+ "%sGraphTestCase" % s.name, (GraphTestCase,), {"store": s.name}
+ )
tests += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main(argv=sys.argv[:1])
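
The class-generation trick at the bottom of these test modules is plain type(); a generic sketch (the base class and store name are illustrative):

    import unittest

    class StoreTestBase(unittest.TestCase):   # stand-in for GraphTestCase above
        store = "default"

    # one concrete TestCase per backend, picked up by unittest through module globals
    IOMemoryGraphTestCase = type(
        "IOMemoryGraphTestCase", (StoreTestBase,), {"store": "IOMemory"}
    )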
diff --git a/test/test_graph_context.py b/test/test_graph_context.py
index cc5786dd..0a7ac8a3 100644
--- a/test/test_graph_context.py
+++ b/test/test_graph_context.py
@@ -10,7 +10,7 @@ from nose.exc import SkipTest
class ContextTestCase(unittest.TestCase):
- store = 'default'
+ store = "default"
slow = True
tmppath = None
@@ -18,24 +18,22 @@ class ContextTestCase(unittest.TestCase):
try:
self.graph = ConjunctiveGraph(store=self.store)
except ImportError:
- raise SkipTest(
- "Dependencies for store '%s' not available!" % self.store)
+ raise SkipTest("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
- _, self.tmppath = mkstemp(
- prefix='test', dir='/tmp', suffix='.sqlite')
+ _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
else:
self.tmppath = mkdtemp()
self.graph.open(self.tmppath, create=True)
- self.michel = URIRef(u'michel')
- self.tarek = URIRef(u'tarek')
- self.bob = URIRef(u'bob')
- self.likes = URIRef(u'likes')
- self.hates = URIRef(u'hates')
- self.pizza = URIRef(u'pizza')
- self.cheese = URIRef(u'cheese')
+ self.michel = URIRef(u"michel")
+ self.tarek = URIRef(u"tarek")
+ self.bob = URIRef(u"bob")
+ self.likes = URIRef(u"likes")
+ self.hates = URIRef(u"hates")
+ self.pizza = URIRef(u"pizza")
+ self.cheese = URIRef(u"cheese")
- self.c1 = URIRef(u'context-1')
- self.c2 = URIRef(u'context-2')
+ self.c1 = URIRef(u"context-1")
+ self.c2 = URIRef(u"context-2")
# delete the graph for each test!
self.graph.remove((None, None, None))
@@ -176,6 +174,7 @@ class ContextTestCase(unittest.TestCase):
def cid(c):
return c.identifier
+
self.assertTrue(self.c1 in map(cid, self.graph.contexts()))
self.assertTrue(self.c2 in map(cid, self.graph.contexts()))
@@ -305,32 +304,55 @@ class ContextTestCase(unittest.TestCase):
asserte(set(c.predicates(bob, pizza)), set([hates]))
asserte(set(c.predicates(bob, michel)), set([hates]))
- asserte(set(
- c.subject_objects(hates)), set([(bob, pizza), (bob, michel)]))
+ asserte(set(c.subject_objects(hates)), set([(bob, pizza), (bob, michel)]))
+ asserte(
+ set(c.subject_objects(likes)),
+ set(
+ [
+ (tarek, cheese),
+ (michel, cheese),
+ (michel, pizza),
+ (bob, cheese),
+ (tarek, pizza),
+ ]
+ ),
+ )
+
+ asserte(
+ set(c.predicate_objects(michel)), set([(likes, cheese), (likes, pizza)])
+ )
+ asserte(
+ set(c.predicate_objects(bob)),
+ set([(likes, cheese), (hates, pizza), (hates, michel)]),
+ )
asserte(
- set(c.subject_objects(likes)), set(
- [(tarek, cheese), (michel, cheese),
- (michel, pizza), (bob, cheese),
- (tarek, pizza)]))
-
- asserte(set(c.predicate_objects(
- michel)), set([(likes, cheese), (likes, pizza)]))
- asserte(set(c.predicate_objects(bob)), set([(likes,
- cheese), (hates, pizza), (hates, michel)]))
- asserte(set(c.predicate_objects(
- tarek)), set([(likes, cheese), (likes, pizza)]))
-
- asserte(set(c.subject_predicates(
- pizza)), set([(bob, hates), (tarek, likes), (michel, likes)]))
- asserte(set(c.subject_predicates(cheese)), set([(
- bob, likes), (tarek, likes), (michel, likes)]))
+ set(c.predicate_objects(tarek)), set([(likes, cheese), (likes, pizza)])
+ )
+
+ asserte(
+ set(c.subject_predicates(pizza)),
+ set([(bob, hates), (tarek, likes), (michel, likes)]),
+ )
+ asserte(
+ set(c.subject_predicates(cheese)),
+ set([(bob, likes), (tarek, likes), (michel, likes)]),
+ )
asserte(set(c.subject_predicates(michel)), set([(bob, hates)]))
- asserte(set(c), set(
- [(bob, hates, michel), (bob, likes, cheese),
- (tarek, likes, pizza), (michel, likes, pizza),
- (michel, likes, cheese), (bob, hates, pizza),
- (tarek, likes, cheese)]))
+ asserte(
+ set(c),
+ set(
+ [
+ (bob, hates, michel),
+ (bob, likes, cheese),
+ (tarek, likes, pizza),
+ (michel, likes, pizza),
+ (michel, likes, cheese),
+ (bob, hates, pizza),
+ (tarek, likes, cheese),
+ ]
+ ),
+ )
# remove stuff and make sure the graph is empty again
self.removeStuff()
@@ -340,22 +362,29 @@ class ContextTestCase(unittest.TestCase):
# dynamically create classes for each registered Store
pluginname = None
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ('default', 'IOMemory', 'Auditable',
- 'Concurrent', 'SPARQLStore', 'SPARQLUpdateStore'):
+ if s.name in (
+ "default",
+ "IOMemory",
+ "Auditable",
+ "Concurrent",
+ "SPARQLStore",
+ "SPARQLUpdateStore",
+ ):
continue # these are tested by default
if not s.getClass().context_aware:
continue
- locals()["t%d" % tests] = type("%sContextTestCase" % s.name, (
- ContextTestCase,), {"store": s.name})
+ locals()["t%d" % tests] = type(
+ "%sContextTestCase" % s.name, (ContextTestCase,), {"store": s.name}
+ )
tests += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
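
The access-pattern assertions reformatted above exercise the pairwise helpers on Graph; condensed:

    from rdflib import Graph, URIRef

    g = Graph()
    tarek, likes, pizza, cheese = (URIRef(x) for x in ("tarek", "likes", "pizza", "cheese"))
    g.add((tarek, likes, pizza))
    g.add((tarek, likes, cheese))

    assert set(g.objects(tarek, likes)) == {pizza, cheese}
    assert set(g.predicate_objects(tarek)) == {(likes, pizza), (likes, cheese)}
    assert set(g.subject_predicates(pizza)) == {(tarek, likes)}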
diff --git a/test/test_graph_formula.py b/test/test_graph_formula.py
index 412e7a77..52764628 100644
--- a/test/test_graph_formula.py
+++ b/test/test_graph_formula.py
@@ -31,8 +31,8 @@ def testFormulaStore(store="default", configString=None):
g.destroy(configString)
g.open(configString)
else:
- if store == 'SQLite':
- _, path = mkstemp(prefix='test', dir='/tmp', suffix='.sqlite')
+ if store == "SQLite":
+ _, path = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
g.open(path, create=True)
else:
g.open(mkdtemp(), create=True)
@@ -45,10 +45,10 @@ def testFormulaStore(store="default", configString=None):
assert type(formulaA) == QuotedGraph and type(formulaB) == QuotedGraph
# a = URIRef('http://test/a')
- b = URIRef('http://test/b')
- c = URIRef('http://test/c')
- d = URIRef('http://test/d')
- v = Variable('y')
+ b = URIRef("http://test/b")
+ c = URIRef("http://test/c")
+ d = URIRef("http://test/d")
+ v = Variable("y")
universe = ConjunctiveGraph(g.store)
@@ -69,10 +69,8 @@ def testFormulaStore(store="default", configString=None):
assert len(list(formulaA.triples((None, None, None)))) == 2
assert len(list(formulaB.triples((None, None, None)))) == 2
assert len(list(universe.triples((None, None, None)))) == 3
- assert len(list(formulaB.triples(
- (None, URIRef('http://test/d'), None)))) == 2
- assert len(list(universe.triples(
- (None, URIRef('http://test/d'), None)))) == 1
+ assert len(list(formulaB.triples((None, URIRef("http://test/d"), None)))) == 2
+ assert len(list(universe.triples((None, URIRef("http://test/d"), None)))) == 1
# #context tests
# #test contexts with triple argument
@@ -115,13 +113,13 @@ def testFormulaStore(store="default", configString=None):
assert len(universe) == 0
g.close()
- if store == 'SQLite':
+ if store == "SQLite":
os.unlink(path)
else:
g.store.destroy(configString)
except:
g.close()
- if store == 'SQLite':
+ if store == "SQLite":
os.unlink(path)
else:
g.store.destroy(configString)
@@ -130,21 +128,19 @@ def testFormulaStore(store="default", configString=None):
def testFormulaStores():
pluginname = None
- if __name__ == '__main__':
+ if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in (
- 'Auditable', 'Concurrent',
- 'SPARQLStore', 'SPARQLUpdateStore',
- ):
+ if s.name in ("Auditable", "Concurrent", "SPARQLStore", "SPARQLUpdateStore",):
continue
if not s.getClass().formula_aware:
continue
yield testFormulaStore, s.name
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_graph_items.py b/test/test_graph_items.py
index b6cb2529..bc13c367 100644
--- a/test/test_graph_items.py
+++ b/test/test_graph_items.py
@@ -2,7 +2,8 @@ from rdflib import Graph, RDF
def test_recursive_list_detection():
- g = Graph().parse(data="""
+ g = Graph().parse(
+ data="""
@prefix : <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<> :value _:a .
@@ -14,7 +15,9 @@ def test_recursive_list_detection():
<> :value [ :first "turtles"; :rest _:c ] .
_:c :first "all the way down"; :rest _:a .
- """, format="turtle")
+ """,
+ format="turtle",
+ )
for v in g.objects(None, RDF.value):
try:
diff --git a/test/test_hex_binary.py b/test/test_hex_binary.py
index f4ccbe5a..5f46bda5 100644
--- a/test/test_hex_binary.py
+++ b/test/test_hex_binary.py
@@ -6,7 +6,6 @@ from rdflib import Literal, XSD
class HexBinaryTestCase(unittest.TestCase):
-
def test_int(self):
self._test_integer(5)
self._test_integer(3452)
@@ -30,20 +29,20 @@ class HexBinaryTestCase(unittest.TestCase):
def test_unicode(self):
str1 = u"Test utf-8 string éàë"
# u hexstring
- hex_str1 = binascii.hexlify(str1.encode('utf-8')).decode()
+ hex_str1 = binascii.hexlify(str1.encode("utf-8")).decode()
l1 = Literal(hex_str1, datatype=XSD.hexBinary)
b_str1 = l1.toPython()
- self.assertEquals(b_str1.decode('utf-8'), str1)
+ self.assertEquals(b_str1.decode("utf-8"), str1)
self.assertEquals(str(l1), hex_str1)
# b hexstring
- hex_str1b = binascii.hexlify(str1.encode('utf-8'))
+ hex_str1b = binascii.hexlify(str1.encode("utf-8"))
l1b = Literal(hex_str1b, datatype=XSD.hexBinary)
b_str1b = l1b.toPython()
self.assertEquals(b_str1, b_str1b)
- self.assertEquals(b_str1b.decode('utf-8'), str1)
+ self.assertEquals(b_str1b.decode("utf-8"), str1)
self.assertEquals(str(l1b), hex_str1)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
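
The hexBinary round-trip above in miniature (the string content is arbitrary):

    import binascii
    from rdflib import Literal, XSD

    s = u"Test utf-8 string éàë"
    hex_str = binascii.hexlify(s.encode("utf-8")).decode()

    l = Literal(hex_str, datatype=XSD.hexBinary)
    assert l.toPython().decode("utf-8") == s      # toPython() yields the raw bytes back
    assert str(l) == hex_str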
diff --git a/test/test_initbindings.py b/test/test_initbindings.py
index efa94191..138041b2 100644
--- a/test/test_initbindings.py
+++ b/test/test_initbindings.py
@@ -1,181 +1,349 @@
-
from nose import SkipTest
from rdflib.plugins.sparql import prepareQuery
from rdflib import ConjunctiveGraph, URIRef, Literal, Namespace, Variable
+
g = ConjunctiveGraph()
def testStr():
- a = set(g.query("SELECT (STR(?target) AS ?r) WHERE { }", initBindings={'target': URIRef('example:a')}))
- b = set(g.query("SELECT (STR(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"))
+ a = set(
+ g.query(
+ "SELECT (STR(?target) AS ?r) WHERE { }",
+ initBindings={"target": URIRef("example:a")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (STR(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"
+ )
+ )
assert a == b, "STR: %r != %r" % (a, b)
def testIsIRI():
- a = set(g.query("SELECT (isIRI(?target) AS ?r) WHERE { }", initBindings={'target': URIRef('example:a')}))
- b = set(g.query("SELECT (isIRI(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"))
+ a = set(
+ g.query(
+ "SELECT (isIRI(?target) AS ?r) WHERE { }",
+ initBindings={"target": URIRef("example:a")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (isIRI(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"
+ )
+ )
assert a == b, "isIRI: %r != %r" % (a, b)
def testIsBlank():
- a = set(g.query("SELECT (isBlank(?target) AS ?r) WHERE { }", initBindings={'target': URIRef('example:a')}))
- b = set(g.query("SELECT (isBlank(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"))
+ a = set(
+ g.query(
+ "SELECT (isBlank(?target) AS ?r) WHERE { }",
+ initBindings={"target": URIRef("example:a")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (isBlank(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"
+ )
+ )
assert a == b, "isBlank: %r != %r" % (a, b)
def testIsLiteral():
- a = set(g.query("SELECT (isLiteral(?target) AS ?r) WHERE { }", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (isLiteral(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (isLiteral(?target) AS ?r) WHERE { }",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (isLiteral(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "isLiteral: %r != %r" % (a, b)
def testUCase():
- a = set(g.query("SELECT (UCASE(?target) AS ?r) WHERE { }", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) AS ?r) WHERE { }",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "UCASE: %r != %r" % (a, b)
def testNoFunc():
- a = set(g.query("SELECT ?target WHERE { }", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query("SELECT ?target WHERE { }", initBindings={"target": Literal("example")})
+ )
b = set(g.query("SELECT ?target WHERE { } VALUES (?target) {('example')}"))
assert a == b, "no func: %r != %r" % (a, b)
def testOrderBy():
- a = set(g.query("SELECT ?target WHERE { } ORDER BY ?target", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderby: %r != %r" % (a, b)
def testOrderByFunc():
- a = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target VALUES (?target) {('example')} "))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target VALUES (?target) {('example')} "
+ )
+ )
assert a == b, "orderbyFunc: %r != %r" % (a, b)
def testNoFuncLimit():
- a = set(g.query("SELECT ?target WHERE { } LIMIT 1", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
b = set(g.query("SELECT ?target WHERE { } LIMIT 1 VALUES (?target) {('example')}"))
assert a == b, "limit: %r != %r" % (a, b)
def testOrderByLimit():
- a = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyLimit: %r != %r" % (a, b)
def testOrderByFuncLimit():
- a = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyFuncLimit: %r != %r" % (a, b)
def testNoFuncOffset():
- a = set(g.query("SELECT ?target WHERE { } OFFSET 1", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
b = set(g.query("SELECT ?target WHERE { } OFFSET 1 VALUES (?target) {('example')}"))
assert a == b, "offset: %r != %r" % (a, b)
def testNoFuncLimitOffset():
- a = set(g.query("SELECT ?target WHERE { } LIMIT 1 OFFSET 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } LIMIT 1 OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "limitOffset: %r != %r" % (a, b)
def testOrderByLimitOffset():
- a = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyLimitOffset: %r != %r" % (a, b)
def testOrderByFuncLimitOffset():
- a = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyFuncLimitOffset: %r != %r" % (a, b)
def testDistinct():
- a = set(g.query("SELECT DISTINCT ?target WHERE { }", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { }",
+ initBindings={"target": Literal("example")},
+ )
+ )
b = set(g.query("SELECT DISTINCT ?target WHERE { } VALUES (?target) {('example')}"))
assert a == b, "distinct: %r != %r" % (a, b)
def testDistinctOrderBy():
- a = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "distinctOrderby: %r != %r" % (a, b)
def testDistinctOrderByLimit():
- a = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "distinctOrderbyLimit: %r != %r" % (a, b)
def testPrepare():
- q = prepareQuery('SELECT ?target WHERE { }')
+ q = prepareQuery("SELECT ?target WHERE { }")
r = list(g.query(q))
e = []
- assert r == e, 'prepare: %r != %r' % (r, e)
+ assert r == e, "prepare: %r != %r" % (r, e)
- r = list(g.query(q, initBindings={'target': Literal('example')}))
- e = [(Literal('example'),)]
- assert r == e, 'prepare: %r != %r' % (r, e)
+ r = list(g.query(q, initBindings={"target": Literal("example")}))
+ e = [(Literal("example"),)]
+ assert r == e, "prepare: %r != %r" % (r, e)
r = list(g.query(q))
e = []
- assert r == e, 'prepare: %r != %r' % (r, e)
+ assert r == e, "prepare: %r != %r" % (r, e)
def testData():
data = ConjunctiveGraph()
- data += [(URIRef('urn:a'), URIRef('urn:p'), Literal('a')),
- (URIRef('urn:b'), URIRef('urn:p'), Literal('b'))]
-
- a = set(g.query("SELECT ?target WHERE { ?target <urn:p> ?val }", initBindings={'val': Literal('a')}))
- b = set(g.query("SELECT ?target WHERE { ?target <urn:p> ?val } VALUES (?val) {('a')}"))
+ data += [
+ (URIRef("urn:a"), URIRef("urn:p"), Literal("a")),
+ (URIRef("urn:b"), URIRef("urn:p"), Literal("b")),
+ ]
+
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { ?target <urn:p> ?val }",
+ initBindings={"val": Literal("a")},
+ )
+ )
+ b = set(
+ g.query("SELECT ?target WHERE { ?target <urn:p> ?val } VALUES (?val) {('a')}")
+ )
assert a == b, "data: %r != %r" % (a, b)
def testAsk():
- a = set(g.query("ASK { }", initBindings={'target': Literal('example')}))
+ a = set(g.query("ASK { }", initBindings={"target": Literal("example")}))
b = set(g.query("ASK { } VALUES (?target) {('example')}"))
assert a == b, "ask: %r != %r" % (a, b)
EX = Namespace("http://example.com/")
g2 = ConjunctiveGraph()
-g2.bind('', EX)
-g2.add((EX['s1'], EX['p'], EX['o1']))
-g2.add((EX['s2'], EX['p'], EX['o2']))
+g2.bind("", EX)
+g2.add((EX["s1"], EX["p"], EX["o1"]))
+g2.add((EX["s2"], EX["p"], EX["o2"]))
def testStringKey():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"s": EX['s1']}))
+ results = list(
+ g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"s": EX["s1"]})
+ )
assert len(results) == 1, results
def testStringKeyWithQuestionMark():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"?s": EX['s1']}))
+ results = list(
+ g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"?s": EX["s1"]})
+ )
assert len(results) == 1, results
def testVariableKey():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("s"): EX['s1']}))
+ results = list(
+ g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("s"): EX["s1"]})
+ )
assert len(results) == 1, results
+
def testVariableKeyWithQuestionMark():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("?s"): EX['s1']}))
+ results = list(
+ g2.query(
+ "SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("?s"): EX["s1"]}
+ )
+ )
assert len(results) == 1, results
def testFilter():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o FILTER (?s = ?x)}", initBindings={Variable("?x"): EX['s1']}))
+ results = list(
+ g2.query(
+ "SELECT ?o WHERE { ?s :p ?o FILTER (?s = ?x)}",
+ initBindings={Variable("?x"): EX["s1"]},
+ )
+ )
assert len(results) == 1, results
@@ -183,5 +351,6 @@ if __name__ == "__main__":
import sys
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_iomemory.py b/test/test_iomemory.py
index 897cc8b2..4239fc3c 100644
--- a/test/test_iomemory.py
+++ b/test/test_iomemory.py
@@ -1,4 +1,3 @@
-
"""
Iteration and update conflict with set based IOMemory store
@@ -63,6 +62,6 @@ def test_concurrent2():
assert i == n
-if __name__ == '__main__':
+if __name__ == "__main__":
test_concurrent1()
test_concurrent2()
diff --git a/test/test_issue084.py b/test/test_issue084.py
index 527caf21..23536550 100644
--- a/test/test_issue084.py
+++ b/test/test_issue084.py
@@ -20,55 +20,65 @@ rdf = u"""@prefix skos:
"""
-rdf_utf8 = rdf.encode('utf-8')
+rdf_utf8 = rdf.encode("utf-8")
-rdf_reader = getreader('utf-8')(BytesIO(rdf.encode('utf-8')))
+rdf_reader = getreader("utf-8")(BytesIO(rdf.encode("utf-8")))
def test_a():
"""Test reading N3 from a unicode objects as data"""
g = Graph()
- g.parse(data=rdf, format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdf, format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_b():
"""Test reading N3 from a utf8 encoded string as data"""
g = Graph()
- g.parse(data=rdf_utf8, format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdf_utf8, format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_c():
"""Test reading N3 from a codecs.StreamReader, outputting unicode"""
g = Graph()
-# rdf_reader.seek(0)
- g.parse(source=rdf_reader, format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ # rdf_reader.seek(0)
+ g.parse(source=rdf_reader, format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_d():
"""Test reading N3 from a StringIO over the unicode object"""
g = Graph()
- g.parse(source=StringIO(rdf), format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(source=StringIO(rdf), format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_e():
"""Test reading N3 from a BytesIO over the string object"""
g = Graph()
- g.parse(source=BytesIO(rdf_utf8), format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(source=BytesIO(rdf_utf8), format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
# this is unicode
@@ -86,27 +96,32 @@ rdfxml = u"""<?xml version="1.0" encoding="UTF-8"?>
"""
# this is a str
-rdfxml_utf8 = rdfxml.encode('utf-8')
+rdfxml_utf8 = rdfxml.encode("utf-8")
-rdfxml_reader = getreader('utf-8')(BytesIO(rdfxml.encode('utf-8')))
+rdfxml_reader = getreader("utf-8")(BytesIO(rdfxml.encode("utf-8")))
def test_xml_a():
"""Test reading XML from a unicode object as data"""
g = Graph()
- g.parse(data=rdfxml, format='xml')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdfxml, format="xml")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_xml_b():
"""Test reading XML from a utf8 encoded string object as data"""
g = Graph()
- g.parse(data=rdfxml_utf8, format='xml')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdfxml_utf8, format="xml")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
+
# The following two cases are currently not supported by Graph.parse
# def test_xml_c():
@@ -127,7 +142,9 @@ def test_xml_b():
def test_xml_e():
"""Test reading XML from a BytesIO created from utf8 encoded string"""
g = Graph()
- g.parse(source=BytesIO(rdfxml_utf8), format='xml')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(source=BytesIO(rdfxml_utf8), format="xml")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
diff --git a/test/test_issue1003.py b/test/test_issue1003.py
index fdc56c82..d59caf3d 100644
--- a/test/test_issue1003.py
+++ b/test/test_issue1003.py
@@ -34,41 +34,49 @@ g.bind("skos", SKOS)
g1 = Graph()
g1 += g
# @base should not be in output
-assert "@base" not in g.serialize(format='turtle').decode("utf-8")
+assert "@base" not in g.serialize(format="turtle").decode("utf-8")
# 2. base one set for graph, no base set for serialization
g2 = Graph(base=base_one)
g2 += g
# @base should be in output, from Graph (one)
-assert "@base <http://one.org/> ." in g2.serialize(format='turtle').decode("utf-8")
+assert "@base <http://one.org/> ." in g2.serialize(format="turtle").decode("utf-8")
# 3. no base set for graph, base two set for serialization
g3 = Graph()
g3 += g
# @base should be in output, from serialization (two)
-assert "@base <http://two.org/> ." in g3.serialize(format='turtle', base=base_two).decode("utf-8")
+assert "@base <http://two.org/> ." in g3.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
# 4. base one set for graph, base two set for serialization, Graph one overrides
g4 = Graph(base=base_one)
g4 += g
# @base should be in output, from graph (one)
-assert "@base <http://two.org/> ." in g4.serialize(format='turtle', base=base_two).decode("utf-8")
+assert "@base <http://two.org/> ." in g4.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
# just checking that the serialization setting (two) hasn't snuck through
-assert "@base <http://one.org/> ." not in g4.serialize(format='turtle', base=base_two).decode("utf-8")
+assert "@base <http://one.org/> ." not in g4.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
# 5. multiple serialization side effect checking
g5 = Graph()
g5 += g
# @base should be in output, from serialization (two)
-assert "@base <http://two.org/> ." in g5.serialize(format='turtle', base=base_two).decode("utf-8")
+assert "@base <http://two.org/> ." in g5.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
# checking for side affects - no base now set for this serialization
# @base should not be in output
-assert "@base" not in g5.serialize(format='turtle').decode("utf-8")
+assert "@base" not in g5.serialize(format="turtle").decode("utf-8")
# 6. checking results for RDF/XML
@@ -76,22 +84,30 @@ g6 = Graph()
g6 += g
g6.bind("dct", DCTERMS)
g6.bind("skos", SKOS)
-assert "@xml:base" not in g6.serialize(format='xml').decode("utf-8")
-assert 'xml:base="http://one.org/"' in g6.serialize(format='xml', base=base_one).decode("utf-8")
+assert "@xml:base" not in g6.serialize(format="xml").decode("utf-8")
+assert 'xml:base="http://one.org/"' in g6.serialize(format="xml", base=base_one).decode(
+ "utf-8"
+)
g6.base = base_two
-assert 'xml:base="http://two.org/"' in g6.serialize(format='xml').decode("utf-8")
-assert 'xml:base="http://one.org/"' in g6.serialize(format='xml', base=base_one).decode("utf-8")
+assert 'xml:base="http://two.org/"' in g6.serialize(format="xml").decode("utf-8")
+assert 'xml:base="http://one.org/"' in g6.serialize(format="xml", base=base_one).decode(
+ "utf-8"
+)
# 7. checking results for N3
g7 = Graph()
g7 += g
g7.bind("dct", DCTERMS)
g7.bind("skos", SKOS)
-assert "@xml:base" not in g7.serialize(format='xml').decode("utf-8")
-assert "@base <http://one.org/> ." in g7.serialize(format='n3', base=base_one).decode("utf-8")
+assert "@xml:base" not in g7.serialize(format="xml").decode("utf-8")
+assert "@base <http://one.org/> ." in g7.serialize(format="n3", base=base_one).decode(
+ "utf-8"
+)
g7.base = base_two
-assert "@base <http://two.org/> ." in g7.serialize(format='n3').decode("utf-8")
-assert "@base <http://one.org/> ." in g7.serialize(format='n3', base=base_one).decode("utf-8")
+assert "@base <http://two.org/> ." in g7.serialize(format="n3").decode("utf-8")
+assert "@base <http://one.org/> ." in g7.serialize(format="n3", base=base_one).decode(
+ "utf-8"
+)
# 8. checking results for TriX & TriG
# TriX can specify a base per graph but setting a base for the whole
@@ -99,19 +115,19 @@ base_three = Namespace("http://three.org/")
ds1 = Dataset()
ds1.bind("dct", DCTERMS)
ds1.bind("skos", SKOS)
-g8 = ds1.graph(URIRef('http://g8.com/'), base=base_one)
-g9 = ds1.graph(URIRef('http://g9.com/'))
+g8 = ds1.graph(URIRef("http://g8.com/"), base=base_one)
+g9 = ds1.graph(URIRef("http://g9.com/"))
g8 += g
g9 += g
g9.base = base_two
ds1.base = base_three
-trix = ds1.serialize(format='trix', base=Namespace("http://two.org/")).decode("utf-8")
+trix = ds1.serialize(format="trix", base=Namespace("http://two.org/")).decode("utf-8")
assert '<graph xml:base="http://one.org/">' in trix
assert '<graph xml:base="http://two.org/">' in trix
assert '<TriX xml:base="http://two.org/"' in trix
-trig = ds1.serialize(format='trig', base=Namespace("http://two.org/")).decode("utf-8")
-assert '@base <http://one.org/> .' not in trig
-assert '@base <http://three.org/> .' not in trig
-assert '@base <http://two.org/> .' in trig
+trig = ds1.serialize(format="trig", base=Namespace("http://two.org/")).decode("utf-8")
+assert "@base <http://one.org/> ." not in trig
+assert "@base <http://three.org/> ." not in trig
+assert "@base <http://two.org/> ." in trig
diff --git a/test/test_issue160.py b/test/test_issue160.py
index 17ae18c5..b3c7b422 100644
--- a/test/test_issue160.py
+++ b/test/test_issue160.py
@@ -43,11 +43,10 @@ target2xml = """\
class CollectionTest(TestCase):
-
def test_collection_render(self):
- foo = Namespace('http://www.example.org/foo/ns/')
- ex = Namespace('http://www.example.org/example/foo/')
- rdf = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
+ foo = Namespace("http://www.example.org/foo/ns/")
+ ex = Namespace("http://www.example.org/example/foo/")
+ rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
# Works: x a rdf:List, a foo:Other ;
# Fails: y a foo:Wrapper, foo:wraps x; x a rdf:List, a foo:Other ;
@@ -58,14 +57,14 @@ class CollectionTest(TestCase):
target2.parse(data=target2xml)
g = ConjunctiveGraph()
- bits = [ex['a'], ex['b'], ex['c']]
- l = Collection(g, ex['thing'], bits)
- triple = (ex['thing'], rdf['type'], foo['Other'])
+ bits = [ex["a"], ex["b"], ex["c"]]
+ l = Collection(g, ex["thing"], bits)
+ triple = (ex["thing"], rdf["type"], foo["Other"])
g.add(triple)
- triple = (ex['thing'], foo['property'], Literal('Some Value'))
+ triple = (ex["thing"], foo["property"], Literal("Some Value"))
g.add(triple)
for b in bits:
- triple = (b, rdf['type'], foo['Item'])
+ triple = (b, rdf["type"], foo["Item"])
g.add(triple)
self.assertEqual(g.isomorphic(target1), True)
diff --git a/test/test_issue161.py b/test/test_issue161.py
index df0b6b7c..fa7529dc 100644
--- a/test/test_issue161.py
+++ b/test/test_issue161.py
@@ -3,12 +3,10 @@ from rdflib.graph import ConjunctiveGraph
class EntityTest(TestCase):
-
def test_turtle_namespace_prefixes(self):
g = ConjunctiveGraph()
- n3 = \
- """
+ n3 = """
@prefix _9: <http://data.linkedmdb.org/resource/movie/> .
@prefix p_9: <urn:test:> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@@ -20,13 +18,13 @@ class EntityTest(TestCase):
rdfs:label "Cecil B. DeMille (Director)";
_9:director_name "Cecil B. DeMille" ."""
- g.parse(data=n3, format='n3')
+ g.parse(data=n3, format="n3")
turtle = g.serialize(format="turtle")
# Check round-tripping, just for kicks.
g = ConjunctiveGraph()
- g.parse(data=turtle, format='turtle')
+ g.parse(data=turtle, format="turtle")
# Shouldn't have got to here
s = g.serialize(format="turtle")
- self.assertTrue('@prefix _9'.encode("latin-1") not in s)
+ self.assertTrue("@prefix _9".encode("latin-1") not in s)
diff --git a/test/test_issue184.py b/test/test_issue184.py
index b4fba8d3..7693dd1c 100644
--- a/test/test_issue184.py
+++ b/test/test_issue184.py
@@ -12,8 +12,8 @@ def test_escaping_of_triple_doublequotes():
is emitted by the serializer, which in turn cannot be parsed correctly.
"""
g = ConjunctiveGraph()
- g.add((URIRef('http://foobar'), URIRef('http://fooprop'), Literal('abc\ndef"""""')))
+ g.add((URIRef("http://foobar"), URIRef("http://fooprop"), Literal('abc\ndef"""""')))
# assert g.serialize(format='n3') == '@prefix ns1: <http:// .\n\nns1:foobar ns1:fooprop """abc\ndef\\"\\"\\"\\"\\"""" .\n\n'
g2 = ConjunctiveGraph()
- g2.parse(data=g.serialize(format='n3'), format='n3')
+ g2.parse(data=g.serialize(format="n3"), format="n3")
assert g.isomorphic(g2) is True
diff --git a/test/test_issue190.py b/test/test_issue190.py
index e5173eff..f8ab37e7 100644
--- a/test/test_issue190.py
+++ b/test/test_issue190.py
@@ -4,7 +4,8 @@ from rdflib.graph import ConjunctiveGraph
from rdflib.parser import StringInputSource
import textwrap
-prefix = textwrap.dedent('''\
+prefix = textwrap.dedent(
+ """\
@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> .
@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#> .
@prefix nco: <http://www.semanticdesktop.org/ontologies/2007/03/22/nco#> .
@@ -15,9 +16,11 @@ prefix = textwrap.dedent('''\
@prefix dc: <http://dublincore.org/documents/2010/10/11/dces/#> .
@prefix nmm: <http://library.gnome.org/devel/ontology/unstable/nmm-classes.html#> .
@prefix nao: <http://www.semanticdesktop.org/ontologies/2007/08/15/nao#> .
- ''')
+ """
+)
-meta = textwrap.dedent(u"""\
+meta = textwrap.dedent(
+ u"""\
a nfo:PaginatedTextDocument ;
nie:title "SV Meldung" ;
nco:creator [ a nco:Contact ;
@@ -30,16 +33,20 @@ a nfo:PaginatedTextDocument ;
nie:plainTextContent "%s" .
} } WHERE { {
?tag1 a nao:Tag ; nao:prefLabel "()" .
-""")
+"""
+)
test_string1 = u"""\
Betriebsnummer der Einzugsstelle:\nKnappschaft\n980 0000 6\nWICHTIGES DOKUMENT - SORGFÄLTIG AUFBEWAHREN!\n """
def test1():
- meta1 = meta.encode('utf-8') % test_string1.encode('utf-8')
+ meta1 = meta.encode("utf-8") % test_string1.encode("utf-8")
graph = ConjunctiveGraph()
- graph.parse(StringInputSource(prefix + '<http://example.org/>' + meta1), format='n3')
+ graph.parse(
+ StringInputSource(prefix + "<http://example.org/>" + meta1), format="n3"
+ )
+
test_string2 = u"""\
Betriebsnummer der Einzugsstelle:
@@ -50,8 +57,11 @@ WICHTIGES DOKUMENT - SORGFÄLTIG AUFBEWAHREN!
def test2():
- meta2 = meta.encode('utf-8') % test_string2.encode('utf-8')
+ meta2 = meta.encode("utf-8") % test_string2.encode("utf-8")
graph = ConjunctiveGraph()
- graph.parse(StringInputSource(prefix + '<http://example.org/>' + meta2), format='n3')
+ graph.parse(
+ StringInputSource(prefix + "<http://example.org/>" + meta2), format="n3"
+ )
+
raise SkipTest("Known issue, with newlines in text")
diff --git a/test/test_issue200.py b/test/test_issue200.py
index 80ce3f31..3fb76894 100644
--- a/test/test_issue200.py
+++ b/test/test_issue200.py
@@ -9,11 +9,11 @@ try:
import os.pipe
except ImportError:
from nose import SkipTest
- raise SkipTest('No os.fork() and/or os.pipe() on this platform, skipping')
+ raise SkipTest("No os.fork() and/or os.pipe() on this platform, skipping")
-class TestRandomSeedInFork(unittest.TestCase):
+class TestRandomSeedInFork(unittest.TestCase):
def test_bnode_id_differs_in_fork(self):
"""Checks that os.fork()ed child processes produce a
different sequence of BNode ids from the parent process.
@@ -28,14 +28,15 @@ class TestRandomSeedInFork(unittest.TestCase):
os.waitpid(pid, 0) # make sure the child process gets cleaned up
else:
os.close(r)
- w = os.fdopen(w, 'w')
+ w = os.fdopen(w, "w")
cb = rdflib.term.BNode()
w.write(cb)
w.close()
os._exit(0)
- assert txt != str(pb1), "Parent process BNode id: " + \
- "%s, child process BNode id: %s" % (
- txt, str(pb1))
+ assert txt != str(pb1), (
+ "Parent process BNode id: "
+ + "%s, child process BNode id: %s" % (txt, str(pb1))
+ )
if __name__ == "__main__":
diff --git a/test/test_issue209.py b/test/test_issue209.py
index 1feb0615..083d763d 100644
--- a/test/test_issue209.py
+++ b/test/test_issue209.py
@@ -11,7 +11,6 @@ def makeNode():
class TestRandomSeedInThread(unittest.TestCase):
-
def test_bnode_id_gen_in_thread(self):
"""
"""
diff --git a/test/test_issue223.py b/test/test_issue223.py
index e1981a30..ab61d9d8 100644
--- a/test/test_issue223.py
+++ b/test/test_issue223.py
@@ -11,12 +11,14 @@ ttl = """
def test_collection_with_duplicates():
g = Graph().parse(data=ttl, format="turtle")
- for _, _, o in g.triples((URIRef("http://example.org/s"), URIRef("http://example.org/p"), None)):
+ for _, _, o in g.triples(
+ (URIRef("http://example.org/s"), URIRef("http://example.org/p"), None)
+ ):
break
c = g.collection(o)
assert list(c) == list(URIRef("http://example.org/" + x) for x in ["a", "b", "a"])
assert len(c) == 3
-if __name__ == '__main__':
+if __name__ == "__main__":
test_collection_with_duplicates()
diff --git a/test/test_issue247.py b/test/test_issue247.py
index 780d578b..747dd1e0 100644
--- a/test/test_issue247.py
+++ b/test/test_issue247.py
@@ -31,7 +31,6 @@ passxml = """\
class TestXMLLiteralwithLangAttr(unittest.TestCase):
-
def test_successful_parse_of_literal_without_xmllang_attr(self):
"""
Test parse of Literal without xmllang attr passes
diff --git a/test/test_issue248.py b/test/test_issue248.py
index 4cc490a6..528e81a2 100644
--- a/test/test_issue248.py
+++ b/test/test_issue248.py
@@ -3,7 +3,6 @@ import unittest
class TestSerialization(unittest.TestCase):
-
def test_issue_248(self):
"""
Ed Summers Thu, 24 May 2007 12:21:17 -0700
@@ -63,31 +62,22 @@ class TestSerialization(unittest.TestCase):
"""
graph = rdflib.Graph()
- DC = rdflib.Namespace('http://purl.org/dc/terms/')
- SKOS = rdflib.Namespace('http://www.w3.org/2004/02/skos/core#')
- LCCO = rdflib.Namespace('http://loc.gov/catdir/cpso/lcco/')
-
- graph.bind('dc', DC)
- graph.bind('skos', SKOS)
- graph.bind('lcco', LCCO)
-
- concept = rdflib.URIRef(LCCO['1'])
- graph.add(
- (concept,
- rdflib.RDF.type,
- SKOS['Concept']))
- graph.add(
- (concept,
- SKOS['prefLabel'],
- rdflib.Literal('Scrapbooks')))
- graph.add(
- (concept,
- DC['LCC'],
- rdflib.Literal('AC999.0999 - AC999999.Z9999')))
- sg = graph.serialize(format='n3', base=LCCO).decode('utf8')
+ DC = rdflib.Namespace("http://purl.org/dc/terms/")
+ SKOS = rdflib.Namespace("http://www.w3.org/2004/02/skos/core#")
+ LCCO = rdflib.Namespace("http://loc.gov/catdir/cpso/lcco/")
+
+ graph.bind("dc", DC)
+ graph.bind("skos", SKOS)
+ graph.bind("lcco", LCCO)
+
+ concept = rdflib.URIRef(LCCO["1"])
+ graph.add((concept, rdflib.RDF.type, SKOS["Concept"]))
+ graph.add((concept, SKOS["prefLabel"], rdflib.Literal("Scrapbooks")))
+ graph.add((concept, DC["LCC"], rdflib.Literal("AC999.0999 - AC999999.Z9999")))
+ sg = graph.serialize(format="n3", base=LCCO).decode("utf8")
# See issue 248
# Actual test should be the inverse of the below ...
- self.assertTrue('<1> a skos:Concept ;' in sg, sg)
+ self.assertTrue("<1> a skos:Concept ;" in sg, sg)
if __name__ == "__main__":
diff --git a/test/test_issue274.py b/test/test_issue274.py
index 288d7857..79fc4d15 100644
--- a/test/test_issue274.py
+++ b/test/test_issue274.py
@@ -3,15 +3,18 @@ from nose.tools import eq_
from unittest import TestCase
from rdflib import BNode, Graph, Literal, Namespace, RDFS, XSD
-from rdflib.plugins.sparql.operators import register_custom_function, unregister_custom_function
+from rdflib.plugins.sparql.operators import (
+ register_custom_function,
+ unregister_custom_function,
+)
-EX = Namespace('http://example.org/')
+EX = Namespace("http://example.org/")
G = Graph()
G.add((BNode(), RDFS.label, Literal("bnode")))
NS = {
- 'ex': EX,
- 'rdfs': RDFS,
- 'xsd': XSD,
+ "ex": EX,
+ "rdfs": RDFS,
+ "xsd": XSD,
}
@@ -28,142 +31,145 @@ def teardown():
def test_cast_string_to_string():
- res = query('''SELECT (xsd:string("hello") as ?x) {}''')
+ res = query("""SELECT (xsd:string("hello") as ?x) {}""")
eq_(list(res)[0][0], Literal("hello", datatype=XSD.string))
def test_cast_int_to_string():
- res = query('''SELECT (xsd:string(42) as ?x) {}''')
+ res = query("""SELECT (xsd:string(42) as ?x) {}""")
eq_(list(res)[0][0], Literal("42", datatype=XSD.string))
def test_cast_float_to_string():
- res = query('''SELECT (xsd:string(3.14) as ?x) {}''')
+ res = query("""SELECT (xsd:string(3.14) as ?x) {}""")
eq_(list(res)[0][0], Literal("3.14", datatype=XSD.string))
def test_cast_bool_to_string():
- res = query('''SELECT (xsd:string(true) as ?x) {}''')
+ res = query("""SELECT (xsd:string(true) as ?x) {}""")
eq_(list(res)[0][0], Literal("true", datatype=XSD.string))
def test_cast_iri_to_string():
- res = query('''SELECT (xsd:string(<http://example.org/>) as ?x) {}''')
+ res = query("""SELECT (xsd:string(<http://example.org/>) as ?x) {}""")
eq_(list(res)[0][0], Literal("http://example.org/", datatype=XSD.string))
def test_cast_datetime_to_datetime():
- res = query('''SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:dateTime) as ?x) {}''')
+ res = query(
+ """SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:dateTime) as ?x) {}"""
+ )
eq_(list(res)[0][0], Literal("1970-01-01T00:00:00Z", datatype=XSD.dateTime))
def test_cast_string_to_datetime():
- res = query('''SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:string) as ?x) {}''')
+ res = query(
+ """SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:string) as ?x) {}"""
+ )
eq_(list(res)[0][0], Literal("1970-01-01T00:00:00Z", datatype=XSD.dateTime))
def test_cast_string_to_float():
- res = query('''SELECT (xsd:float("0.5") as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5") as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_int_to_float():
- res = query('''SELECT (xsd:float(1) as ?x) {}''')
+ res = query("""SELECT (xsd:float(1) as ?x) {}""")
eq_(list(res)[0][0], Literal("1", datatype=XSD.float))
def test_cast_float_to_float():
- res = query('''SELECT (xsd:float("0.5"^^xsd:float) as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5"^^xsd:float) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_double_to_float():
- res = query('''SELECT (xsd:float("0.5"^^xsd:double) as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5"^^xsd:double) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_decimal_to_float():
- res = query('''SELECT (xsd:float("0.5"^^xsd:decimal) as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5"^^xsd:decimal) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_string_to_double():
- res = query('''SELECT (xsd:double("0.5") as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5") as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_int_to_double():
- res = query('''SELECT (xsd:double(1) as ?x) {}''')
+ res = query("""SELECT (xsd:double(1) as ?x) {}""")
eq_(list(res)[0][0], Literal("1", datatype=XSD.double))
def test_cast_float_to_double():
- res = query('''SELECT (xsd:double("0.5"^^xsd:float) as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5"^^xsd:float) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_double_to_double():
- res = query('''SELECT (xsd:double("0.5"^^xsd:double) as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5"^^xsd:double) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_decimal_to_double():
- res = query('''SELECT (xsd:double("0.5"^^xsd:decimal) as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5"^^xsd:decimal) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_string_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5") as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5") as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_int_to_decimal():
- res = query('''SELECT (xsd:decimal(1) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal(1) as ?x) {}""")
eq_(list(res)[0][0], Literal("1", datatype=XSD.decimal))
def test_cast_float_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5"^^xsd:float) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5"^^xsd:float) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_double_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5"^^xsd:double) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5"^^xsd:double) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_decimal_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5"^^xsd:decimal) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5"^^xsd:decimal) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_string_to_int():
- res = query('''SELECT (xsd:integer("42") as ?x) {}''')
+ res = query("""SELECT (xsd:integer("42") as ?x) {}""")
eq_(list(res)[0][0], Literal("42", datatype=XSD.integer))
def test_cast_int_to_int():
- res = query('''SELECT (xsd:integer(42) as ?x) {}''')
+ res = query("""SELECT (xsd:integer(42) as ?x) {}""")
eq_(list(res)[0][0], Literal("42", datatype=XSD.integer))
def test_cast_string_to_bool():
- res = query('''SELECT (xsd:boolean("TRUE") as ?x) {}''')
+ res = query("""SELECT (xsd:boolean("TRUE") as ?x) {}""")
eq_(list(res)[0][0], Literal("true", datatype=XSD.boolean))
def test_cast_bool_to_bool():
- res = query('''SELECT (xsd:boolean(true) as ?x) {}''')
+ res = query("""SELECT (xsd:boolean(true) as ?x) {}""")
eq_(list(res)[0][0], Literal("true", datatype=XSD.boolean))
def test_cast_bool_to_bool():
- res = query('''SELECT (ex:f(42, "hello") as ?x) {}''')
+ res = query("""SELECT (ex:f(42, "hello") as ?x) {}""")
eq_(len(list(res)), 0)
class TestCustom(TestCase):
-
@staticmethod
def f(x, y):
return Literal("%s %s" % (x, y), datatype=XSD.string)
@@ -186,13 +192,13 @@ class TestCustom(TestCase):
unregister_custom_function(EX.f, lambda x, y: None)
def test_f(self):
- res = query('''SELECT (ex:f(42, "hello") as ?x) {}''')
+ res = query("""SELECT (ex:f(42, "hello") as ?x) {}""")
eq_(list(res)[0][0], Literal("42 hello", datatype=XSD.string))
def test_f_too_few_args(self):
- res = query('''SELECT (ex:f(42) as ?x) {}''')
+ res = query("""SELECT (ex:f(42) as ?x) {}""")
eq_(len(list(res)), 0)
def test_f_too_many_args(self):
- res = query('''SELECT (ex:f(42, "hello", "world") as ?x) {}''')
+ res = query("""SELECT (ex:f(42, "hello", "world") as ?x) {}""")
eq_(len(list(res)), 0)
diff --git a/test/test_issue363.py b/test/test_issue363.py
index 7fc6cb26..792c2441 100644
--- a/test/test_issue363.py
+++ b/test/test_issue363.py
@@ -1,7 +1,7 @@
import rdflib
from nose.tools import assert_raises
-data = '''<?xml version="1.0" encoding="utf-8"?>
+data = """<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:http="http://www.w3.org/2011/http#">
@@ -13,9 +13,9 @@ data = '''<?xml version="1.0" encoding="utf-8"?>
</http:HeaderElement>
</rdf:RDF>
-'''
+"""
-data2 = '''<?xml version="1.0" encoding="utf-8"?>
+data2 = """<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns="http://www.example.org/meeting_organization#">
@@ -26,11 +26,11 @@ data2 = '''<?xml version="1.0" encoding="utf-8"?>
</Location>
</rdf:Description>
</rdf:RDF>
-'''
+"""
def test_broken_rdfxml():
- #import ipdb; ipdb.set_trace()
+ # import ipdb; ipdb.set_trace()
def p():
rdflib.Graph().parse(data=data)
@@ -39,9 +39,9 @@ def test_broken_rdfxml():
def test_parsetype_resource():
g = rdflib.Graph().parse(data=data2)
- print(g.serialize(format='n3'))
+ print(g.serialize(format="n3"))
-if __name__ == '__main__':
+if __name__ == "__main__":
test_broken_rdfxml()
test_parsetype_resource()
diff --git a/test/test_issue379.py b/test/test_issue379.py
index 31dfce2b..348e3d0f 100644
--- a/test/test_issue379.py
+++ b/test/test_issue379.py
@@ -41,7 +41,7 @@ class TestBaseAllowsHash(TestCase):
permitted for an IRIREF:
http://www.w3.org/TR/2014/REC-turtle-20140225/#grammar-production-prefixID
"""
- self.g.parse(data=prefix_data, format='n3')
+ self.g.parse(data=prefix_data, format="n3")
self.assertIsInstance(next(self.g.subjects()), rdflib.URIRef)
def test_parse_successful_base_with_hash(self):
@@ -50,7 +50,7 @@ class TestBaseAllowsHash(TestCase):
permitted for an '@prefix' since both allow an IRIREF:
http://www.w3.org/TR/2014/REC-turtle-20140225/#grammar-production-base
"""
- self.g.parse(data=base_data, format='n3')
+ self.g.parse(data=base_data, format="n3")
self.assertIsInstance(next(self.g.subjects()), rdflib.URIRef)
diff --git a/test/test_issue381.py b/test/test_issue381.py
index 3ab21d88..a48cafe7 100644
--- a/test/test_issue381.py
+++ b/test/test_issue381.py
@@ -12,10 +12,9 @@ def test_no_spurious_semicolon():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -28,10 +27,9 @@ def test_one_spurious_semicolon():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -44,10 +42,9 @@ def test_one_spurious_semicolon_no_perdiod():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -60,10 +57,9 @@ def test_two_spurious_semicolons_no_period():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -76,10 +72,9 @@ def test_one_spurious_semicolons_bnode():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (BNode("a"), NS.b, NS.c),
- (BNode("a"), NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(BNode("a"), NS.b, NS.c), (BNode("a"), NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -98,11 +93,10 @@ def test_pathological():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- (NS.a, NS.f, NS.g),
- ])
+ expected.addN(
+ t + (expected,)
+ for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e), (NS.a, NS.f, NS.g),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -116,10 +110,9 @@ def test_mixing_spurious_semicolons_and_commas():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- (NS.a, NS.d, NS.f),
- ])
+ expected.addN(
+ t + (expected,)
+ for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e), (NS.a, NS.d, NS.f),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
diff --git a/test/test_issue432.py b/test/test_issue432.py
index 05d8258a..c0731eb0 100644
--- a/test/test_issue432.py
+++ b/test/test_issue432.py
@@ -12,7 +12,7 @@ def test_trig_default_graph():
<g1> { <d> <e> <f> . }
<g2> { <g> <h> <i> . }
"""
- ds.parse(data=data, format='trig', publicID=ds.default_context.identifier)
+ ds.parse(data=data, format="trig", publicID=ds.default_context.identifier)
assert len(list(ds.contexts())) == 3
assert len(list(ds.default_context)) == 2
diff --git a/test/test_issue446.py b/test/test_issue446.py
index 79cd41be..98c46578 100644
--- a/test/test_issue446.py
+++ b/test/test_issue446.py
@@ -7,16 +7,15 @@ from rdflib import Graph, URIRef, Literal
def test_sparql_unicode():
g = Graph()
trip = (
- URIRef('http://example.org/foo'),
- URIRef('http://example.org/bar'),
- URIRef(u'http://example.org/jörn')
+ URIRef("http://example.org/foo"),
+ URIRef("http://example.org/bar"),
+ URIRef(u"http://example.org/jörn"),
)
g.add(trip)
q = 'select ?s ?p ?o where { ?s ?p ?o . FILTER(lang(?o) = "") }'
r = list(g.query(q))
- assert r == [], \
- 'sparql query %r should return nothing but returns %r' % (q, r)
+ assert r == [], "sparql query %r should return nothing but returns %r" % (q, r)
-if __name__ == '__main__':
+if __name__ == "__main__":
test_sparql_unicode()
diff --git a/test/test_issue492.py b/test/test_issue492.py
index 754e5cbf..713ce7ac 100644
--- a/test/test_issue492.py
+++ b/test/test_issue492.py
@@ -6,7 +6,7 @@ import rdflib
def test_issue492():
- query = '''
+ query = """
prefix owl: <http://www.w3.org/2002/07/owl#>
prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
select ?x
@@ -15,7 +15,7 @@ def test_issue492():
?x rdf:rest/rdf:first _:6.
?x rdf:rest/rdf:first _:5.
}
- '''
+ """
print(rdflib.__version__)
g = rdflib.Graph()
diff --git a/test/test_issue523.py b/test/test_issue523.py
index 774167f3..2910cdd7 100644
--- a/test/test_issue523.py
+++ b/test/test_issue523.py
@@ -5,10 +5,12 @@ import rdflib
def test_issue523():
g = rdflib.Graph()
- r = g.query("SELECT (<../baz> as ?test) WHERE {}",
- base=rdflib.URIRef("http://example.org/foo/bar"))
+ r = g.query(
+ "SELECT (<../baz> as ?test) WHERE {}",
+ base=rdflib.URIRef("http://example.org/foo/bar"),
+ )
res = r.serialize(format="csv")
- assert res == b'test\r\nhttp://example.org/baz\r\n', repr(res)
+ assert res == b"test\r\nhttp://example.org/baz\r\n", repr(res)
# expected result:
# test
diff --git a/test/test_issue532.py b/test/test_issue532.py
index 422dd507..0e9fa89f 100644
--- a/test/test_issue532.py
+++ b/test/test_issue532.py
@@ -32,7 +32,7 @@ def test_issue532():
"""
g = Graph()
- g.parse(data=data, format='n3')
+ g.parse(data=data, format="n3")
getnewMeps = """
PREFIX lpv: <http://purl.org/linkedpolitics/vocabulary/>
diff --git a/test/test_issue545.py b/test/test_issue545.py
index 86c8723a..ea9f185b 100644
--- a/test/test_issue545.py
+++ b/test/test_issue545.py
@@ -1,4 +1,3 @@
-
from rdflib.plugins import sparql
from rdflib.namespace import RDFS, OWL, DC, SKOS
@@ -15,4 +14,5 @@ def test_issue():
?property rdfs:label | skos:altLabel ?label .
}
""",
- initNs={"rdfs": RDFS, "owl": OWL, "dc": DC, "skos": SKOS})
+ initNs={"rdfs": RDFS, "owl": OWL, "dc": DC, "skos": SKOS},
+ )
diff --git a/test/test_issue554.py b/test/test_issue554.py
index ba946cf4..4ea83d21 100644
--- a/test/test_issue554.py
+++ b/test/test_issue554.py
@@ -5,11 +5,10 @@ import rdflib
def test_sparql_empty_no_row():
g = rdflib.Graph()
- q = 'select ?whatever { }'
+ q = "select ?whatever { }"
r = list(g.query(q))
- assert r == [], \
- 'sparql query %s should return empty list but returns %s' % (q, r)
+ assert r == [], "sparql query %s should return empty list but returns %s" % (q, r)
-if __name__ == '__main__':
+if __name__ == "__main__":
test_sparql_empty_no_row()
diff --git a/test/test_issue563.py b/test/test_issue563.py
index 1ae8460d..58721236 100644
--- a/test/test_issue563.py
+++ b/test/test_issue563.py
@@ -25,22 +25,26 @@ def test_sample():
g = Graph()
results = set(tuple(i) for i in g.query(QUERY % ("SAMPLE", "SAMPLE")))
- assert results == set([
- (Literal(2), Literal(6), Literal(10)),
- (Literal(3), Literal(9), Literal(15)),
- (Literal(5), None, Literal(25)),
- ])
+ assert results == set(
+ [
+ (Literal(2), Literal(6), Literal(10)),
+ (Literal(3), Literal(9), Literal(15)),
+ (Literal(5), None, Literal(25)),
+ ]
+ )
def test_count():
g = Graph()
results = set(tuple(i) for i in g.query(QUERY % ("COUNT", "COUNT")))
- assert results == set([
- (Literal(2), Literal(1), Literal(1)),
- (Literal(3), Literal(1), Literal(1)),
- (Literal(5), Literal(0), Literal(1)),
- ])
+ assert results == set(
+ [
+ (Literal(2), Literal(1), Literal(1)),
+ (Literal(3), Literal(1), Literal(1)),
+ (Literal(5), Literal(0), Literal(1)),
+ ]
+ )
if __name__ == "__main__":
diff --git a/test/test_issue579.py b/test/test_issue579.py
index 9ba326b3..2420e077 100644
--- a/test/test_issue579.py
+++ b/test/test_issue579.py
@@ -6,9 +6,9 @@ from rdflib.namespace import FOAF, RDF
def test_issue579():
g = Graph()
- g.bind('foaf', FOAF)
+ g.bind("foaf", FOAF)
n = Namespace("http://myname/")
- g.add((n.bob, FOAF.name, Literal('bb')))
+ g.add((n.bob, FOAF.name, Literal("bb")))
# query is successful.
assert len(g.query("select ?n where { ?n foaf:name 'bb' . }")) == 1
# update is not.
diff --git a/test/test_issue604.py b/test/test_issue604.py
index aef19b8c..7a827241 100644
--- a/test/test_issue604.py
+++ b/test/test_issue604.py
@@ -6,7 +6,7 @@ from rdflib.collection import Collection
def test_issue604():
- EX = Namespace('http://ex.co/')
+ EX = Namespace("http://ex.co/")
g = Graph()
bn = BNode()
g.add((EX.s, EX.p, bn))
diff --git a/test/test_issue655.py b/test/test_issue655.py
index 1c640709..cac449f1 100644
--- a/test/test_issue655.py
+++ b/test/test_issue655.py
@@ -5,53 +5,27 @@ from rdflib.compare import to_isomorphic
class TestIssue655(unittest.TestCase):
-
def test_issue655(self):
# make sure that inf and nan are serialized correctly
- dt = XSD['double'].n3()
- self.assertEqual(
- Literal(float("inf"))._literal_n3(True),
- '"INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(float("-inf"))._literal_n3(True),
- '"-INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(float("nan"))._literal_n3(True),
- '"NaN"^^%s' % dt
- )
+ dt = XSD["double"].n3()
+ self.assertEqual(Literal(float("inf"))._literal_n3(True), '"INF"^^%s' % dt)
+ self.assertEqual(Literal(float("-inf"))._literal_n3(True), '"-INF"^^%s' % dt)
+ self.assertEqual(Literal(float("nan"))._literal_n3(True), '"NaN"^^%s' % dt)
- dt = XSD['decimal'].n3()
- self.assertEqual(
- Literal(Decimal("inf"))._literal_n3(True),
- '"INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(Decimal("-inf"))._literal_n3(True),
- '"-INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(Decimal("nan"))._literal_n3(True),
- '"NaN"^^%s' % dt
- )
+ dt = XSD["decimal"].n3()
+ self.assertEqual(Literal(Decimal("inf"))._literal_n3(True), '"INF"^^%s' % dt)
+ self.assertEqual(Literal(Decimal("-inf"))._literal_n3(True), '"-INF"^^%s' % dt)
+ self.assertEqual(Literal(Decimal("nan"))._literal_n3(True), '"NaN"^^%s' % dt)
self.assertEqual(
- Literal("inf", datatype=XSD['decimal'])._literal_n3(True),
- '"INF"^^%s' % dt
+ Literal("inf", datatype=XSD["decimal"])._literal_n3(True), '"INF"^^%s' % dt
)
# assert that non-numerical aren't changed
- self.assertEqual(
- Literal('inf')._literal_n3(True),
- '"inf"'
- )
- self.assertEqual(
- Literal('nan')._literal_n3(True),
- '"nan"'
- )
+ self.assertEqual(Literal("inf")._literal_n3(True), '"inf"')
+ self.assertEqual(Literal("nan")._literal_n3(True), '"nan"')
- PROV = Namespace('http://www.w3.org/ns/prov#')
+ PROV = Namespace("http://www.w3.org/ns/prov#")
bob = URIRef("http://example.org/object/Bob")
@@ -62,7 +36,7 @@ class TestIssue655(unittest.TestCase):
# Build g2 out of the deserialisation of g1 serialisation
g2 = Graph()
- g2.parse(data=g1.serialize(format='turtle'), format='turtle')
+ g2.parse(data=g1.serialize(format="turtle"), format="turtle")
self.assertTrue(to_isomorphic(g1) == to_isomorphic(g2))
diff --git a/test/test_issue715.py b/test/test_issue715.py
index 121e05fd..a2e21169 100644
--- a/test/test_issue715.py
+++ b/test/test_issue715.py
@@ -11,19 +11,18 @@ from rdflib import URIRef, Graph
def test_issue_715():
g = Graph()
a, b, x, y, z = [URIRef(s) for s in "abxyz"]
- isa = URIRef('isa')
+ isa = URIRef("isa")
g.add((a, isa, x))
g.add((a, isa, y))
g.add((b, isa, x))
- l1 = list(g.query('SELECT ?child ?parent WHERE {?child <isa> ?parent .}'))
- l2 = list(g.query('SELECT ?child ?parent WHERE {?child <isa>+ ?parent .}'))
+ l1 = list(g.query("SELECT ?child ?parent WHERE {?child <isa> ?parent .}"))
+ l2 = list(g.query("SELECT ?child ?parent WHERE {?child <isa>+ ?parent .}"))
assert len(l1) == len(l2)
assert set(l1) == set(l2)
- l3 = list(g.query('SELECT ?child ?parent WHERE {?child <isa>* ?parent .}'))
+ l3 = list(g.query("SELECT ?child ?parent WHERE {?child <isa>* ?parent .}"))
assert len(l3) == 7
- assert set(l3) == set(l1).union({(URIRef(n), URIRef(n)) for
- n in (a, b, x, y)})
+ assert set(l3) == set(l1).union({(URIRef(n), URIRef(n)) for n in (a, b, x, y)})
g.add((y, isa, z))
- l4 = list(g.query('SELECT ?child ?parent WHERE {?child <isa>* ?parent .}'))
+ l4 = list(g.query("SELECT ?child ?parent WHERE {?child <isa>* ?parent .}"))
assert len(l4) == 10
assert (a, z) in l4
diff --git a/test/test_issue733.py b/test/test_issue733.py
index bffeb400..2a6b612a 100644
--- a/test/test_issue733.py
+++ b/test/test_issue733.py
@@ -12,13 +12,12 @@ from rdflib.namespace import RDF, RDFS, NamespaceManager, Namespace
class TestIssue733(unittest.TestCase):
-
def test_issue_733(self):
g = Graph()
- example = Namespace('http://example.org/')
+ example = Namespace("http://example.org/")
g.add((example.S, example.P, example.O1))
g.add((example.S, example.P, example.O2))
- q = '''
+ q = """
prefix ex:<http://example.org/>
select ?st ?ot ?gt where {
{SELECT (count(*) as ?st) where {
@@ -34,20 +33,20 @@ class TestIssue733(unittest.TestCase):
FILTER (?o!=ex:O1 && ?s!=ex:O2)
}}
}
- '''
+ """
res = g.query(q)
assert len(res) == 1
results = [[lit.toPython() for lit in line] for line in res]
- assert results[0][0]== 2
+ assert results[0][0] == 2
assert results[0][1] == 1
assert results[0][2] == 1
def test_issue_733_independant(self):
g = Graph()
- example = Namespace('http://example.org/')
+ example = Namespace("http://example.org/")
g.add((example.S, example.P, example.O1))
g.add((example.S, example.P, example.O2))
- q = '''
+ q = """
prefix ex:<http://example.org/>
select ?st where {
{SELECT (count(*) as ?st) where {
@@ -55,12 +54,12 @@ class TestIssue733(unittest.TestCase):
FILTER (?s=ex:S)
}}
}
- '''
+ """
res = g.query(q)
assert len(res) == 1
results = [[lit.toPython() for lit in line] for line in res]
assert results[0][0] == 2
- q = '''
+ q = """
prefix ex:<http://example.org/>
select ?st where {
{SELECT (count(*) as ?st) where {
@@ -68,7 +67,7 @@ class TestIssue733(unittest.TestCase):
FILTER (?o=ex:O1)
}}
}
- '''
+ """
res = g.query(q)
results = [[lit.toPython() for lit in line] for line in res]
assert results[0][0] == 1
diff --git a/test/test_issue920.py b/test/test_issue920.py
index eb12edc4..7aafa794 100644
--- a/test/test_issue920.py
+++ b/test/test_issue920.py
@@ -14,22 +14,21 @@ import unittest
class TestIssue920(unittest.TestCase):
-
def test_issue_920(self):
g = Graph()
# NT tests
- g.parse(data='<a:> <b:> <c:> .', format='nt')
- g.parse(data='<http://a> <http://b> <http://c> .', format='nt')
- g.parse(data='<https://a> <http://> <http://c> .', format='nt')
+ g.parse(data="<a:> <b:> <c:> .", format="nt")
+ g.parse(data="<http://a> <http://b> <http://c> .", format="nt")
+ g.parse(data="<https://a> <http://> <http://c> .", format="nt")
# related parser tests
- g.parse(data='<a:> <b:> <c:> .', format='turtle')
- g.parse(data='<http://a> <http://b> <http://c> .', format='turtle')
- g.parse(data='<https://a> <http://> <http://c> .', format='turtle')
+ g.parse(data="<a:> <b:> <c:> .", format="turtle")
+ g.parse(data="<http://a> <http://b> <http://c> .", format="turtle")
+ g.parse(data="<https://a> <http://> <http://c> .", format="turtle")
- g.parse(data='<a:> <b:> <c:> .', format='n3')
- g.parse(data='<http://a> <http://b> <http://c> .', format='n3')
- g.parse(data='<https://a> <http://> <http://c> .', format='n3')
+ g.parse(data="<a:> <b:> <c:> .", format="n3")
+ g.parse(data="<http://a> <http://b> <http://c> .", format="n3")
+ g.parse(data="<https://a> <http://> <http://c> .", format="n3")
if __name__ == "__main__":
diff --git a/test/test_issue923.py b/test/test_issue923.py
index 3becb6f8..48f2e4de 100644
--- a/test/test_issue923.py
+++ b/test/test_issue923.py
@@ -32,4 +32,7 @@ RESULT_SOURCE = u"""\
def test_issue_923():
with StringIO(RESULT_SOURCE) as result_source:
- Result.parse(source=result_source, content_type="application/sparql-results+json;charset=utf-8")
+ Result.parse(
+ source=result_source,
+ content_type="application/sparql-results+json;charset=utf-8",
+ )
diff --git a/test/test_issue953.py b/test/test_issue953.py
index 1e211e12..879486d8 100644
--- a/test/test_issue953.py
+++ b/test/test_issue953.py
@@ -5,11 +5,11 @@ import unittest
class TestIssue953(unittest.TestCase):
-
def test_issue_939(self):
- lit = Literal(Fraction('2/3'))
- assert lit.datatype == URIRef('http://www.w3.org/2002/07/owl#rational')
+ lit = Literal(Fraction("2/3"))
+ assert lit.datatype == URIRef("http://www.w3.org/2002/07/owl#rational")
assert lit.n3() == '"2/3"^^<http://www.w3.org/2002/07/owl#rational>'
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/test_issue_git_200.py b/test/test_issue_git_200.py
index 32a4ba9f..84e06b1a 100644
--- a/test/test_issue_git_200.py
+++ b/test/test_issue_git_200.py
@@ -10,7 +10,8 @@ def test_broken_add():
nose.tools.assert_raises(AssertionError, lambda: g.addN([(1, 2, 3, g)]))
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_issue_git_336.py b/test/test_issue_git_336.py
index f3250107..6a8abb7c 100644
--- a/test/test_issue_git_336.py
+++ b/test/test_issue_git_336.py
@@ -8,7 +8,7 @@ import nose.tools
# stripped-down culprit:
-'''\
+"""\
@prefix fs: <http://freesurfer.net/fswiki/terms/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@@ -17,21 +17,24 @@ import nose.tools
prov:Entity ;
fs:mrisurf.c-cvs_version
"$Id: mrisurf.c,v 1.693.2.2 2011/04/27 19:21:05 nicks Exp $" .
-'''
+"""
def test_ns_localname_roundtrip():
- XNS = rdflib.Namespace('http://example.net/fs')
+ XNS = rdflib.Namespace("http://example.net/fs")
g = rdflib.Graph()
- g.bind('xns', str(XNS))
- g.add((
- rdflib.URIRef('http://example.com/thingy'),
- XNS['lowecase.xxx-xxx_xxx'], # <- not round trippable
- rdflib.Literal("Junk")))
- turtledump = g.serialize(format="turtle").decode('utf-8')
- xmldump = g.serialize().decode('utf-8')
+ g.bind("xns", str(XNS))
+ g.add(
+ (
+ rdflib.URIRef("http://example.com/thingy"),
+ XNS["lowecase.xxx-xxx_xxx"], # <- not round trippable
+ rdflib.Literal("Junk"),
+ )
+ )
+ turtledump = g.serialize(format="turtle").decode("utf-8")
+ xmldump = g.serialize().decode("utf-8")
g1 = rdflib.Graph()
g1.parse(data=xmldump)
@@ -39,7 +42,8 @@ def test_ns_localname_roundtrip():
g1.parse(data=turtledump, format="turtle")
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_literal.py b/test/test_literal.py
index 0a20d85e..8124f99d 100644
--- a/test/test_literal.py
+++ b/test/test_literal.py
@@ -34,7 +34,7 @@ class TestLiteral(unittest.TestCase):
"""
g = rdflib.Graph()
g.parse(data=d)
- a = rdflib.Literal('a\\b')
+ a = rdflib.Literal("a\\b")
b = list(g.objects())[0]
self.assertEqual(a, b)
@@ -45,8 +45,9 @@ class TestLiteral(unittest.TestCase):
class TestNew(unittest.TestCase):
def testCantPassLangAndDatatype(self):
- self.assertRaises(TypeError,
- Literal, 'foo', lang='en', datatype=URIRef("http://example.com/"))
+ self.assertRaises(
+ TypeError, Literal, "foo", lang="en", datatype=URIRef("http://example.com/")
+ )
def testFromOtherLiteral(self):
l = Literal(1)
@@ -71,21 +72,26 @@ class TestNew(unittest.TestCase):
class TestRepr(unittest.TestCase):
def testOmitsMissingDatatypeAndLang(self):
- self.assertEqual(repr(Literal("foo")),
- uformat("rdflib.term.Literal(u'foo')"))
+ self.assertEqual(repr(Literal("foo")), uformat("rdflib.term.Literal(u'foo')"))
def testOmitsMissingDatatype(self):
- self.assertEqual(repr(Literal("foo", lang='en')),
- uformat("rdflib.term.Literal(u'foo', lang='en')"))
+ self.assertEqual(
+ repr(Literal("foo", lang="en")),
+ uformat("rdflib.term.Literal(u'foo', lang='en')"),
+ )
def testOmitsMissingLang(self):
self.assertEqual(
- repr(Literal("foo", datatype=URIRef('http://example.com/'))),
- uformat("rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"))
+ repr(Literal("foo", datatype=URIRef("http://example.com/"))),
+ uformat(
+ "rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"
+ ),
+ )
def testSubclassNameAppearsInRepr(self):
class MyLiteral(Literal):
pass
+
x = MyLiteral(u"foo")
self.assertEqual(repr(x), uformat("MyLiteral(u'foo')"))
@@ -97,42 +103,41 @@ class TestDoubleOutput(unittest.TestCase):
out = vv._literal_n3(use_plain=True)
self.assertTrue(out in ["8.8e-01", "0.88"], out)
+
class TestParseBoolean(unittest.TestCase):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/913"""
+
def testTrueBoolean(self):
- test_value = Literal("tRue", datatype = _XSD_BOOLEAN)
+ test_value = Literal("tRue", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
- test_value = Literal("1",datatype = _XSD_BOOLEAN)
+ test_value = Literal("1", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
def testFalseBoolean(self):
- test_value = Literal("falsE", datatype = _XSD_BOOLEAN)
+ test_value = Literal("falsE", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
- test_value = Literal("0",datatype = _XSD_BOOLEAN)
+ test_value = Literal("0", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
def testNonFalseBoolean(self):
- test_value = Literal("abcd", datatype = _XSD_BOOLEAN)
+ test_value = Literal("abcd", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
- test_value = Literal("10",datatype = _XSD_BOOLEAN)
+ test_value = Literal("10", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
-
class TestBindings(unittest.TestCase):
-
def testBinding(self):
-
class a:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
- return '<<<%s>>>' % self.v
+ return "<<<%s>>>" % self.v
- dtA = rdflib.URIRef('urn:dt:a')
+ dtA = rdflib.URIRef("urn:dt:a")
bind(dtA, a)
va = a("<<<2>>>")
@@ -149,10 +154,10 @@ class TestBindings(unittest.TestCase):
self.v = v[3:-3]
def __str__(self):
- return 'B%s' % self.v
+ return "B%s" % self.v
- dtB = rdflib.URIRef('urn:dt:b')
- bind(dtB, b, None, lambda x: '<<<%s>>>' % x)
+ dtB = rdflib.URIRef("urn:dt:b")
+ bind(dtB, b, None, lambda x: "<<<%s>>>" % x)
vb = b("<<<3>>>")
lb = Literal(vb, normalize=True)
@@ -160,16 +165,15 @@ class TestBindings(unittest.TestCase):
self.assertEqual(lb.datatype, dtB)
def testSpecificBinding(self):
-
def lexify(s):
return "--%s--" % s
def unlexify(s):
return s[2:-2]
- datatype = rdflib.URIRef('urn:dt:mystring')
+ datatype = rdflib.URIRef("urn:dt:mystring")
- #Datatype-specific rule
+ # Datatype-specific rule
bind(datatype, str, unlexify, lexify, datatype_specific=True)
s = "Hello"
diff --git a/test/test_memory_store.py b/test/test_memory_store.py
index f579250e..546d12ad 100644
--- a/test/test_memory_store.py
+++ b/test/test_memory_store.py
@@ -1,21 +1,21 @@
import unittest
import rdflib
-rdflib.plugin.register('Memory', rdflib.store.Store,
- 'rdflib.plugins.memory', 'Memory')
+rdflib.plugin.register("Memory", rdflib.store.Store, "rdflib.plugins.memory", "Memory")
class StoreTestCase(unittest.TestCase):
-
def test_memory_store(self):
g = rdflib.Graph("Memory")
subj1 = rdflib.URIRef("http://example.org/foo#bar1")
pred1 = rdflib.URIRef("http://example.org/foo#bar2")
obj1 = rdflib.URIRef("http://example.org/foo#bar3")
triple1 = (subj1, pred1, obj1)
- triple2 = (subj1,
- rdflib.URIRef("http://example.org/foo#bar4"),
- rdflib.URIRef("http://example.org/foo#bar5"))
+ triple2 = (
+ subj1,
+ rdflib.URIRef("http://example.org/foo#bar4"),
+ rdflib.URIRef("http://example.org/foo#bar5"),
+ )
g.add(triple1)
self.assertTrue(len(g) == 1)
g.add(triple2)
@@ -27,5 +27,5 @@ class StoreTestCase(unittest.TestCase):
g.serialize()
-if __name__ == '__main__':
- unittest.main(defaultTest='test_suite')
+if __name__ == "__main__":
+ unittest.main(defaultTest="test_suite")
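
The Memory store test above doubles as a minimal usage recipe: register the store plugin under a name, then pass that name to Graph(). A sketch using only the calls shown in the hunk:

import rdflib

rdflib.plugin.register("Memory", rdflib.store.Store, "rdflib.plugins.memory", "Memory")

g = rdflib.Graph("Memory")
g.add(
    (
        rdflib.URIRef("http://example.org/foo#bar1"),
        rdflib.URIRef("http://example.org/foo#bar2"),
        rdflib.URIRef("http://example.org/foo#bar3"),
    )
)
print(len(g))  # 1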
diff --git a/test/test_mulpath_n3.py b/test/test_mulpath_n3.py
index f0bbda73..f4f26dc4 100644
--- a/test/test_mulpath_n3.py
+++ b/test/test_mulpath_n3.py
@@ -4,6 +4,6 @@ from rdflib import RDFS, URIRef
def test_mulpath_n3():
- uri = 'http://example.com/foo'
+ uri = "http://example.com/foo"
n3 = (URIRef(uri) * ZeroOrMore).n3()
- assert n3 == '<' + uri + '>*'
+ assert n3 == "<" + uri + ">*"
diff --git a/test/test_n3.py b/test/test_n3.py
index 48a77eb4..9a378843 100644
--- a/test/test_n3.py
+++ b/test/test_n3.py
@@ -61,7 +61,6 @@ n3:context a rdf:Property; rdfs:domain n3:statement;
class TestN3Case(unittest.TestCase):
-
def setUp(self):
pass
@@ -92,12 +91,10 @@ class TestN3Case(unittest.TestCase):
g = Graph()
g.parse(data=input, format="n3")
print(list(g))
- self.assertTrue((None, None, Literal('Foo')) in g)
- self.assertTrue(
- (URIRef('http://example.com/doc/bar'), None, None) in g)
- self.assertTrue(
- (URIRef('http://example.com/doc/doc2/bing'), None, None) in g)
- self.assertTrue((URIRef('http://test.com/bong'), None, None) in g)
+ self.assertTrue((None, None, Literal("Foo")) in g)
+ self.assertTrue((URIRef("http://example.com/doc/bar"), None, None) in g)
+ self.assertTrue((URIRef("http://example.com/doc/doc2/bing"), None, None) in g)
+ self.assertTrue((URIRef("http://test.com/bong"), None, None) in g)
def testBaseExplicit(self):
"""
@@ -114,21 +111,24 @@ class TestN3Case(unittest.TestCase):
<bar> :name "Bar" .
"""
g = Graph()
- g.parse(data=input, publicID='http://blah.com/', format="n3")
+ g.parse(data=input, publicID="http://blah.com/", format="n3")
print(list(g))
- self.assertTrue(
- (URIRef('http://blah.com/foo'), None, Literal('Foo')) in g)
- self.assertTrue(
- (URIRef('http://example.com/doc/bar'), None, None) in g)
+ self.assertTrue((URIRef("http://blah.com/foo"), None, Literal("Foo")) in g)
+ self.assertTrue((URIRef("http://example.com/doc/bar"), None, None) in g)
def testBaseSerialize(self):
g = Graph()
- g.add((URIRef('http://example.com/people/Bob'), URIRef(
- 'urn:knows'), URIRef('http://example.com/people/Linda')))
- s = g.serialize(base='http://example.com/', format='n3')
- self.assertTrue('<people/Bob>'.encode("latin-1") in s)
+ g.add(
+ (
+ URIRef("http://example.com/people/Bob"),
+ URIRef("urn:knows"),
+ URIRef("http://example.com/people/Linda"),
+ )
+ )
+ s = g.serialize(base="http://example.com/", format="n3")
+ self.assertTrue("<people/Bob>".encode("latin-1") in s)
g2 = ConjunctiveGraph()
- g2.parse(data=s, publicID='http://example.com/', format='n3')
+ g2.parse(data=s, publicID="http://example.com/", format="n3")
self.assertEqual(list(g), list(g2))
def testIssue23(self):
@@ -192,7 +192,8 @@ foo-bar:Ex foo-bar:name "Test" . """
g = Graph()
g.parse(
data="@prefix a.1: <http://example.org/> .\n a.1:cake <urn:x> <urn:y> . \n",
- format='n3')
+ format="n3",
+ )
def testModel(self):
g = ConjunctiveGraph()
@@ -215,47 +216,62 @@ foo-bar:Ex foo-bar:name "Test" . """
g = ConjunctiveGraph()
try:
g.parse(
- "http://groups.csail.mit.edu/dig/2005/09/rein/examples/troop42-policy.n3", format="n3")
+ "http://groups.csail.mit.edu/dig/2005/09/rein/examples/troop42-policy.n3",
+ format="n3",
+ )
except URLError:
from nose import SkipTest
- raise SkipTest(
- 'No network to retrieve the information, skipping test')
+
+ raise SkipTest("No network to retrieve the information, skipping test")
def testSingleQuotedLiterals(self):
- test_data = ["""@prefix : <#> . :s :p 'o' .""",
- """@prefix : <#> . :s :p '''o''' ."""]
+ test_data = [
+ """@prefix : <#> . :s :p 'o' .""",
+ """@prefix : <#> . :s :p '''o''' .""",
+ ]
for data in test_data:
# N3 doesn't accept single quotes around string literals
g = ConjunctiveGraph()
- self.assertRaises(BadSyntax, g.parse,
- data=data, format='n3')
+ self.assertRaises(BadSyntax, g.parse, data=data, format="n3")
g = ConjunctiveGraph()
- g.parse(data=data, format='turtle')
+ g.parse(data=data, format="turtle")
self.assertEqual(len(g), 1)
for _, _, o in g:
- self.assertEqual(o, Literal('o'))
+ self.assertEqual(o, Literal("o"))
def testEmptyPrefix(self):
# this is issue https://github.com/RDFLib/rdflib/issues/312
g1 = Graph()
- g1.parse(data=":a :b :c .", format='n3')
+ g1.parse(data=":a :b :c .", format="n3")
g2 = Graph()
- g2.parse(data="@prefix : <#> . :a :b :c .", format='n3')
+ g2.parse(data="@prefix : <#> . :a :b :c .", format="n3")
assert set(g1) == set(
- g2), 'Document with declared empty prefix must match default #'
+ g2
+ ), "Document with declared empty prefix must match default #"
class TestRegularExpressions(unittest.TestCase):
def testExponents(self):
signs = ("", "+", "-")
- mantissas = ("1", "1.", ".1",
- "12", "12.", "1.2", ".12",
- "123", "123.", "12.3", "1.23", ".123")
+ mantissas = (
+ "1",
+ "1.",
+ ".1",
+ "12",
+ "12.",
+ "1.2",
+ ".12",
+ "123",
+ "123.",
+ "12.3",
+ "1.23",
+ ".123",
+ )
es = "eE"
exps = ("1", "12", "+1", "-1", "+12", "-12")
for parts in itertools.product(signs, mantissas, es, exps):
@@ -269,5 +285,5 @@ class TestRegularExpressions(unittest.TestCase):
self.assertFalse(exponent_syntax.match(expstring))
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
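
Several of the reformatted assertions above depend on publicID acting as the base URI when parsing N3. A minimal sketch with made-up data (the prefix and triple are illustrative, not taken from the test file):

from rdflib import Graph, Literal, URIRef

data = """
@prefix : <http://example.com/ns#> .
<foo> :name "Foo" .
"""
g = Graph()
g.parse(data=data, publicID="http://blah.com/", format="n3")
# the relative <foo> resolves against the publicID
assert (URIRef("http://blah.com/foo"), None, Literal("Foo")) in g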
diff --git a/test/test_n3_suite.py b/test/test_n3_suite.py
index 21e6bcba..f2ab1ab6 100644
--- a/test/test_n3_suite.py
+++ b/test/test_n3_suite.py
@@ -11,19 +11,19 @@ except:
def _get_test_files_formats():
- skiptests = [
- ]
- for f in os.listdir('test/n3'):
+ skiptests = []
+ for f in os.listdir("test/n3"):
if f not in skiptests:
fpath = "test/n3/" + f
- if f.endswith('.rdf'):
- yield fpath, 'xml'
- elif f.endswith('.n3'):
- yield fpath, 'n3'
+ if f.endswith(".rdf"):
+ yield fpath, "xml"
+ elif f.endswith(".n3"):
+ yield fpath, "n3"
+
def all_n3_files():
skiptests = [
- 'test/n3/example-lots_of_graphs.n3', # only n3 can serialize QuotedGraph, no point in testing roundtrip
+ "test/n3/example-lots_of_graphs.n3", # only n3 can serialize QuotedGraph, no point in testing roundtrip
]
for fpath, fmt in _get_test_files_formats():
if fpath in skiptests:
@@ -31,15 +31,17 @@ def all_n3_files():
else:
yield fpath, fmt
+
def test_n3_writing():
for fpath, fmt in _get_test_files_formats():
- yield check_serialize_parse, fpath, fmt, 'n3'
+ yield check_serialize_parse, fpath, fmt, "n3"
if __name__ == "__main__":
if len(sys.argv) > 1:
- check_serialize_parse(sys.argv[1], 'n3', 'n3', True)
+ check_serialize_parse(sys.argv[1], "n3", "n3", True)
sys.exit()
else:
import nose
+
nose.main(defaultTest=__name__)
diff --git a/test/test_namespace.py b/test/test_namespace.py
index 7dd1a25f..48896fdc 100644
--- a/test/test_namespace.py
+++ b/test/test_namespace.py
@@ -6,53 +6,72 @@ from rdflib.term import URIRef
class NamespacePrefixTest(unittest.TestCase):
-
def test_compute_qname(self):
"""Test sequential assignment of unknown prefixes"""
g = Graph()
- self.assertEqual(g.compute_qname(URIRef("http://foo/bar/baz")),
- ("ns1", URIRef("http://foo/bar/"), "baz"))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://foo/bar/baz")),
+ ("ns1", URIRef("http://foo/bar/"), "baz"),
+ )
- self.assertEqual(g.compute_qname(URIRef("http://foo/bar#baz")),
- ("ns2", URIRef("http://foo/bar#"), "baz"))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://foo/bar#baz")),
+ ("ns2", URIRef("http://foo/bar#"), "baz"),
+ )
# should skip to ns4 when ns3 is already assigned
g.bind("ns3", URIRef("http://example.org/"))
- self.assertEqual(g.compute_qname(URIRef("http://blip/blop")),
- ("ns4", URIRef("http://blip/"), "blop"))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://blip/blop")),
+ ("ns4", URIRef("http://blip/"), "blop"),
+ )
# should return empty qnames correctly
- self.assertEqual(g.compute_qname(URIRef("http://foo/bar/")),
- ("ns1", URIRef("http://foo/bar/"), ""))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://foo/bar/")),
+ ("ns1", URIRef("http://foo/bar/"), ""),
+ )
def test_reset(self):
- data = ('@prefix a: <http://example.org/a> .\n'
- 'a: <http://example.org/b> <http://example.org/c> .')
- graph = Graph().parse(data=data, format='turtle')
+ data = (
+ "@prefix a: <http://example.org/a> .\n"
+ "a: <http://example.org/b> <http://example.org/c> ."
+ )
+ graph = Graph().parse(data=data, format="turtle")
for p, n in tuple(graph.namespaces()):
graph.store._IOMemory__namespace.pop(p)
graph.store._IOMemory__prefix.pop(n)
graph.namespace_manager.reset()
self.assertFalse(tuple(graph.namespaces()))
- u = URIRef('http://example.org/a')
- prefix, namespace, name = graph.namespace_manager.compute_qname(u, generate=True)
+ u = URIRef("http://example.org/a")
+ prefix, namespace, name = graph.namespace_manager.compute_qname(
+ u, generate=True
+ )
self.assertNotEqual(namespace, u)
def test_reset_preserve_prefixes(self):
- data = ('@prefix a: <http://example.org/a> .\n'
- 'a: <http://example.org/b> <http://example.org/c> .')
- graph = Graph().parse(data=data, format='turtle')
+ data = (
+ "@prefix a: <http://example.org/a> .\n"
+ "a: <http://example.org/b> <http://example.org/c> ."
+ )
+ graph = Graph().parse(data=data, format="turtle")
graph.namespace_manager.reset()
self.assertTrue(tuple(graph.namespaces()))
- u = URIRef('http://example.org/a')
- prefix, namespace, name = graph.namespace_manager.compute_qname(u, generate=True)
+ u = URIRef("http://example.org/a")
+ prefix, namespace, name = graph.namespace_manager.compute_qname(
+ u, generate=True
+ )
self.assertEqual(namespace, u)
def test_n3(self):
g = Graph()
- g.add((URIRef("http://example.com/foo"),
- URIRef("http://example.com/bar"),
- URIRef("http://example.com/baz")))
+ g.add(
+ (
+ URIRef("http://example.com/foo"),
+ URIRef("http://example.com/bar"),
+ URIRef("http://example.com/baz"),
+ )
+ )
n3 = g.serialize(format="n3")
# Gunnar disagrees that this is right:
# self.assertTrue("<http://example.com/foo> ns1:bar <http://example.com/baz> ." in n3)
@@ -62,12 +81,21 @@ class NamespacePrefixTest(unittest.TestCase):
def test_n32(self):
        # this tests that prefixes are not generated for subjects/objects
g = Graph()
- g.add((URIRef("http://example1.com/foo"),
- URIRef("http://example2.com/bar"),
- URIRef("http://example3.com/baz")))
+ g.add(
+ (
+ URIRef("http://example1.com/foo"),
+ URIRef("http://example2.com/bar"),
+ URIRef("http://example3.com/baz"),
+ )
+ )
n3 = g.serialize(format="n3")
- self.assertTrue("<http://example1.com/foo> ns1:bar <http://example3.com/baz> .".encode("latin-1") in n3)
+ self.assertTrue(
+ "<http://example1.com/foo> ns1:bar <http://example3.com/baz> .".encode(
+ "latin-1"
+ )
+ in n3
+ )
def test_closed_namespace(self):
"""Tests terms both in an out of the ClosedNamespace FOAF"""
@@ -82,4 +110,7 @@ class NamespacePrefixTest(unittest.TestCase):
self.assertRaises(KeyError, add_not_in_namespace, "firstName")
# a property name within the core FOAF namespace
- self.assertEqual(add_not_in_namespace("givenName"), URIRef("http://xmlns.com/foaf/0.1/givenName"))
+ self.assertEqual(
+ add_not_in_namespace("givenName"),
+ URIRef("http://xmlns.com/foaf/0.1/givenName"),
+ )
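
The compute_qname assertions above document the sequential ns1, ns2, ... prefix assignment; a condensed sketch of the same call:

from rdflib import Graph
from rdflib.term import URIRef

g = Graph()
print(g.compute_qname(URIRef("http://foo/bar/baz")))
# ('ns1', rdflib.term.URIRef('http://foo/bar/'), 'baz')
print(g.compute_qname(URIRef("http://foo/bar#baz")))
# ('ns2', rdflib.term.URIRef('http://foo/bar#'), 'baz')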
diff --git a/test/test_nodepickler.py b/test/test_nodepickler.py
index 31a667da..970ec232 100644
--- a/test/test_nodepickler.py
+++ b/test/test_nodepickler.py
@@ -7,7 +7,7 @@ from rdflib.store import NodePickler
# same as nt/more_literals.nt
cases = [
- 'no quotes',
+ "no quotes",
"single ' quote",
'double " quote',
'triple """ quotes',
@@ -15,7 +15,7 @@ cases = [
'"',
"'",
'"\'"',
- '\\', # len 1
+ "\\", # len 1
'\\"', # len 2
'\\\\"', # len 3
'\\"\\', # len 3
@@ -24,12 +24,13 @@ cases = [
class UtilTestCase(unittest.TestCase):
-
def test_to_bits_from_bits_round_trip(self):
np = NodePickler()
- a = Literal(u'''A test with a \\n (backslash n), "\u00a9" , and newline \n and a second line.
-''')
+ a = Literal(
+ u"""A test with a \\n (backslash n), "\u00a9" , and newline \n and a second line.
+"""
+ )
b = np.loads(np.dumps(a))
self.assertEqual(a, b)
@@ -49,5 +50,5 @@ class UtilTestCase(unittest.TestCase):
self.assertEqual(np._objects, np2._objects)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
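
The round-trip test above is essentially the whole NodePickler contract: dumps() a term and loads() it back unchanged. A minimal sketch:

from rdflib import Literal
from rdflib.store import NodePickler

np = NodePickler()
lit = Literal("A value with a \\ backslash and a\nnewline.")
assert np.loads(np.dumps(lit)) == lit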
diff --git a/test/test_nquads.py b/test/test_nquads.py
index 72d15ea7..c25bc7ed 100644
--- a/test/test_nquads.py
+++ b/test/test_nquads.py
@@ -1,11 +1,10 @@
import unittest
from rdflib import ConjunctiveGraph, URIRef, Namespace
-TEST_BASE = 'test/nquads.rdflib'
+TEST_BASE = "test/nquads.rdflib"
class NQuadsParserTest(unittest.TestCase):
-
def _load_example(self):
g = ConjunctiveGraph()
with open("test/nquads.rdflib/example.nquads", "rb") as data:
@@ -46,22 +45,26 @@ class NQuadsParserTest(unittest.TestCase):
uri1 = URIRef("http://example.org/mygraph1")
uri2 = URIRef("http://example.org/mygraph2")
- bob = URIRef(u'urn:bob')
- likes = URIRef(u'urn:likes')
- pizza = URIRef(u'urn:pizza')
+ bob = URIRef(u"urn:bob")
+ likes = URIRef(u"urn:likes")
+ pizza = URIRef(u"urn:pizza")
g.get_context(uri1).add((bob, likes, pizza))
g.get_context(uri2).add((bob, likes, pizza))
- s = g.serialize(format='nquads')
- self.assertEqual(len([x for x in s.split("\n".encode("latin-1")) if x.strip()]), 2)
+ s = g.serialize(format="nquads")
+ self.assertEqual(
+ len([x for x in s.split("\n".encode("latin-1")) if x.strip()]), 2
+ )
g2 = ConjunctiveGraph()
- g2.parse(data=s, format='nquads')
+ g2.parse(data=s, format="nquads")
self.assertEqual(len(g), len(g2))
- self.assertEqual(sorted(x.identifier for x in g.contexts()),
- sorted(x.identifier for x in g2.contexts()))
+ self.assertEqual(
+ sorted(x.identifier for x in g.contexts()),
+ sorted(x.identifier for x in g2.contexts()),
+ )
if __name__ == "__main__":
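
The serialization test above is also the shortest recipe for writing named graphs as N-Quads: add triples to named contexts of a ConjunctiveGraph, serialize, and parse back. A sketch using only calls from the hunk (serialize() returns bytes in this rdflib version):

from rdflib import ConjunctiveGraph, URIRef

g = ConjunctiveGraph()
bob, likes, pizza = URIRef("urn:bob"), URIRef("urn:likes"), URIRef("urn:pizza")
g.get_context(URIRef("http://example.org/mygraph1")).add((bob, likes, pizza))
g.get_context(URIRef("http://example.org/mygraph2")).add((bob, likes, pizza))

s = g.serialize(format="nquads")
g2 = ConjunctiveGraph()
g2.parse(data=s, format="nquads")
assert len(g2) == len(g)  # one quad per named context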
diff --git a/test/test_nquads_w3c.py b/test/test_nquads_w3c.py
index f12850d2..02d79576 100644
--- a/test/test_nquads_w3c.py
+++ b/test/test_nquads_w3c.py
@@ -13,7 +13,7 @@ def nquads(test):
g = ConjunctiveGraph()
try:
- g.parse(test.action, format='nquads')
+ g.parse(test.action, format="nquads")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
except:
@@ -21,14 +21,11 @@ def nquads(test):
raise
-testers = {
- RDFT.TestNQuadsPositiveSyntax: nquads,
- RDFT.TestNQuadsNegativeSyntax: nquads
-}
+testers = {RDFT.TestNQuadsPositiveSyntax: nquads, RDFT.TestNQuadsNegativeSyntax: nquads}
def test_nquads(tests=None):
- for t in nose_tests(testers, 'test/w3c/nquads/manifest.ttl'):
+ for t in nose_tests(testers, "test/w3c/nquads/manifest.ttl"):
if tests:
for test in tests:
if test in t[1].uri:
@@ -39,7 +36,7 @@ def test_nquads(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_nquads, 'rdflib_nquads')
+ nose_tst_earl_report(test_nquads, "rdflib_nquads")
diff --git a/test/test_nt_misc.py b/test/test_nt_misc.py
index ff49e0c4..7934f70e 100644
--- a/test/test_nt_misc.py
+++ b/test/test_nt_misc.py
@@ -10,7 +10,6 @@ log = logging.getLogger(__name__)
class NTTestCase(unittest.TestCase):
-
def testIssue859(self):
graphA = Graph()
graphB = Graph()
@@ -25,7 +24,7 @@ class NTTestCase(unittest.TestCase):
def testIssue78(self):
g = Graph()
g.add((URIRef("foo"), URIRef("foo"), Literal(u"R\u00E4ksm\u00F6rg\u00E5s")))
- s = g.serialize(format='nt')
+ s = g.serialize(format="nt")
self.assertEqual(type(s), bytes)
self.assertTrue(r"R\u00E4ksm\u00F6rg\u00E5s".encode("latin-1") in s)
@@ -79,7 +78,7 @@ class NTTestCase(unittest.TestCase):
self.assertEqual(res, uniquot)
def test_NTriplesParser_fpath(self):
- fpath = "test/nt/" + os.listdir('test/nt')[0]
+ fpath = "test/nt/" + os.listdir("test/nt")[0]
p = ntriples.NTriplesParser()
self.assertRaises(ntriples.ParseError, p.parse, fpath)
@@ -88,7 +87,7 @@ class NTTestCase(unittest.TestCase):
data = 3
self.assertRaises(ntriples.ParseError, p.parsestring, data)
fname = "test/nt/lists-02.nt"
- with open(fname, 'r') as f:
+ with open(fname, "r") as f:
data = f.read()
p = ntriples.NTriplesParser()
res = p.parsestring(data)
@@ -105,15 +104,21 @@ class NTTestCase(unittest.TestCase):
self.assertTrue(sink is not None)
def test_bad_line(self):
- data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
+ data = (
+ """<http://example.org/resource32> 3 <http://example.org/datatype1> .\n"""
+ )
p = ntriples.NTriplesParser()
self.assertRaises(ntriples.ParseError, p.parsestring, data)
def test_cover_eat(self):
- data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
+ data = (
+ """<http://example.org/resource32> 3 <http://example.org/datatype1> .\n"""
+ )
p = ntriples.NTriplesParser()
p.line = data
- self.assertRaises(ntriples.ParseError, p.eat, re.compile('<http://example.org/datatype1>'))
+ self.assertRaises(
+ ntriples.ParseError, p.eat, re.compile("<http://example.org/datatype1>")
+ )
def test_cover_subjectobjectliteral(self):
# data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
diff --git a/test/test_nt_suite.py b/test/test_nt_suite.py
index c9175320..753d2897 100644
--- a/test/test_nt_suite.py
+++ b/test/test_nt_suite.py
@@ -12,41 +12,39 @@ The actual tests are done in test_roundtrip
def _get_test_files_formats():
- for f in os.listdir('test/nt'):
+ for f in os.listdir("test/nt"):
fpath = "test/nt/" + f
- if f.endswith('.rdf'):
- yield fpath, 'xml'
- elif f.endswith('.nt'):
- yield fpath, 'nt'
+ if f.endswith(".rdf"):
+ yield fpath, "xml"
+ elif f.endswith(".nt"):
+ yield fpath, "nt"
def all_nt_files():
skiptests = [
# illegal literal as subject
- 'test/nt/literals-01.nt',
- 'test/nt/keywords-08.nt',
- 'test/nt/paths-04.nt',
- 'test/nt/numeric-01.nt',
- 'test/nt/numeric-02.nt',
- 'test/nt/numeric-03.nt',
- 'test/nt/numeric-04.nt',
- 'test/nt/numeric-05.nt',
-
+ "test/nt/literals-01.nt",
+ "test/nt/keywords-08.nt",
+ "test/nt/paths-04.nt",
+ "test/nt/numeric-01.nt",
+ "test/nt/numeric-02.nt",
+ "test/nt/numeric-03.nt",
+ "test/nt/numeric-04.nt",
+ "test/nt/numeric-05.nt",
# illegal variables
- 'test/nt/formulae-01.nt',
- 'test/nt/formulae-02.nt',
- 'test/nt/formulae-03.nt',
- 'test/nt/formulae-05.nt',
- 'test/nt/formulae-06.nt',
- 'test/nt/formulae-10.nt',
-
+ "test/nt/formulae-01.nt",
+ "test/nt/formulae-02.nt",
+ "test/nt/formulae-03.nt",
+ "test/nt/formulae-05.nt",
+ "test/nt/formulae-06.nt",
+ "test/nt/formulae-10.nt",
# illegal bnode as predicate
- 'test/nt/paths-06.nt',
- 'test/nt/anons-02.nt',
- 'test/nt/anons-03.nt',
- 'test/nt/qname-01.nt',
- 'test/nt/lists-06.nt',
- ]
+ "test/nt/paths-06.nt",
+ "test/nt/anons-02.nt",
+ "test/nt/anons-03.nt",
+ "test/nt/qname-01.nt",
+ "test/nt/lists-06.nt",
+ ]
for fpath, fmt in _get_test_files_formats():
if fpath in skiptests:
log.debug("Skipping %s, known issue" % fpath)
diff --git a/test/test_nt_w3c.py b/test/test_nt_w3c.py
index 65166f5e..8294e8ff 100644
--- a/test/test_nt_w3c.py
+++ b/test/test_nt_w3c.py
@@ -13,7 +13,7 @@ def nt(test):
g = Graph()
try:
- g.parse(test.action, format='nt')
+ g.parse(test.action, format="nt")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
except:
@@ -21,14 +21,11 @@ def nt(test):
raise
-testers = {
- RDFT.TestNTriplesPositiveSyntax: nt,
- RDFT.TestNTriplesNegativeSyntax: nt
-}
+testers = {RDFT.TestNTriplesPositiveSyntax: nt, RDFT.TestNTriplesNegativeSyntax: nt}
def test_nt(tests=None):
- for t in nose_tests(testers, 'test/w3c/nt/manifest.ttl', legacy=True):
+ for t in nose_tests(testers, "test/w3c/nt/manifest.ttl", legacy=True):
if tests:
for test in tests:
if test in t[1].uri:
@@ -39,7 +36,7 @@ def test_nt(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_nt, 'rdflib_nt')
+ nose_tst_earl_report(test_nt, "rdflib_nt")
diff --git a/test/test_parser.py b/test/test_parser.py
index d311a89b..3aaf5658 100644
--- a/test/test_parser.py
+++ b/test/test_parser.py
@@ -7,8 +7,8 @@ from rdflib.graph import Graph
class ParserTestCase(unittest.TestCase):
- backend = 'default'
- path = 'store'
+ backend = "default"
+ path = "store"
def setUp(self):
self.graph = Graph(store=self.backend)
@@ -19,7 +19,8 @@ class ParserTestCase(unittest.TestCase):
def testNoPathWithHash(self):
g = self.graph
- g.parse(data="""\
+ g.parse(
+ data="""\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
@@ -31,7 +32,9 @@ class ParserTestCase(unittest.TestCase):
</rdfs:Class>
</rdf:RDF>
-""", publicID="http://example.org")
+""",
+ publicID="http://example.org",
+ )
subject = URIRef("http://example.org#")
label = g.value(subject, RDFS.label)
diff --git a/test/test_parser_helpers.py b/test/test_parser_helpers.py
index 58d083cb..090a8a49 100644
--- a/test/test_parser_helpers.py
+++ b/test/test_parser_helpers.py
@@ -1,4 +1,5 @@
from rdflib.plugins.sparql.parser import TriplesSameSubject
+
# from rdflib.plugins.sparql.algebra import triples
diff --git a/test/test_prefixTypes.py b/test/test_prefixTypes.py
index 2cf89596..8a785094 100644
--- a/test/test_prefixTypes.py
+++ b/test/test_prefixTypes.py
@@ -2,14 +2,17 @@ import unittest
from rdflib import Graph
-graph = Graph().parse(format='n3', data="""
+graph = Graph().parse(
+ format="n3",
+ data="""
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<http://example.org/doc> a foaf:Document;
dct:created "2011-03-20"^^xsd:date .
-""")
+""",
+)
class PrefixTypesTest(unittest.TestCase):
@@ -22,11 +25,11 @@ class PrefixTypesTest(unittest.TestCase):
"""
def test(self):
- s = graph.serialize(format='n3')
+ s = graph.serialize(format="n3")
print(s)
self.assertTrue("foaf:Document".encode("latin-1") in s)
self.assertTrue("xsd:date".encode("latin-1") in s)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_preflabel.py b/test/test_preflabel.py
index 76c4131e..b35c626c 100644
--- a/test/test_preflabel.py
+++ b/test/test_preflabel.py
@@ -8,50 +8,73 @@ from rdflib import URIRef
class TestPrefLabel(unittest.TestCase):
-
def setUp(self):
self.g = ConjunctiveGraph()
- self.u = URIRef('http://example.com/foo')
- self.g.add([self.u, RDFS.label, Literal('foo')])
- self.g.add([self.u, RDFS.label, Literal('bar')])
+ self.u = URIRef("http://example.com/foo")
+ self.g.add([self.u, RDFS.label, Literal("foo")])
+ self.g.add([self.u, RDFS.label, Literal("bar")])
def test_default_label_sorting(self):
res = sorted(self.g.preferredLabel(self.u))
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
- rdflib.term.Literal(u'bar')),
- (rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
- rdflib.term.Literal(u'foo'))]
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#label"),
+ rdflib.term.Literal(u"bar"),
+ ),
+ (
+ rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#label"),
+ rdflib.term.Literal(u"foo"),
+ ),
+ ]
self.assertEqual(res, tgt)
def test_default_preflabel_sorting(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('bla')])
+ self.g.add([self.u, SKOS.prefLabel, Literal("bla")])
res = self.g.preferredLabel(self.u)
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla'))]
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"bla"),
+ )
+ ]
self.assertEqual(res, tgt)
def test_preflabel_lang_sorting_no_lang_attr(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('bla')])
- self.g.add([self.u, SKOS.prefLabel, Literal('blubb', lang='en')])
+ self.g.add([self.u, SKOS.prefLabel, Literal("bla")])
+ self.g.add([self.u, SKOS.prefLabel, Literal("blubb", lang="en")])
res = sorted(self.g.preferredLabel(self.u))
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla')),
- (rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'blubb', lang='en'))]
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"bla"),
+ ),
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"blubb", lang="en"),
+ ),
+ ]
self.assertEqual(res, tgt)
def test_preflabel_lang_sorting_empty_lang_attr(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('bla')])
- self.g.add([self.u, SKOS.prefLabel, Literal('blubb', lang='en')])
- res = self.g.preferredLabel(self.u, lang='')
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla'))]
+ self.g.add([self.u, SKOS.prefLabel, Literal("bla")])
+ self.g.add([self.u, SKOS.prefLabel, Literal("blubb", lang="en")])
+ res = self.g.preferredLabel(self.u, lang="")
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"bla"),
+ )
+ ]
self.assertEqual(res, tgt)
def test_preflabel_lang_sorting_en_lang_attr(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('blubb', lang='en')])
- res = self.g.preferredLabel(self.u, lang='en')
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'blubb', lang='en'))]
+ self.g.add([self.u, SKOS.prefLabel, Literal("blubb", lang="en")])
+ res = self.g.preferredLabel(self.u, lang="en")
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"blubb", lang="en"),
+ )
+ ]
self.assertEqual(res, tgt)
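
The fixtures above boil down to a small preferredLabel() recipe: with several SKOS prefLabels present, the lang argument selects which (property, label) pairs are returned. A sketch:

from rdflib import ConjunctiveGraph, Literal, URIRef
from rdflib.namespace import SKOS

g = ConjunctiveGraph()
u = URIRef("http://example.com/foo")
g.add((u, SKOS.prefLabel, Literal("bla")))
g.add((u, SKOS.prefLabel, Literal("blubb", lang="en")))

print(g.preferredLabel(u, lang="en"))
# [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'), rdflib.term.Literal('blubb', lang='en'))]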
diff --git a/test/test_prettyxml.py b/test/test_prettyxml.py
index e20ec067..4a033fa4 100644
--- a/test/test_prettyxml.py
+++ b/test/test_prettyxml.py
@@ -45,7 +45,9 @@ def _mangled_copy(g):
"Makes a copy of the graph, replacing all bnodes with the bnode ``_blank``."
gcopy = ConjunctiveGraph()
- def isbnode(v): return isinstance(v, BNode)
+ def isbnode(v):
+ return isinstance(v, BNode)
+
for s, p, o in g:
if isbnode(s):
s = _blank
@@ -116,56 +118,101 @@ class TestPrettyXmlSerializer(SerializerTestBase):
rdfs:seeAlso _:bnode2 .
"""
- testContentFormat = 'n3'
+ testContentFormat = "n3"
def test_result_fragments(self):
rdfXml = serialize(self.sourceGraph, self.serializer)
- assert '<Test rdf:about="http://example.org/data/a">'.encode("latin-1") in rdfXml
- assert '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1") in rdfXml
+ assert (
+ '<Test rdf:about="http://example.org/data/a">'.encode("latin-1") in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1")
+ in rdfXml
+ )
assert '<name xml:lang="en">Bee</name>'.encode("latin-1") in rdfXml
- assert '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode("latin-1") in rdfXml
- assert '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml, "expected one identified bnode in serialized graph"
- #onlyBNodesMsg = "expected only inlined subClassOf-bnodes in serialized graph"
- #assert '<rdfs:subClassOf>' in rdfXml, onlyBNodesMsg
- #assert not '<rdfs:subClassOf ' in rdfXml, onlyBNodesMsg
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
+ # onlyBNodesMsg = "expected only inlined subClassOf-bnodes in serialized graph"
+ # assert '<rdfs:subClassOf>' in rdfXml, onlyBNodesMsg
+ # assert not '<rdfs:subClassOf ' in rdfXml, onlyBNodesMsg
def test_result_fragments_with_base(self):
- rdfXml = serialize(self.sourceGraph, self.serializer,
- extra_args={'base': "http://example.org/", 'xml_base': "http://example.org/"})
+ rdfXml = serialize(
+ self.sourceGraph,
+ self.serializer,
+ extra_args={
+ "base": "http://example.org/",
+ "xml_base": "http://example.org/",
+ },
+ )
assert 'xml:base="http://example.org/"'.encode("latin-1") in rdfXml
assert '<Test rdf:about="data/a">'.encode("latin-1") in rdfXml
assert '<rdf:Description rdf:about="data/b">'.encode("latin-1") in rdfXml
- assert '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode("latin-1") in rdfXml
- assert '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml, "expected one identified bnode in serialized graph"
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
def test_subClassOf_objects(self):
reparsedGraph = serialize_and_load(self.sourceGraph, self.serializer)
- _assert_expected_object_types_for_predicates(reparsedGraph,
- [RDFS.seeAlso, RDFS.subClassOf],
- [URIRef, BNode])
+ _assert_expected_object_types_for_predicates(
+ reparsedGraph, [RDFS.seeAlso, RDFS.subClassOf], [URIRef, BNode]
+ )
def test_pretty_xmlliteral(self):
# given:
g = ConjunctiveGraph()
- g.add((BNode(), RDF.value, Literal(u'''<p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p>''', datatype=RDF.XMLLiteral)))
+ g.add(
+ (
+ BNode(),
+ RDF.value,
+ Literal(
+ u"""<p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p>""",
+ datatype=RDF.XMLLiteral,
+ ),
+ )
+ )
# when:
- xmlrepr = g.serialize(format='pretty-xml')
+ xmlrepr = g.serialize(format="pretty-xml")
# then:
- assert u'''<rdf:value rdf:parseType="Literal"><p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p></rdf:value>'''.encode('utf-8') in xmlrepr
+ assert (
+ u"""<rdf:value rdf:parseType="Literal"><p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p></rdf:value>""".encode(
+ "utf-8"
+ )
+ in xmlrepr
+ )
def test_pretty_broken_xmlliteral(self):
# given:
g = ConjunctiveGraph()
- g.add((BNode(), RDF.value, Literal(u'''<p ''', datatype=RDF.XMLLiteral)))
+ g.add((BNode(), RDF.value, Literal(u"""<p """, datatype=RDF.XMLLiteral)))
# when:
- xmlrepr = g.serialize(format='pretty-xml')
+ xmlrepr = g.serialize(format="pretty-xml")
# then:
- assert u'''<rdf:value rdf:datatype="http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral">&lt;p '''.encode('utf-8') in xmlrepr
+ assert (
+ u"""<rdf:value rdf:datatype="http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral">&lt;p """.encode(
+ "utf-8"
+ )
+ in xmlrepr
+ )
def _assert_expected_object_types_for_predicates(graph, predicates, types):
for s, p, o in graph:
if p in predicates:
someTrue = [isinstance(o, t) for t in types]
- assert True in someTrue, \
- "Bad type %s for object when predicate is <%s>." % (type(o), p)
+ assert (
+ True in someTrue
+ ), "Bad type %s for object when predicate is <%s>." % (type(o), p)
diff --git a/test/test_rdf_lists.py b/test/test_rdf_lists.py
index a73d14d8..466b4847 100644
--- a/test/test_rdf_lists.py
+++ b/test/test_rdf_lists.py
@@ -5,8 +5,7 @@ from rdflib.graph import Graph
from rdflib.term import URIRef
-DATA =\
- """<http://example.com#C> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
+DATA = """<http://example.com#C> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
<http://example.com#B> <http://www.w3.org/2000/01/rdf-schema#subClassOf> _:fIYNVPxd4.
<http://example.com#B> <http://www.w3.org/2000/01/rdf-schema#subClassOf> <http://example.com#A>.
<http://example.com#B> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
@@ -20,8 +19,7 @@ _:fIYNVPxd3 <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <http://example.c
_:fIYNVPxd3 <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> <http://www.w3.org/1999/02/22-rdf-syntax-ns#nil>.
"""
-DATA_FALSE_ELEMENT =\
- """
+DATA_FALSE_ELEMENT = """
<http://example.org/#ThreeMemberList> <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <http://example.org/#p> .
<http://example.org/#ThreeMemberList> <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> _:list2 .
_:list2 <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> "false"^^<http://www.w3.org/2001/XMLSchema#boolean> .
@@ -36,19 +34,19 @@ def main():
class OWLCollectionTest(unittest.TestCase):
-
def testCollectionRDFXML(self):
- g = Graph().parse(data=DATA, format='nt')
- g.namespace_manager.bind('owl', URIRef('http://www.w3.org/2002/07/owl#'))
- print(g.serialize(format='pretty-xml'))
+ g = Graph().parse(data=DATA, format="nt")
+ g.namespace_manager.bind("owl", URIRef("http://www.w3.org/2002/07/owl#"))
+ print(g.serialize(format="pretty-xml"))
class ListTest(unittest.TestCase):
def testFalseElement(self):
- g = Graph().parse(data=DATA_FALSE_ELEMENT, format='nt')
+ g = Graph().parse(data=DATA_FALSE_ELEMENT, format="nt")
self.assertEqual(
- len(list(g.items(URIRef('http://example.org/#ThreeMemberList')))), 3)
+ len(list(g.items(URIRef("http://example.org/#ThreeMemberList")))), 3
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/test/test_rdfxml.py b/test/test_rdfxml.py
index 22d8fdb7..845a9a7d 100644
--- a/test/test_rdfxml.py
+++ b/test/test_rdfxml.py
@@ -42,7 +42,10 @@ class TestStore(Graph):
if not isinstance(s, BNode) and not isinstance(o, BNode):
if not (s, p, o) in self.expected:
m = "Triple not in expected result: %s, %s, %s" % (
- s.n3(), p.n3(), o.n3())
+ s.n3(),
+ p.n3(),
+ o.n3(),
+ )
if verbose:
write(m)
# raise Exception(m)
@@ -73,7 +76,7 @@ def cached_file(url):
folder = os.path.dirname(fpath)
if not os.path.exists(folder):
os.makedirs(folder)
- f = open(fpath, 'w')
+ f = open(fpath, "w")
try:
f.write(urlopen(url).read())
finally:
@@ -85,7 +88,7 @@ RDFCOREBASE = "http://www.w3.org/2000/10/rdf-tests/rdfcore/"
def relative(url):
- return url[len(RDFCOREBASE):]
+ return url[len(RDFCOREBASE) :]
def resolve(rel):
@@ -164,15 +167,16 @@ def _testNegative(uri, manifest):
class ParserTestCase(unittest.TestCase):
- store = 'default'
- path = 'store'
+ store = "default"
+ path = "store"
slow = True
def setUp(self):
self.manifest = manifest = Graph(store=self.store)
manifest.open(self.path)
- manifest.load(cached_file(
- "http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf"))
+ manifest.load(
+ cached_file("http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf")
+ )
def tearDown(self):
self.manifest.close()
@@ -188,8 +192,7 @@ class ParserTestCase(unittest.TestCase):
result = _testNegative(neg, manifest)
total += 1
num_failed += result
- self.assertEqual(
- num_failed, 0, "Failed: %s of %s." % (num_failed, total))
+ self.assertEqual(num_failed, 0, "Failed: %s of %s." % (num_failed, total))
def testPositive(self):
manifest = self.manifest
@@ -213,8 +216,7 @@ class ParserTestCase(unittest.TestCase):
results.add((test, RDF.type, RESULT["FailingRun"]))
total += 1
num_failed += result
- self.assertEqual(
- num_failed, 0, "Failed: %s of %s." % (num_failed, total))
+ self.assertEqual(num_failed, 0, "Failed: %s of %s." % (num_failed, total))
RESULT = Namespace("http://www.w3.org/2002/03owlt/resultsOntology#")
@@ -231,12 +233,14 @@ results.add((system, RDFS.comment, Literal("")))
if __name__ == "__main__":
manifest = Graph()
- manifest.load(cached_file(
- "http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf"))
+ manifest.load(
+ cached_file("http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf")
+ )
import sys
import getopt
+
try:
- optlist, args = getopt.getopt(sys.argv[1:], 'h:', ["help"])
+ optlist, args = getopt.getopt(sys.argv[1:], "h:", ["help"])
except getopt.GetoptError as msg:
write(msg)
# usage()
diff --git a/test/test_roundtrip.py b/test/test_roundtrip.py
index 9dfed952..149e9eb5 100644
--- a/test/test_roundtrip.py
+++ b/test/test_roundtrip.py
@@ -4,8 +4,10 @@ import rdflib.compare
try:
from .test_nt_suite import all_nt_files
+
assert all_nt_files
from .test_n3_suite import all_n3_files
+
assert all_n3_files
except:
from test.test_nt_suite import all_nt_files
@@ -28,10 +30,13 @@ tests roundtripping through rdf/xml with only the literals-02 file
SKIP = [
- ('xml', 'test/n3/n3-writer-test-29.n3'), # has predicates that cannot be shortened to strict qnames
- ('xml', 'test/nt/qname-02.nt'), # uses a property that cannot be qname'd
-    ('trix', 'test/n3/strquot.n3'), # contains characters forbidden by the xml spec
-    ('xml', 'test/n3/strquot.n3'), # contains characters forbidden by the xml spec
+ (
+ "xml",
+ "test/n3/n3-writer-test-29.n3",
+ ), # has predicates that cannot be shortened to strict qnames
+ ("xml", "test/nt/qname-02.nt"), # uses a property that cannot be qname'd
+ ("trix", "test/n3/strquot.n3"), # contains charachters forbidden by the xml spec
+ ("xml", "test/n3/strquot.n3"), # contains charachters forbidden by the xml spec
]
@@ -78,11 +83,9 @@ def test_cases():
global formats
if not formats:
serializers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Serializer))
- parsers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Parser))
+ x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Serializer)
+ )
+ parsers = set(x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Parser))
formats = parsers.intersection(serializers)
for testfmt in formats:
@@ -97,15 +100,14 @@ def test_n3():
global formats
if not formats:
serializers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Serializer))
- parsers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Parser))
+ x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Serializer)
+ )
+ parsers = set(x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Parser))
formats = parsers.intersection(serializers)
for testfmt in formats:
- if "/" in testfmt: continue # skip double testing
+ if "/" in testfmt:
+ continue # skip double testing
for f, infmt in all_n3_files():
if (testfmt, f) not in SKIP:
yield roundtrip, (infmt, testfmt, f)
@@ -113,12 +115,13 @@ def test_n3():
if __name__ == "__main__":
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
elif len(sys.argv) == 2:
import test.test_roundtrip
+
test.test_roundtrip.formats = [sys.argv[1]]
nose.main(defaultTest=sys.argv[0], argv=sys.argv[:1])
else:
- roundtrip(
- (sys.argv[2], sys.argv[1], sys.argv[3]), verbose=True)
+ roundtrip((sys.argv[2], sys.argv[1], sys.argv[3]), verbose=True)
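
The reformatted plugin lookups above are a useful one-liner on their own: the formats that can be round-tripped are the intersection of registered parser and serializer names. A sketch:

import rdflib.plugin

serializers = set(x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Serializer))
parsers = set(x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Parser))
print(sorted(parsers & serializers))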
diff --git a/test/test_rules.py b/test/test_rules.py
index 008104da..c2496760 100644
--- a/test/test_rules.py
+++ b/test/test_rules.py
@@ -36,11 +36,15 @@ try:
def facts(g):
for s, p, o in g:
- if p != LOG.implies and not isinstance(s, BNode) and not isinstance(o, BNode):
+ if (
+ p != LOG.implies
+ and not isinstance(s, BNode)
+ and not isinstance(o, BNode)
+ ):
yield terms.Fact(_convert(s), _convert(p), _convert(o))
class PychinkoTestCase(unittest.TestCase):
- backend = 'default'
+ backend = "default"
tmppath = None
def setUp(self):
@@ -66,7 +70,8 @@ try:
source = self.g
interp.addFacts(set(facts(source)), initialSet=True)
interp.run()
- #_logger.debug("inferred facts: %s" % interp.inferredFacts)
+ # _logger.debug("inferred facts: %s" % interp.inferredFacts)
+
except ImportError as e:
print("Could not test Pychinko: %s" % e)
diff --git a/test/test_seq.py b/test/test_seq.py
index a1411649..7f177574 100644
--- a/test/test_seq.py
+++ b/test/test_seq.py
@@ -23,8 +23,8 @@ s = """\
class SeqTestCase(unittest.TestCase):
- backend = 'default'
- path = 'store'
+ backend = "default"
+ path = "store"
def setUp(self):
store = self.store = Graph(store=self.backend)
@@ -47,5 +47,5 @@ def test_suite():
return unittest.makeSuite(SeqTestCase)
-if __name__ == '__main__':
- unittest.main(defaultTest='test_suite')
+if __name__ == "__main__":
+ unittest.main(defaultTest="test_suite")
diff --git a/test/test_serializexml.py b/test/test_serializexml.py
index d79c1d5f..6ca25a92 100644
--- a/test/test_serializexml.py
+++ b/test/test_serializexml.py
@@ -44,7 +44,9 @@ def _mangled_copy(g):
"Makes a copy of the graph, replacing all bnodes with the bnode ``_blank``."
gcopy = ConjunctiveGraph()
- def isbnode(v): return isinstance(v, BNode)
+ def isbnode(v):
+ return isinstance(v, BNode)
+
for s, p, o in g:
if isbnode(s):
s = _blank
@@ -115,23 +117,47 @@ class TestXMLSerializer(SerializerTestBase):
rdfs:seeAlso _:bnode2 .
"""
- testContentFormat = 'n3'
+ testContentFormat = "n3"
def test_result_fragments(self):
rdfXml = serialize(self.sourceGraph, self.serializer)
# print "--------"
# print rdfXml
# print "--------"
- assert '<rdf:Description rdf:about="http://example.org/data/a">'.encode("latin-1") in rdfXml
- assert '<rdf:type rdf:resource="http://example.org/model/test#Test"/>'.encode("latin-1") in rdfXml
- assert '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1") in rdfXml
+ assert (
+ '<rdf:Description rdf:about="http://example.org/data/a">'.encode("latin-1")
+ in rdfXml
+ )
+ assert (
+ '<rdf:type rdf:resource="http://example.org/model/test#Test"/>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1")
+ in rdfXml
+ )
assert '<name xml:lang="en">Bee</name>'.encode("latin-1") in rdfXml
- assert '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode("latin-1") in rdfXml
- assert '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml, "expected one identified bnode in serialized graph"
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
def test_result_fragments_with_base(self):
- rdfXml = serialize(self.sourceGraph, self.serializer,
- extra_args={'base': "http://example.org/", 'xml_base': "http://example.org/"})
+ rdfXml = serialize(
+ self.sourceGraph,
+ self.serializer,
+ extra_args={
+ "base": "http://example.org/",
+ "xml_base": "http://example.org/",
+ },
+ )
# print "--------"
# print rdfXml
# print "--------"
@@ -139,19 +165,27 @@ class TestXMLSerializer(SerializerTestBase):
assert '<rdf:Description rdf:about="data/a">'.encode("latin-1") in rdfXml
assert '<rdf:type rdf:resource="model/test#Test"/>'.encode("latin-1") in rdfXml
assert '<rdf:Description rdf:about="data/b">'.encode("latin-1") in rdfXml
- assert '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode("latin-1") in rdfXml
- assert '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml, "expected one identified bnode in serialized graph"
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
def test_subClassOf_objects(self):
reparsedGraph = serialize_and_load(self.sourceGraph, self.serializer)
- _assert_expected_object_types_for_predicates(reparsedGraph,
- [RDFS.seeAlso, RDFS.subClassOf],
- [URIRef, BNode])
+ _assert_expected_object_types_for_predicates(
+ reparsedGraph, [RDFS.seeAlso, RDFS.subClassOf], [URIRef, BNode]
+ )
def _assert_expected_object_types_for_predicates(graph, predicates, types):
for s, p, o in graph:
if p in predicates:
someTrue = [isinstance(o, t) for t in types]
- assert True in someTrue, \
- "Bad type %s for object when predicate is <%s>." % (type(o), p)
+ assert (
+ True in someTrue
+ ), "Bad type %s for object when predicate is <%s>." % (type(o), p)
diff --git a/test/test_slice.py b/test/test_slice.py
index 27e6e49a..36c72ca8 100644
--- a/test/test_slice.py
+++ b/test/test_slice.py
@@ -1,10 +1,8 @@
-
from rdflib import Graph, URIRef
import unittest
class GraphSlice(unittest.TestCase):
-
def testSlice(self):
"""
We pervert the slice object,
@@ -13,10 +11,12 @@ class GraphSlice(unittest.TestCase):
all operations return generators over full triples
"""
- def sl(x, y): return self.assertEqual(len(list(x)), y)
+ def sl(x, y):
+ return self.assertEqual(len(list(x)), y)
+
+ def soe(x, y):
+ return self.assertEqual(set([a[2] for a in x]), set(y)) # equals objects
- def soe(x, y): return self.assertEqual(
- set([a[2] for a in x]), set(y)) # equals objects
g = self.graph
# Single terms are all trivial:
@@ -27,35 +27,35 @@ class GraphSlice(unittest.TestCase):
# single slice slices by s,p,o, with : used to split
# tell me everything about "tarek" (same as above)
- sl(g[self.tarek::], 2)
+ sl(g[self.tarek : :], 2)
# give me every "likes" relationship
- sl(g[:self.likes:], 5)
+ sl(g[: self.likes :], 5)
# give me every relationship to pizza
- sl(g[::self.pizza], 3)
+ sl(g[:: self.pizza], 3)
# give me everyone who likes pizza
- sl(g[:self.likes:self.pizza], 2)
+ sl(g[: self.likes : self.pizza], 2)
# does tarek like pizza?
- self.assertTrue(g[self.tarek:self.likes:self.pizza])
+ self.assertTrue(g[self.tarek : self.likes : self.pizza])
        # More interesting is using paths
# everything hated or liked
- sl(g[:self.hates | self.likes], 7)
+ sl(g[: self.hates | self.likes], 7)
def setUp(self):
self.graph = Graph()
- self.michel = URIRef(u'michel')
- self.tarek = URIRef(u'tarek')
- self.bob = URIRef(u'bob')
- self.likes = URIRef(u'likes')
- self.hates = URIRef(u'hates')
- self.pizza = URIRef(u'pizza')
- self.cheese = URIRef(u'cheese')
+ self.michel = URIRef(u"michel")
+ self.tarek = URIRef(u"tarek")
+ self.bob = URIRef(u"bob")
+ self.likes = URIRef(u"likes")
+ self.hates = URIRef(u"hates")
+ self.pizza = URIRef(u"pizza")
+ self.cheese = URIRef(u"cheese")
self.addStuff()
@@ -77,5 +77,5 @@ class GraphSlice(unittest.TestCase):
self.graph.add((bob, hates, michel)) # gasp!
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
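
The slice tests above double as documentation for Graph.__getitem__: a slice is read as s:p:o, omitted positions are wildcards, and a fully specified slice acts as a membership test. A sketch with a single triple:

from rdflib import Graph, URIRef

g = Graph()
tarek, likes, pizza = URIRef("tarek"), URIRef("likes"), URIRef("pizza")
g.add((tarek, likes, pizza))

print(list(g[tarek::]))      # everything about tarek -> [(likes, pizza)]
print(list(g[:likes:]))      # every "likes" pair -> [(tarek, pizza)]
print(g[tarek:likes:pizza])  # membership test -> True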
diff --git a/test/test_sparql.py b/test/test_sparql.py
index c3f289fc..fdf29c3c 100644
--- a/test/test_sparql.py
+++ b/test/test_sparql.py
@@ -11,24 +11,30 @@ def test_graph_prefix():
"""
g1 = Graph()
- g1.parse(data="""
+ g1.parse(
+ data="""
@prefix : <urn:ns1:> .
:foo <p> 42.
- """, format="n3")
+ """,
+ format="n3",
+ )
g2 = Graph()
- g2.parse(data="""
+ g2.parse(
+ data="""
@prefix : <urn:somethingelse:> .
<urn:ns1:foo> <p> 42.
- """, format="n3")
+ """,
+ format="n3",
+ )
assert isomorphic(g1, g2)
- q_str = ("""
+ q_str = """
PREFIX : <urn:ns1:>
SELECT ?val
WHERE { :foo ?p ?val }
- """)
+ """
q_prepared = prepareQuery(q_str)
expected = [(Literal(42),)]
@@ -61,21 +67,21 @@ def test_sparql_bnodelist():
"""
- prepareQuery('select * where { ?s ?p ( [] ) . }')
- prepareQuery('select * where { ?s ?p ( [ ?p2 ?o2 ] ) . }')
- prepareQuery('select * where { ?s ?p ( [ ?p2 ?o2 ] [] ) . }')
- prepareQuery('select * where { ?s ?p ( [] [ ?p2 ?o2 ] [] ) . }')
+ prepareQuery("select * where { ?s ?p ( [] ) . }")
+ prepareQuery("select * where { ?s ?p ( [ ?p2 ?o2 ] ) . }")
+ prepareQuery("select * where { ?s ?p ( [ ?p2 ?o2 ] [] ) . }")
+ prepareQuery("select * where { ?s ?p ( [] [ ?p2 ?o2 ] [] ) . }")
def test_complex_sparql_construct():
g = Graph()
- q = '''select ?subject ?study ?id where {
+ q = """select ?subject ?study ?id where {
?s a <urn:Person>;
<urn:partOf> ?c;
<urn:hasParent> ?mother, ?father;
<urn:id> [ a <urn:Identifier>; <urn:has-value> ?id].
- }'''
+ }"""
g.query(q)
@@ -84,8 +90,7 @@ def test_sparql_update_with_bnode():
Test if the blank node is inserted correctly.
"""
graph = Graph()
- graph.update(
- "INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
+ graph.update("INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
for t in graph.triples((None, None, None)):
assert isinstance(t[0], BNode)
eq_(t[1].n3(), "<urn:type>")
@@ -97,9 +102,8 @@ def test_sparql_update_with_bnode_serialize_parse():
Test if the blank node is inserted correctly, can be serialized and parsed.
"""
graph = Graph()
- graph.update(
- "INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
- string = graph.serialize(format='ntriples').decode('utf-8')
+ graph.update("INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
+ string = graph.serialize(format="ntriples").decode("utf-8")
raised = False
try:
Graph().parse(data=string, format="ntriples")
@@ -108,6 +112,7 @@ def test_sparql_update_with_bnode_serialize_parse():
assert not raised
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
+
nose.main(defaultTest=__name__)
diff --git a/test/test_sparql_agg_distinct.py b/test/test_sparql_agg_distinct.py
index 7ab0f58a..39d6eb95 100644
--- a/test/test_sparql_agg_distinct.py
+++ b/test/test_sparql_agg_distinct.py
@@ -1,6 +1,6 @@
from rdflib import Graph
-query_tpl = '''
+query_tpl = """
SELECT ?x (MIN(?y_) as ?y) (%s(DISTINCT ?z_) as ?z) {
VALUES (?x ?y_ ?z_) {
("x1" 10 1)
@@ -8,42 +8,37 @@ SELECT ?x (MIN(?y_) as ?y) (%s(DISTINCT ?z_) as ?z) {
("x2" 20 2)
}
} GROUP BY ?x ORDER BY ?x
-'''
+"""
def test_group_concat_distinct():
g = Graph()
- results = g.query(query_tpl % 'GROUP_CONCAT')
+ results = g.query(query_tpl % "GROUP_CONCAT")
results = [[lit.toPython() for lit in line] for line in results]
# this is the tricky part
assert results[0][2] == "1", results[0][2]
# still check the whole result, to be on the safe side
- assert results == [
- ["x1", 10, "1"],
- ["x2", 20, "2"],
- ], results
+ assert results == [["x1", 10, "1"], ["x2", 20, "2"],], results
def test_sum_distinct():
g = Graph()
- results = g.query(query_tpl % 'SUM')
+ results = g.query(query_tpl % "SUM")
results = [[lit.toPython() for lit in line] for line in results]
# this is the tricky part
assert results[0][2] == 1, results[0][2]
# still check the whole result, to be on the safe side
- assert results == [
- ["x1", 10, 1],
- ["x2", 20, 2],
- ], results
+ assert results == [["x1", 10, 1], ["x2", 20, 2],], results
def test_avg_distinct():
g = Graph()
- results = g.query("""
+ results = g.query(
+ """
SELECT ?x (MIN(?y_) as ?y) (AVG(DISTINCT ?z_) as ?z) {
VALUES (?x ?y_ ?z_) {
("x1" 10 1)
@@ -52,23 +47,24 @@ def test_avg_distinct():
("x2" 20 2)
}
} GROUP BY ?x ORDER BY ?x
- """)
+ """
+ )
results = [[lit.toPython() for lit in line] for line in results]
# this is the tricky part
assert results[0][2] == 2, results[0][2]
# still check the whole result, to be on the safe side
- assert results == [
- ["x1", 10, 2],
- ["x2", 20, 2],
- ], results
+ assert results == [["x1", 10, 2], ["x2", 20, 2],], results
def test_count_distinct():
g = Graph()
- g.parse(format="turtle", publicID="http://example.org/", data="""
+ g.parse(
+ format="turtle",
+ publicID="http://example.org/",
+ data="""
@prefix : <> .
<#a>
@@ -83,26 +79,31 @@ def test_count_distinct():
:knows <#b>, <#c> ;
:age 20 .
- """)
+ """,
+ )
# Query 1: people knowing someone younger
- results = g.query("""
+ results = g.query(
+ """
PREFIX : <http://example.org/>
SELECT DISTINCT ?x {
?x :age ?ax ; :knows [ :age ?ay ].
FILTER( ?ax > ?ay )
}
- """)
+ """
+ )
assert len(results) == 2
    # Query 2: count people knowing someone younger
- results = g.query("""
+ results = g.query(
+ """
PREFIX : <http://example.org/>
SELECT (COUNT(DISTINCT ?x) as ?cx) {
?x :age ?ax ; :knows [ :age ?ay ].
FILTER( ?ax > ?ay )
}
- """)
+ """
+ )
assert list(results)[0][0].toPython() == 2
diff --git a/test/test_sparql_agg_undef.py b/test/test_sparql_agg_undef.py
index 649a5a8c..f36e9eb5 100644
--- a/test/test_sparql_agg_undef.py
+++ b/test/test_sparql_agg_undef.py
@@ -1,6 +1,6 @@
from rdflib import Graph, Literal, Variable
-query_tpl = '''
+query_tpl = """
SELECT ?x (%s(?y_) as ?y) {
VALUES (?x ?y_ ?z) {
("x1" undef 1)
@@ -9,7 +9,7 @@ SELECT ?x (%s(?y_) as ?y) {
("x2" 42 4)
}
} GROUP BY ?x ORDER BY ?x
-'''
+"""
Y = Variable("y")
@@ -24,18 +24,20 @@ def template_tst(agg_func, first, second):
def test_aggregates():
- yield template_tst, 'SUM', Literal(0), Literal(42)
- yield template_tst, 'MIN', None, Literal(42)
- yield template_tst, 'MAX', None, Literal(42)
+ yield template_tst, "SUM", Literal(0), Literal(42)
+ yield template_tst, "MIN", None, Literal(42)
+ yield template_tst, "MAX", None, Literal(42)
# yield template_tst, 'AVG', Literal(0), Literal(42)
- yield template_tst, 'SAMPLE', None, Literal(42)
- yield template_tst, 'COUNT', Literal(0), Literal(1)
- yield template_tst, 'GROUP_CONCAT', Literal(''), Literal("42")
+ yield template_tst, "SAMPLE", None, Literal(42)
+ yield template_tst, "COUNT", Literal(0), Literal(1)
+ yield template_tst, "GROUP_CONCAT", Literal(""), Literal("42")
def test_group_by_null():
g = Graph()
- results = list(g.query("""
+ results = list(
+ g.query(
+ """
SELECT ?x ?y (AVG(?z) as ?az) {
VALUES (?x ?y ?z) {
(1 undef 10)
@@ -46,7 +48,9 @@ def test_group_by_null():
}
} GROUP BY ?x ?y
ORDER BY ?x
- """))
+ """
+ )
+ )
assert len(results) == 2
assert results[0][0] == Literal(1)
assert results[1][0] == Literal(2)
diff --git a/test/test_sparql_construct_bindings.py b/test/test_sparql_construct_bindings.py
index d5a68b94..8f8240b2 100644
--- a/test/test_sparql_construct_bindings.py
+++ b/test/test_sparql_construct_bindings.py
@@ -5,16 +5,16 @@ from rdflib.compare import isomorphic
import unittest
from nose.tools import eq_
-class TestConstructInitBindings(unittest.TestCase):
+class TestConstructInitBindings(unittest.TestCase):
def test_construct_init_bindings(self):
"""
This is issue https://github.com/RDFLib/rdflib/issues/1001
"""
g1 = Graph()
-
- q_str = ("""
+
+ q_str = """
PREFIX : <urn:ns1:>
CONSTRUCT {
?uri :prop1 ?val1;
@@ -24,17 +24,16 @@ class TestConstructInitBindings(unittest.TestCase):
bind(uri(concat("urn:ns1:", ?a)) as ?uri)
bind(?b as ?val1)
}
- """)
+ """
q_prepared = prepareQuery(q_str)
expected = [
- (URIRef('urn:ns1:A'),URIRef('urn:ns1:prop1'), Literal('B')),
- (URIRef('urn:ns1:A'),URIRef('urn:ns1:prop2'), Literal('C'))
+ (URIRef("urn:ns1:A"), URIRef("urn:ns1:prop1"), Literal("B")),
+ (URIRef("urn:ns1:A"), URIRef("urn:ns1:prop2"), Literal("C")),
]
- results = g1.query(q_prepared, initBindings={
- 'a': Literal('A'),
- 'b': Literal('B'),
- 'c': Literal('C')
- })
+ results = g1.query(
+ q_prepared,
+ initBindings={"a": Literal("A"), "b": Literal("B"), "c": Literal("C")},
+ )
eq_(sorted(results, key=lambda x: str(x[1])), expected)
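
The rewritten test above is in effect the recipe for combining prepareQuery with initBindings: variables used only inside bind() receive their values at query time. A trimmed-down sketch of the same query:

from rdflib import Graph, Literal
from rdflib.plugins.sparql import prepareQuery

q = prepareQuery(
    """
    PREFIX : <urn:ns1:>
    CONSTRUCT { ?uri :prop1 ?val1 }
    WHERE {
        bind(uri(concat("urn:ns1:", ?a)) as ?uri)
        bind(?b as ?val1)
    }
    """
)
g = Graph()
for triple in g.query(q, initBindings={"a": Literal("A"), "b": Literal("B")}):
    print(triple)  # (rdflib.term.URIRef('urn:ns1:A'), rdflib.term.URIRef('urn:ns1:prop1'), rdflib.term.Literal('B'))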
diff --git a/test/test_sparql_service.py b/test/test_sparql_service.py
index 19f713c3..550bfcb2 100644
--- a/test/test_sparql_service.py
+++ b/test/test_sparql_service.py
@@ -5,7 +5,7 @@ from rdflib.compare import isomorphic
def test_service():
g = Graph()
- q = '''select ?dbpHypernym ?dbpComment
+ q = """select ?dbpHypernym ?dbpComment
where
{ service <http://DBpedia.org/sparql>
{ select ?dbpHypernym ?dbpComment
@@ -15,7 +15,7 @@ def test_service():
<http://purl.org/linguistics/gold/hypernym> ?dbpHypernym ;
<http://www.w3.org/2000/01/rdf-schema#comment> ?dbpComment .
- } } } limit 2'''
+ } } } limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -25,7 +25,7 @@ def test_service():
def test_service_with_bind():
g = Graph()
- q = '''select ?dbpHypernym ?dbpComment ?dbpDeathPlace
+ q = """select ?dbpHypernym ?dbpComment ?dbpDeathPlace
where
{ bind (<http://dbpedia.org/resource/Eltham> as ?dbpDeathPlace)
service <http://DBpedia.org/sparql>
@@ -37,7 +37,7 @@ def test_service_with_bind():
<http://www.w3.org/2000/01/rdf-schema#comment> ?dbpComment ;
<http://dbpedia.org/ontology/deathPlace> ?dbpDeathPlace .
- } } } limit 2'''
+ } } } limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -47,7 +47,7 @@ def test_service_with_bind():
def test_service_with_values():
g = Graph()
- q = '''select ?dbpHypernym ?dbpComment ?dbpDeathPlace
+ q = """select ?dbpHypernym ?dbpComment ?dbpDeathPlace
where
{ values (?dbpHypernym ?dbpDeathPlace) {(<http://dbpedia.org/resource/Leveller> <http://dbpedia.org/resource/London>) (<http://dbpedia.org/resource/Leveller> <http://dbpedia.org/resource/Eltham>)}
service <http://DBpedia.org/sparql>
@@ -59,7 +59,7 @@ def test_service_with_values():
<http://www.w3.org/2000/01/rdf-schema#comment> ?dbpComment ;
<http://dbpedia.org/ontology/deathPlace> ?dbpDeathPlace .
- } } } limit 2'''
+ } } } limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -69,13 +69,13 @@ def test_service_with_values():
def test_service_with_implicit_select():
g = Graph()
- q = '''select ?s ?p ?o
+ q = """select ?s ?p ?o
where
{
service <http://DBpedia.org/sparql>
{
values (?s ?p ?o) {(<http://example.org/a> <http://example.org/b> 1) (<http://example.org/a> <http://example.org/b> 2)}
- }} limit 2'''
+ }} limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -85,14 +85,14 @@ def test_service_with_implicit_select():
def test_service_with_implicit_select_and_prefix():
g = Graph()
- q = '''prefix ex:<http://example.org/>
+ q = """prefix ex:<http://example.org/>
select ?s ?p ?o
where
{
service <http://DBpedia.org/sparql>
{
values (?s ?p ?o) {(ex:a ex:b 1) (<http://example.org/a> <http://example.org/b> 2)}
- }} limit 2'''
+ }} limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -102,14 +102,14 @@ def test_service_with_implicit_select_and_prefix():
def test_service_with_implicit_select_and_base():
g = Graph()
- q = '''base <http://example.org/>
+ q = """base <http://example.org/>
select ?s ?p ?o
where
{
service <http://DBpedia.org/sparql>
{
values (?s ?p ?o) {(<a> <b> 1) (<a> <b> 2)}
- }} limit 2'''
+ }} limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -119,19 +119,19 @@ def test_service_with_implicit_select_and_base():
def test_service_with_implicit_select_and_allcaps():
g = Graph()
- q = '''SELECT ?s
+ q = """SELECT ?s
WHERE
{
SERVICE <http://dbpedia.org/sparql>
{
?s <http://purl.org/linguistics/gold/hypernym> <http://dbpedia.org/resource/Leveller> .
}
- } LIMIT 3'''
+ } LIMIT 3"""
results = g.query(q)
assert len(results) == 3
-#def test_with_fixture(httpserver):
+# def test_with_fixture(httpserver):
# httpserver.expect_request("/sparql/?query=SELECT * WHERE ?s ?p ?o").respond_with_json({"vars": ["s","p","o"], "bindings":[]})
# test_server = httpserver.url_for('/sparql')
# g = Graph()
@@ -140,7 +140,7 @@ def test_service_with_implicit_select_and_allcaps():
# assert len(results) == 0
-if __name__ == '__main__':
+if __name__ == "__main__":
# import nose
# nose.main(defaultTest=__name__)
test_service()
diff --git a/test/test_sparqlstore.py b/test/test_sparqlstore.py
index f638a178..38a8b481 100644
--- a/test/test_sparqlstore.py
+++ b/test/test_sparqlstore.py
@@ -15,7 +15,7 @@ except:
class SPARQLStoreDBPediaTestCase(unittest.TestCase):
- store_name = 'SPARQLStore'
+ store_name = "SPARQLStore"
path = "http://dbpedia.org/sparql"
storetest = True
create = False
@@ -41,8 +41,8 @@ class SPARQLStoreDBPediaTestCase(unittest.TestCase):
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
res = self.graph.query(
- query,
- initNs={"xyzzy": "http://www.w3.org/2004/02/skos/core#"})
+ query, initNs={"xyzzy": "http://www.w3.org/2004/02/skos/core#"}
+ )
for i in res:
assert type(i[0]) == Literal, i[0].n3()
@@ -51,10 +51,7 @@ class SPARQLStoreDBPediaTestCase(unittest.TestCase):
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
- self.assertRaises(
- HTTPError,
- self.graph.query,
- query)
+ self.assertRaises(HTTPError, self.graph.query, query)
def test_query_with_added_prolog(self):
prologue = """\
@@ -73,25 +70,34 @@ class SPARQLStoreUpdateTestCase(unittest.TestCase):
def setUp(self):
port = self.setup_mocked_endpoint()
self.graph = Graph(store="SPARQLUpdateStore", identifier=URIRef("urn:ex"))
- self.graph.open(("http://localhost:{port}/query".format(port=port),
- "http://localhost:{port}/update".format(port=port)), create=False)
+ self.graph.open(
+ (
+ "http://localhost:{port}/query".format(port=port),
+ "http://localhost:{port}/update".format(port=port),
+ ),
+ create=False,
+ )
ns = list(self.graph.namespaces())
assert len(ns) > 0, ns
def setup_mocked_endpoint(self):
# Configure mock server.
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
- s.bind(('localhost', 0))
+ s.bind(("localhost", 0))
address, port = s.getsockname()
s.close()
- mock_server = HTTPServer(('localhost', port), SPARQL11ProtocolStoreMock)
+ mock_server = HTTPServer(("localhost", port), SPARQL11ProtocolStoreMock)
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
- print("Started mocked sparql endpoint on http://localhost:{port}/".format(port=port))
+ print(
+ "Started mocked sparql endpoint on http://localhost:{port}/".format(
+ port=port
+ )
+ )
return port
def tearDown(self):
@@ -116,7 +122,9 @@ class SPARQL11ProtocolStoreMock(BaseHTTPRequestHandler):
if self.path == "/query":
if self.headers.get("Content-Type") == "application/sparql-query":
pass
- elif self.headers.get("Content-Type") == "application/x-www-form-urlencoded":
+ elif (
+ self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
+ ):
pass
else:
self.send_response(requests.codes.not_acceptable)
@@ -124,7 +132,9 @@ class SPARQL11ProtocolStoreMock(BaseHTTPRequestHandler):
elif self.path == "/update":
if self.headers.get("Content-Type") == "application/sparql-update":
pass
- elif self.headers.get("Content-Type") == "application/x-www-form-urlencoded":
+ elif (
+ self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
+ ):
pass
else:
self.send_response(requests.codes.not_acceptable)
@@ -142,5 +152,6 @@ class SPARQL11ProtocolStoreMock(BaseHTTPRequestHandler):
self.end_headers()
return
-if __name__ == '__main__':
+
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_sparqlupdatestore.py b/test/test_sparqlupdatestore.py
index 233dd834..b6dcedbd 100644
--- a/test/test_sparqlupdatestore.py
+++ b/test/test_sparqlupdatestore.py
@@ -9,8 +9,8 @@ import re
from rdflib import ConjunctiveGraph, URIRef, Literal, BNode, Graph
from urllib.request import urlopen
-HOST = 'http://localhost:3031'
-DB = '/db/'
+HOST = "http://localhost:3031"
+DB = "/db/"
# this assumes SPARQL1.1 query/update endpoints running locally at
# http://localhost:3031/db/
@@ -24,23 +24,22 @@ DB = '/db/'
# THIS WILL DELETE ALL DATA IN THE /db dataset
-michel = URIRef(u'urn:michel')
-tarek = URIRef(u'urn:tarek')
-bob = URIRef(u'urn:bob')
-likes = URIRef(u'urn:likes')
-hates = URIRef(u'urn:hates')
-pizza = URIRef(u'urn:pizza')
-cheese = URIRef(u'urn:cheese')
+michel = URIRef("urn:michel")
+tarek = URIRef("urn:tarek")
+bob = URIRef("urn:bob")
+likes = URIRef("urn:likes")
+hates = URIRef("urn:hates")
+pizza = URIRef("urn:pizza")
+cheese = URIRef("urn:cheese")
-graphuri = URIRef('urn:graph')
-othergraphuri = URIRef('urn:othergraph')
+graphuri = URIRef("urn:graph")
+othergraphuri = URIRef("urn:othergraph")
class TestSparql11(unittest.TestCase):
-
def setUp(self):
self.longMessage = True
- self.graph = ConjunctiveGraph('SPARQLUpdateStore')
+ self.graph = ConjunctiveGraph("SPARQLUpdateStore")
root = HOST + DB
self.graph.open((root + "sparql", root + "update"))
@@ -62,8 +61,8 @@ class TestSparql11(unittest.TestCase):
g2 = self.graph.get_context(othergraphuri)
g2.add((michel, likes, pizza))
- self.assertEqual(3, len(g), 'graph contains 3 triples')
- self.assertEqual(1, len(g2), 'other graph contains 1 triple')
+ self.assertEqual(3, len(g), "graph contains 3 triples")
+ self.assertEqual(1, len(g2), "other graph contains 1 triple")
r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEqual(2, len(list(r)), "two people like pizza")
@@ -72,8 +71,9 @@ class TestSparql11(unittest.TestCase):
self.assertEqual(2, len(list(r)), "two people like pizza")
# Test initBindings
- r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
- initBindings={'s': tarek})
+ r = g.query(
+ "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={"s": tarek}
+ )
self.assertEqual(1, len(list(r)), "i was asking only about tarek")
r = g.triples((tarek, likes, pizza))
@@ -94,7 +94,7 @@ class TestSparql11(unittest.TestCase):
g2.add((bob, likes, pizza))
g.add((tarek, hates, cheese))
- self.assertEqual(2, len(g), 'graph contains 2 triples')
+ self.assertEqual(2, len(g), "graph contains 2 triples")
# the following are actually bad tests as they depend on your endpoint,
# as pointed out in the sparqlstore.py code:
@@ -106,15 +106,19 @@ class TestSparql11(unittest.TestCase):
##
# Fuseki/TDB has a flag for specifying that the default graph
# is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config).
- self.assertEqual(3, len(self.graph),
- 'default union graph should contain three triples but contains:\n'
- '%s' % list(self.graph))
+ self.assertEqual(
+ 3,
+ len(self.graph),
+ "default union graph should contain three triples but contains:\n"
+ "%s" % list(self.graph),
+ )
r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEqual(2, len(list(r)), "two people like pizza")
- r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
- initBindings={'s': tarek})
+ r = self.graph.query(
+ "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={"s": tarek}
+ )
self.assertEqual(1, len(list(r)), "i was asking only about tarek")
r = self.graph.triples((tarek, likes, pizza))
@@ -129,44 +133,47 @@ class TestSparql11(unittest.TestCase):
self.assertEqual(1, len(list(r)), "only tarek likes pizza")
def testUpdate(self):
- self.graph.update("INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }")
+ self.graph.update(
+ "INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }"
+ )
g = self.graph.get_context(graphuri)
- self.assertEqual(1, len(g), 'graph contains 1 triples')
+ self.assertEqual(1, len(g), "graph contains 1 triples")
def testUpdateWithInitNs(self):
self.graph.update(
"INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }",
- initNs={'ns': URIRef('urn:')}
+ initNs={"ns": URIRef("urn:")},
)
g = self.graph.get_context(graphuri)
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
def testUpdateWithInitBindings(self):
self.graph.update(
"INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }",
initBindings={
- 'a': URIRef('urn:michel'),
- 'b': URIRef('urn:likes'),
- 'c': URIRef('urn:pizza'),
- }
+ "a": URIRef("urn:michel"),
+ "b": URIRef("urn:likes"),
+ "c": URIRef("urn:pizza"),
+ },
)
g = self.graph.get_context(graphuri)
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
def testUpdateWithBlankNode(self):
self.graph.update(
- "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }")
+ "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }"
+ )
g = self.graph.get_context(graphuri)
for t in g.triples((None, None, None)):
self.assertTrue(isinstance(t[0], BNode))
@@ -175,33 +182,34 @@ class TestSparql11(unittest.TestCase):
def testUpdateWithBlankNodeSerializeAndParse(self):
self.graph.update(
- "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }")
+ "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }"
+ )
g = self.graph.get_context(graphuri)
- string = g.serialize(format='ntriples').decode('utf-8')
+ string = g.serialize(format="ntriples").decode("utf-8")
raised = False
try:
Graph().parse(data=string, format="ntriples")
except Exception as e:
raised = True
- self.assertFalse(raised, 'Exception raised when parsing: ' + string)
+ self.assertFalse(raised, "Exception raised when parsing: " + string)
def testMultipleUpdateWithInitBindings(self):
self.graph.update(
"INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };"
"INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }",
initBindings={
- 'a': URIRef('urn:michel'),
- 'b': URIRef('urn:likes'),
- 'c': URIRef('urn:pizza'),
- 'd': URIRef('urn:bob'),
- }
+ "a": URIRef("urn:michel"),
+ "b": URIRef("urn:likes"),
+ "c": URIRef("urn:pizza"),
+ "d": URIRef("urn:bob"),
+ },
)
g = self.graph.get_context(graphuri)
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza), (bob, likes, pizza)]),
- 'michel and bob like pizza'
+ "michel and bob like pizza",
)
def testNamedGraphUpdate(self):
@@ -211,25 +219,31 @@ class TestSparql11(unittest.TestCase):
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
- r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \
- "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
+ r2 = (
+ "DELETE { <urn:michel> <urn:likes> <urn:pizza> } "
+ + "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
+ )
g.update(r2)
self.assertEqual(
set(g.triples((None, None, None))),
set([(bob, likes, pizza)]),
- 'only bob likes pizza'
+ "only bob likes pizza",
)
says = URIRef("urn:says")
# Strings with unbalanced curly braces
- tricky_strs = ["With an unbalanced curly brace %s " % brace
- for brace in ["{", "}"]]
+ tricky_strs = [
+ "With an unbalanced curly brace %s " % brace for brace in ["{", "}"]
+ ]
for tricky_str in tricky_strs:
- r3 = """INSERT { ?b <urn:says> "%s" }
- WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str
+ r3 = (
+ """INSERT { ?b <urn:says> "%s" }
+ WHERE { ?b <urn:likes> <urn:pizza>} """
+ % tricky_str
+ )
g.update(r3)
values = set()
@@ -253,16 +267,26 @@ class TestSparql11(unittest.TestCase):
r4strings.append(r"""'''9: adfk } <foo> #éï \\'''""")
r4strings.append("'''10: ad adsfj \n { \n sadfj'''")
- r4 = "\n".join([
- u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s
- for s in r4strings
- ])
+ r4 = "\n".join(
+ ["INSERT DATA { <urn:michel> <urn:says> %s } ;" % s for s in r4strings]
+ )
g.update(r4)
values = set()
for v in g.objects(michel, says):
values.add(str(v))
- self.assertEqual(values, set([re.sub(r"\\(.)", r"\1", re.sub(
- r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s)) for s in r4strings]))
+ self.assertEqual(
+ values,
+ set(
+ [
+ re.sub(
+ r"\\(.)",
+ r"\1",
+ re.sub(r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s),
+ )
+ for s in r4strings
+ ]
+ ),
+ )
# IRI Containing ' or #
# The fragment identifier must not be misinterpreted as a comment
@@ -275,10 +299,10 @@ class TestSparql11(unittest.TestCase):
values = set()
for v in g.objects(michel, hates):
values.add(str(v))
- self.assertEqual(values, set([u"urn:foo'bar?baz;a=1&b=2#fragment", u"'}"]))
+ self.assertEqual(values, set(["urn:foo'bar?baz;a=1&b=2#fragment", "'}"]))
# Comments
- r6 = u"""
+ r6 = """
INSERT DATA {
<urn:bob> <urn:hates> <urn:bob> . # No closing brace: }
<urn:bob> <urn:hates> <urn:michel>.
@@ -294,39 +318,40 @@ class TestSparql11(unittest.TestCase):
def testNamedGraphUpdateWithInitBindings(self):
g = self.graph.get_context(graphuri)
r = "INSERT { ?a ?b ?c } WHERE {}"
- g.update(r, initBindings={
- 'a': michel,
- 'b': likes,
- 'c': pizza
- })
+ g.update(r, initBindings={"a": michel, "b": likes, "c": pizza})
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
def testEmptyNamedGraph(self):
empty_graph_iri = "urn:empty-graph-1"
self.graph.update("CREATE GRAPH <%s>" % empty_graph_iri)
- named_graphs = [str(r[0]) for r in self.graph.query(
- "SELECT ?name WHERE { GRAPH ?name {} }")]
+ named_graphs = [
+ str(r[0]) for r in self.graph.query("SELECT ?name WHERE { GRAPH ?name {} }")
+ ]
# Some SPARQL endpoint backends (like TDB) are not able to find empty named graphs
# (at least with this query)
if empty_graph_iri in named_graphs:
- self.assertTrue(empty_graph_iri in [str(g.identifier)
- for g in self.graph.contexts()])
+ self.assertTrue(
+ empty_graph_iri in [str(g.identifier) for g in self.graph.contexts()]
+ )
def testEmptyLiteral(self):
# test for https://github.com/RDFLib/rdflib/issues/457
# also see test_issue457.py which is sparql store independent!
g = self.graph.get_context(graphuri)
- g.add((
- URIRef('http://example.com/s'),
- URIRef('http://example.com/p'),
- Literal('')))
+ g.add(
+ (
+ URIRef("http://example.com/s"),
+ URIRef("http://example.com/p"),
+ Literal(""),
+ )
+ )
o = tuple(g)[0][2]
- self.assertEqual(o, Literal(''), repr(o))
+ self.assertEqual(o, Literal(""), repr(o))
try:
@@ -335,5 +360,5 @@ except:
raise SkipTest(HOST + " is unavailable.")
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_swap_n3.py b/test/test_swap_n3.py
index b6dcc698..f7071bec 100644
--- a/test/test_swap_n3.py
+++ b/test/test_swap_n3.py
@@ -2,6 +2,7 @@ from nose.exc import SkipTest
import os
import sys
import unittest
+
try:
maketrans = str.maketrans
except AttributeError:
@@ -42,22 +43,22 @@ qt = rdflib.Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-query#")
skiptests = [
- 'syntax_neg_single_quote',
- 'syntax_neg_literal_predicate',
- 'syntax_this_quantifiers',
- 'syntax_trailing_semicolon',
- 'syntax_neg_thisadoc',
- 'syntax_equals1',
- 'syntax_equals2',
- 'syntax_this_rules',
- 'syntax_neg_keywords3',
- 'syntax_zero_objects',
- 'syntax_neg_formula_predicate',
- 'syntax_zero_predicates',
+ "syntax_neg_single_quote",
+ "syntax_neg_literal_predicate",
+ "syntax_this_quantifiers",
+ "syntax_trailing_semicolon",
+ "syntax_neg_thisadoc",
+ "syntax_equals1",
+ "syntax_equals2",
+ "syntax_this_rules",
+ "syntax_neg_keywords3",
+ "syntax_zero_objects",
+ "syntax_neg_formula_predicate",
+ "syntax_zero_predicates",
# 'syntax_qvars1',
# 'syntax_qvars2',
# 'contexts',
- 'syntax_too_nested'
+ "syntax_too_nested",
]
@@ -81,7 +82,7 @@ def generictest(e):
def dir_to_uri(directory, sep=os.path.sep):
- '''
+ """
Convert a local path to a File URI.
>>> dir_to_uri('c:\\\\temp\\\\foo\\\\file.txt', sep='\\\\')
@@ -89,28 +90,36 @@ def dir_to_uri(directory, sep=os.path.sep):
>>> dir_to_uri('/tmp/foo/file.txt', sep='/')
'file:///tmp/foo/file.txt'
- '''
+ """
items = directory.split(sep)
- path = '/'.join(items)
- if path.startswith('/'):
+ path = "/".join(items)
+ if path.startswith("/"):
path = path[1:]
- return 'file:///%s' % (path,)
+ return "file:///%s" % (path,)
def test_cases():
from copy import deepcopy
+
g = rdflib.Graph()
- swap_dir = os.path.join(os.getcwd(), 'test', 'swap-n3')
- g.parse(os.path.join(swap_dir, 'n3-rdf.tests'), format="n3")
- g.parse(os.path.join(swap_dir, 'n3-full.tests'), format="n3")
+ swap_dir = os.path.join(os.getcwd(), "test", "swap-n3")
+ g.parse(os.path.join(swap_dir, "n3-rdf.tests"), format="n3")
+ g.parse(os.path.join(swap_dir, "n3-full.tests"), format="n3")
tfiles = []
- swap_dir_uri = dir_to_uri(swap_dir) + '/'
+ swap_dir_uri = dir_to_uri(swap_dir) + "/"
for tst in g.subjects():
- files = [str(tfile).replace('http://www.w3.org/2000/10/', swap_dir_uri)
- for tfile in g.objects(tst, rdflib.URIRef("http://www.w3.org/2004/11/n3test#inputDocument")) if tfile.endswith('n3')]
+ files = [
+ str(tfile).replace("http://www.w3.org/2000/10/", swap_dir_uri)
+ for tfile in g.objects(
+ tst, rdflib.URIRef("http://www.w3.org/2004/11/n3test#inputDocument")
+ )
+ if tfile.endswith("n3")
+ ]
tfiles += files
for tfile in set(tfiles):
- gname = tfile.split('/swap-n3/swap/test/')[1][:-3].translate(maketrans('-/','__'))
+ gname = tfile.split("/swap-n3/swap/test/")[1][:-3].translate(
+ maketrans("-/", "__")
+ )
e = Envelope(gname, tfile)
if gname in skiptests:
e.skip = True
@@ -119,6 +128,7 @@ def test_cases():
# e.skip = True
if sys.version_info[:2] == (2, 4):
import pickle
+
gjt = pickle.dumps(generictest)
gt = pickle.loads(gjt)
else:
@@ -130,4 +140,3 @@ def test_cases():
if __name__ == "__main__":
test_cases()
# unittest.main()
-
diff --git a/test/test_term.py b/test/test_term.py
index aae05600..0363baba 100644
--- a/test/test_term.py
+++ b/test/test_term.py
@@ -23,11 +23,12 @@ class TestURIRefRepr(unittest.TestCase):
def testSubclassNameAppearsInRepr(self):
class MyURIRef(URIRef):
pass
- x = MyURIRef('http://example.com/')
+
+ x = MyURIRef("http://example.com/")
self.assertEqual(repr(x), uformat("MyURIRef(u'http://example.com/')"))
def testGracefulOrdering(self):
- u = URIRef('cake')
+ u = URIRef("cake")
g = Graph()
a = u > u
a = u > BNode()
@@ -36,18 +37,17 @@ class TestURIRefRepr(unittest.TestCase):
class TestBNodeRepr(unittest.TestCase):
-
def testSubclassNameAppearsInRepr(self):
class MyBNode(BNode):
pass
+
x = MyBNode()
self.assertTrue(repr(x).startswith("MyBNode("))
class TestLiteral(unittest.TestCase):
-
def test_base64_values(self):
- b64msg = 'cmRmbGliIGlzIGNvb2whIGFsc28gaGVyZSdzIHNvbWUgYmluYXJ5IAAR83UC'
+ b64msg = "cmRmbGliIGlzIGNvb2whIGFsc28gaGVyZSdzIHNvbWUgYmluYXJ5IAAR83UC"
decoded_b64msg = base64.b64decode(b64msg)
lit = Literal(b64msg, datatype=XSD.base64Binary)
self.assertEqual(lit.value, decoded_b64msg)
@@ -56,30 +56,14 @@ class TestLiteral(unittest.TestCase):
def test_total_order(self):
types = {
XSD.dateTime: (
- '2001-01-01T00:00:00',
- '2001-01-01T00:00:00Z',
- '2001-01-01T00:00:00-00:00'
- ),
- XSD.date: (
- '2001-01-01',
- '2001-01-01Z',
- '2001-01-01-00:00'
- ),
- XSD.time: (
- '00:00:00',
- '00:00:00Z',
- '00:00:00-00:00'
- ),
- XSD.gYear: (
- '2001',
- '2001Z',
- '2001-00:00'
- ), # interval
- XSD.gYearMonth: (
- '2001-01',
- '2001-01Z',
- '2001-01-00:00'
+ "2001-01-01T00:00:00",
+ "2001-01-01T00:00:00Z",
+ "2001-01-01T00:00:00-00:00",
),
+ XSD.date: ("2001-01-01", "2001-01-01Z", "2001-01-01-00:00"),
+ XSD.time: ("00:00:00", "00:00:00Z", "00:00:00-00:00"),
+ XSD.gYear: ("2001", "2001Z", "2001-00:00"), # interval
+ XSD.gYearMonth: ("2001-01", "2001-01Z", "2001-01-00:00"),
}
literals = [
Literal(literal, datatype=t)
@@ -100,19 +84,19 @@ class TestLiteral(unittest.TestCase):
l1 = [
Literal(l, datatype=XSD.dateTime)
for l in [
- '2001-01-01T00:00:00',
- '2001-01-01T01:00:00',
- '2001-01-01T01:00:01',
- '2001-01-02T01:00:01',
- '2001-01-01T00:00:00Z',
- '2001-01-01T00:00:00-00:00',
- '2001-01-01T01:00:00Z',
- '2001-01-01T01:00:00-00:00',
- '2001-01-01T00:00:00-01:30',
- '2001-01-01T01:00:00-01:30',
- '2001-01-02T01:00:01Z',
- '2001-01-02T01:00:01-00:00',
- '2001-01-02T01:00:01-01:30'
+ "2001-01-01T00:00:00",
+ "2001-01-01T01:00:00",
+ "2001-01-01T01:00:01",
+ "2001-01-02T01:00:01",
+ "2001-01-01T00:00:00Z",
+ "2001-01-01T00:00:00-00:00",
+ "2001-01-01T01:00:00Z",
+ "2001-01-01T01:00:00-00:00",
+ "2001-01-01T00:00:00-01:30",
+ "2001-01-01T01:00:00-01:30",
+ "2001-01-02T01:00:01Z",
+ "2001-01-02T01:00:01-00:00",
+ "2001-01-02T01:00:01-01:30",
]
]
l2 = list(l1)
@@ -134,7 +118,12 @@ class TestLiteral(unittest.TestCase):
(3, Literal(float(1)), Literal(float(1)), Literal(float(2))),
(4, Literal(1), Literal(1.1), Literal(2.1, datatype=XSD.decimal)),
(5, Literal(1.1), Literal(1.1), Literal(2.2)),
- (6, Literal(Decimal(1)), Literal(Decimal(1.1)), Literal(Decimal(2.1), datatype=XSD.decimal)),
+ (
+ 6,
+ Literal(Decimal(1)),
+ Literal(Decimal(1.1)),
+ Literal(Decimal(2.1), datatype=XSD.decimal),
+ ),
(7, Literal(Decimal(1.1)), Literal(Decimal(1.1)), Literal(Decimal(2.2))),
(8, Literal(float(1)), Literal(float(1.1)), Literal(float(2.1))),
(9, Literal(float(1.1)), Literal(float(1.1)), Literal(float(2.2))),
@@ -144,27 +133,74 @@ class TestLiteral(unittest.TestCase):
(14, Literal(-1), Literal(-1.1), Literal(-2.1)),
(15, Literal(-1.1), Literal(-1.1), Literal(-2.2)),
(16, Literal(Decimal(-1)), Literal(Decimal(-1.1)), Literal(Decimal(-2.1))),
- (17, Literal(Decimal(-1.1)), Literal(Decimal(-1.1)), Literal(Decimal(-2.2))),
+ (
+ 17,
+ Literal(Decimal(-1.1)),
+ Literal(Decimal(-1.1)),
+ Literal(Decimal(-2.2)),
+ ),
(18, Literal(float(-1)), Literal(float(-1.1)), Literal(float(-2.1))),
(19, Literal(float(-1.1)), Literal(float(-1.1)), Literal(float(-2.2))),
-
(20, Literal(1), Literal(1.0), Literal(2.0)),
(21, Literal(1.0), Literal(1.0), Literal(2.0)),
(22, Literal(Decimal(1)), Literal(Decimal(1.0)), Literal(Decimal(2.0))),
(23, Literal(Decimal(1.0)), Literal(Decimal(1.0)), Literal(Decimal(2.0))),
(24, Literal(float(1)), Literal(float(1.0)), Literal(float(2.0))),
(25, Literal(float(1.0)), Literal(float(1.0)), Literal(float(2.0))),
-
- (26, Literal(1, datatype=XSD.integer), Literal(1, datatype=XSD.integer), Literal(2, datatype=XSD.integer)),
- (27, Literal(1, datatype=XSD.integer), Literal("1", datatype=XSD.integer), Literal("2", datatype=XSD.integer)),
- (28, Literal("1", datatype=XSD.integer), Literal("1", datatype=XSD.integer), Literal("2", datatype=XSD.integer)),
- (29, Literal("1"), Literal("1", datatype=XSD.integer), Literal("11", datatype=XSD.string)),
- (30, Literal(1), Literal("1", datatype=XSD.integer), Literal("2", datatype=XSD.integer)),
- (31, Literal(Decimal(1), datatype=XSD.decimal), Literal(Decimal(1), datatype=XSD.decimal), Literal(Decimal(2), datatype=XSD.decimal)),
- (32, Literal(Decimal(1)), Literal(Decimal(1), datatype=XSD.decimal), Literal(Decimal(2), datatype=XSD.decimal)),
- (33, Literal(float(1)), Literal(float(1), datatype=XSD.float), Literal(float(2), datatype=XSD.float)),
- (34, Literal(float(1), datatype=XSD.float), Literal(float(1), datatype=XSD.float), Literal(float(2), datatype=XSD.float)),
-
+ (
+ 26,
+ Literal(1, datatype=XSD.integer),
+ Literal(1, datatype=XSD.integer),
+ Literal(2, datatype=XSD.integer),
+ ),
+ (
+ 27,
+ Literal(1, datatype=XSD.integer),
+ Literal("1", datatype=XSD.integer),
+ Literal("2", datatype=XSD.integer),
+ ),
+ (
+ 28,
+ Literal("1", datatype=XSD.integer),
+ Literal("1", datatype=XSD.integer),
+ Literal("2", datatype=XSD.integer),
+ ),
+ (
+ 29,
+ Literal("1"),
+ Literal("1", datatype=XSD.integer),
+ Literal("11", datatype=XSD.string),
+ ),
+ (
+ 30,
+ Literal(1),
+ Literal("1", datatype=XSD.integer),
+ Literal("2", datatype=XSD.integer),
+ ),
+ (
+ 31,
+ Literal(Decimal(1), datatype=XSD.decimal),
+ Literal(Decimal(1), datatype=XSD.decimal),
+ Literal(Decimal(2), datatype=XSD.decimal),
+ ),
+ (
+ 32,
+ Literal(Decimal(1)),
+ Literal(Decimal(1), datatype=XSD.decimal),
+ Literal(Decimal(2), datatype=XSD.decimal),
+ ),
+ (
+ 33,
+ Literal(float(1)),
+ Literal(float(1), datatype=XSD.float),
+ Literal(float(2), datatype=XSD.float),
+ ),
+ (
+ 34,
+ Literal(float(1), datatype=XSD.float),
+ Literal(float(1), datatype=XSD.float),
+ Literal(float(2), datatype=XSD.float),
+ ),
(35, Literal(1), 1, Literal(2)),
(36, Literal(1), 1.0, Literal(2, datatype=XSD.decimal)),
(37, Literal(1.0), 1, Literal(2, datatype=XSD.decimal)),
@@ -173,14 +209,42 @@ class TestLiteral(unittest.TestCase):
(40, Literal(Decimal(1.0)), Decimal(1.0), Literal(Decimal(2.0))),
(41, Literal(float(1.0)), float(1), Literal(float(2.0))),
(42, Literal(float(1.0)), float(1.0), Literal(float(2.0))),
-
- (43, Literal(1, datatype=XSD.integer), "+1.1", Literal("1+1.1", datatype=XSD.string)),
- (44, Literal(1, datatype=XSD.integer), Literal("+1.1", datatype=XSD.string), Literal("1+1.1", datatype=XSD.string)),
- (45, Literal(Decimal(1.0), datatype=XSD.integer), Literal(u"1", datatype=XSD.string), Literal("11", datatype=XSD.string)),
- (46, Literal(1.1, datatype=XSD.integer), Literal("1", datatype=XSD.string), Literal("1.11", datatype=XSD.string)),
-
- (47, Literal(1, datatype=XSD.integer), None, Literal(1, datatype=XSD.integer)),
- (48, Literal("1", datatype=XSD.string), None, Literal("1", datatype=XSD.string)),
+ (
+ 43,
+ Literal(1, datatype=XSD.integer),
+ "+1.1",
+ Literal("1+1.1", datatype=XSD.string),
+ ),
+ (
+ 44,
+ Literal(1, datatype=XSD.integer),
+ Literal("+1.1", datatype=XSD.string),
+ Literal("1+1.1", datatype=XSD.string),
+ ),
+ (
+ 45,
+ Literal(Decimal(1.0), datatype=XSD.integer),
+ Literal(u"1", datatype=XSD.string),
+ Literal("11", datatype=XSD.string),
+ ),
+ (
+ 46,
+ Literal(1.1, datatype=XSD.integer),
+ Literal("1", datatype=XSD.string),
+ Literal("1.11", datatype=XSD.string),
+ ),
+ (
+ 47,
+ Literal(1, datatype=XSD.integer),
+ None,
+ Literal(1, datatype=XSD.integer),
+ ),
+ (
+ 48,
+ Literal("1", datatype=XSD.string),
+ None,
+ Literal("1", datatype=XSD.string),
+ ),
]
for case in cases:
@@ -196,22 +260,26 @@ class TestLiteral(unittest.TestCase):
if not case_passed:
print(case[1], case[2])
print("expected: " + case[3] + ", " + case[3].datatype)
- print("actual: " + (case[1] + case[2]) + ", " + (case[1] + case[2]).datatype)
+ print(
+ "actual: "
+ + (case[1] + case[2])
+ + ", "
+ + (case[1] + case[2]).datatype
+ )
self.assertTrue(case_passed, "Case " + str(case[0]) + " failed")
class TestValidityFunctions(unittest.TestCase):
-
def test_is_valid_unicode(self):
testcase_list = (
(None, True),
(1, True),
- (['foo'], True),
- ({'foo': b'bar'}, True),
- ('foo', True),
- (b'foo\x00', True),
- (b'foo\xf3\x02', False)
+ (["foo"], True),
+ ({"foo": b"bar"}, True),
+ ("foo", True),
+ (b"foo\x00", True),
+ (b"foo\xf3\x02", False),
)
for val, expected in testcase_list:
self.assertEqual(_is_valid_unicode(val), expected)
diff --git a/test/test_trig.py b/test/test_trig.py
index 78f257ea..90321c5c 100644
--- a/test/test_trig.py
+++ b/test/test_trig.py
@@ -4,85 +4,86 @@ import re
from nose import SkipTest
-TRIPLE = (rdflib.URIRef("http://example.com/s"),
- rdflib.RDFS.label,
- rdflib.Literal("example 1"))
+TRIPLE = (
+ rdflib.URIRef("http://example.com/s"),
+ rdflib.RDFS.label,
+ rdflib.Literal("example 1"),
+)
class TestTrig(unittest.TestCase):
-
def testEmpty(self):
g = rdflib.Graph()
- s = g.serialize(format='trig')
+ s = g.serialize(format="trig")
self.assertTrue(s is not None)
def testRepeatTriples(self):
g = rdflib.ConjunctiveGraph()
- g.get_context('urn:a').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:2'),
- rdflib.URIRef('urn:3')))
+ g.get_context("urn:a").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:2"), rdflib.URIRef("urn:3"))
+ )
- g.get_context('urn:b').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:2'),
- rdflib.URIRef('urn:3')))
+ g.get_context("urn:b").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:2"), rdflib.URIRef("urn:3"))
+ )
- self.assertEqual(len(g.get_context('urn:a')), 1)
- self.assertEqual(len(g.get_context('urn:b')), 1)
+ self.assertEqual(len(g.get_context("urn:a")), 1)
+ self.assertEqual(len(g.get_context("urn:b")), 1)
- s = g.serialize(format='trig')
- self.assertTrue('{}'.encode("latin-1") not in s) # no empty graphs!
+ s = g.serialize(format="trig")
+ self.assertTrue("{}".encode("latin-1") not in s) # no empty graphs!
def testSameSubject(self):
g = rdflib.ConjunctiveGraph()
- g.get_context('urn:a').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:p1'),
- rdflib.URIRef('urn:o1')))
+ g.get_context("urn:a").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:p1"), rdflib.URIRef("urn:o1"))
+ )
- g.get_context('urn:b').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:p2'),
- rdflib.URIRef('urn:o2')))
+ g.get_context("urn:b").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:p2"), rdflib.URIRef("urn:o2"))
+ )
- self.assertEqual(len(g.get_context('urn:a')), 1)
- self.assertEqual(len(g.get_context('urn:b')), 1)
+ self.assertEqual(len(g.get_context("urn:a")), 1)
+ self.assertEqual(len(g.get_context("urn:b")), 1)
- s = g.serialize(format='trig')
+ s = g.serialize(format="trig")
self.assertEqual(len(re.findall("p1".encode("latin-1"), s)), 1)
self.assertEqual(len(re.findall("p2".encode("latin-1"), s)), 1)
- self.assertTrue('{}'.encode("latin-1") not in s) # no empty graphs!
+ self.assertTrue("{}".encode("latin-1") not in s) # no empty graphs!
def testRememberNamespace(self):
g = rdflib.ConjunctiveGraph()
g.add(TRIPLE + (rdflib.URIRef("http://example.com/graph1"),))
# In 4.2.0 the first serialization would fail to include the
# prefix for the graph but later serialize() calls would work.
- first_out = g.serialize(format='trig')
- second_out = g.serialize(format='trig')
- self.assertTrue(b'@prefix ns1: <http://example.com/> .' in second_out)
- self.assertTrue(b'@prefix ns1: <http://example.com/> .' in first_out)
+ first_out = g.serialize(format="trig")
+ second_out = g.serialize(format="trig")
+ self.assertTrue(b"@prefix ns1: <http://example.com/> ." in second_out)
+ self.assertTrue(b"@prefix ns1: <http://example.com/> ." in first_out)
def testGraphQnameSyntax(self):
g = rdflib.ConjunctiveGraph()
g.add(TRIPLE + (rdflib.URIRef("http://example.com/graph1"),))
- out = g.serialize(format='trig')
- self.assertTrue(b'ns1:graph1 {' in out)
+ out = g.serialize(format="trig")
+ self.assertTrue(b"ns1:graph1 {" in out)
def testGraphUriSyntax(self):
g = rdflib.ConjunctiveGraph()
# getQName will not abbreviate this, so it should serialize as
# a '<...>' term.
g.add(TRIPLE + (rdflib.URIRef("http://example.com/foo."),))
- out = g.serialize(format='trig')
- self.assertTrue(b'<http://example.com/foo.> {' in out)
+ out = g.serialize(format="trig")
+ self.assertTrue(b"<http://example.com/foo.> {" in out)
def testBlankGraphIdentifier(self):
g = rdflib.ConjunctiveGraph()
g.add(TRIPLE + (rdflib.BNode(),))
- out = g.serialize(format='trig')
+ out = g.serialize(format="trig")
graph_label_line = out.splitlines()[-4]
- self.assertTrue(re.match(br'^_:[a-zA-Z0-9]+ \{', graph_label_line))
+ self.assertTrue(re.match(br"^_:[a-zA-Z0-9]+ \{", graph_label_line))
def testGraphParsing(self):
# should parse into single default graph context
@@ -90,7 +91,7 @@ class TestTrig(unittest.TestCase):
<http://example.com/thing#thing_a> <http://example.com/knows> <http://example.com/thing#thing_b> .
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
+ g.parse(data=data, format="trig")
self.assertEqual(len(list(g.contexts())), 1)
# should parse into single default graph context
@@ -100,7 +101,7 @@ class TestTrig(unittest.TestCase):
{ <http://example.com/thing#thing_c> <http://example.com/knows> <http://example.com/thing#thing_d> . }
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
+ g.parse(data=data, format="trig")
self.assertEqual(len(list(g.contexts())), 1)
# should parse into 2 contexts, one default, one named
@@ -114,12 +115,12 @@ class TestTrig(unittest.TestCase):
}
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
+ g.parse(data=data, format="trig")
self.assertEqual(len(list(g.contexts())), 2)
def testRoundTrips(self):
- raise SkipTest('skipped until 5.0')
+ raise SkipTest("skipped until 5.0")
data = """
<http://example.com/thing#thing_a> <http://example.com/knows> <http://example.com/thing#thing_b> .
@@ -132,17 +133,17 @@ class TestTrig(unittest.TestCase):
"""
g = rdflib.ConjunctiveGraph()
for i in range(5):
- g.parse(data=data, format='trig')
- data = g.serialize(format='trig')
+ g.parse(data=data, format="trig")
+ data = g.serialize(format="trig")
# output should only contain 1 mention of each resource/graph name
- self.assertEqual(data.count('thing_a'), 1)
- self.assertEqual(data.count('thing_b'), 1)
- self.assertEqual(data.count('thing_c'), 1)
- self.assertEqual(data.count('thing_d'), 1)
- self.assertEqual(data.count('thing_e'), 1)
- self.assertEqual(data.count('thing_f'), 1)
- self.assertEqual(data.count('graph_a'), 1)
+ self.assertEqual(data.count("thing_a"), 1)
+ self.assertEqual(data.count("thing_b"), 1)
+ self.assertEqual(data.count("thing_c"), 1)
+ self.assertEqual(data.count("thing_d"), 1)
+ self.assertEqual(data.count("thing_e"), 1)
+ self.assertEqual(data.count("thing_f"), 1)
+ self.assertEqual(data.count("graph_a"), 1)
def testDefaultGraphSerializesWithoutName(self):
data = """
@@ -151,10 +152,10 @@ class TestTrig(unittest.TestCase):
{ <http://example.com/thing#thing_c> <http://example.com/knows> <http://example.com/thing#thing_d> . }
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
- data = g.serialize(format='trig')
+ g.parse(data=data, format="trig")
+ data = g.serialize(format="trig")
- self.assertTrue('None'.encode("latin-1") not in data)
+ self.assertTrue("None".encode("latin-1") not in data)
def testPrefixes(self):
@@ -171,9 +172,9 @@ class TestTrig(unittest.TestCase):
"""
cg = rdflib.ConjunctiveGraph()
- cg.parse(data=data, format='trig')
- data = cg.serialize(format='trig')
+ cg.parse(data=data, format="trig")
+ data = cg.serialize(format="trig")
- self.assert_('ns2: <http://ex.org/docs/'.encode("latin-1") in data, data)
- self.assert_('<ns2:document1>'.encode("latin-1") not in data, data)
- self.assert_('ns2:document1'.encode("latin-1") in data, data)
+ self.assert_("ns2: <http://ex.org/docs/".encode("latin-1") in data, data)
+ self.assert_("<ns2:document1>".encode("latin-1") not in data, data)
+ self.assert_("ns2:document1".encode("latin-1") in data, data)
diff --git a/test/test_trig_w3c.py b/test/test_trig_w3c.py
index bb8588e0..d59a2f08 100644
--- a/test/test_trig_w3c.py
+++ b/test/test_trig_w3c.py
@@ -16,15 +16,15 @@ def trig(test):
g = ConjunctiveGraph()
try:
- base = 'http://www.w3.org/2013/TriGTests/' + split_uri(test.action)[1]
+ base = "http://www.w3.org/2013/TriGTests/" + split_uri(test.action)[1]
- g.parse(test.action, publicID=base, format='trig')
+ g.parse(test.action, publicID=base, format="trig")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
if test.result: # eval test
res = ConjunctiveGraph()
- res.parse(test.result, format='nquads')
+ res.parse(test.result, format="nquads")
if verbose:
@@ -32,13 +32,13 @@ def trig(test):
if not first and not second:
return
- print('===============================')
- print('TriG')
- print(g.serialize(format='nquads'))
- print('===============================')
- print('NQuads')
- print(res.serialize(format='nquads'))
- print('===============================')
+ print("===============================")
+ print("TriG")
+ print(g.serialize(format="nquads"))
+ print("===============================")
+ print("NQuads")
+ print(res.serialize(format="nquads"))
+ print("===============================")
print("Diff:")
# print "%d triples in both"%len(both)
@@ -50,9 +50,9 @@ def trig(test):
print("NQuads Only")
for t in second:
print(t)
- raise Exception('Graphs do not match!')
+ raise Exception("Graphs do not match!")
- assert isomorphic(g, res), 'graphs must be the same'
+ assert isomorphic(g, res), "graphs must be the same"
except:
if test.syntax:
@@ -63,12 +63,12 @@ testers = {
RDFT.TestTrigPositiveSyntax: trig,
RDFT.TestTrigNegativeSyntax: trig,
RDFT.TestTrigEval: trig,
- RDFT.TestTrigNegativeEval: trig
+ RDFT.TestTrigNegativeEval: trig,
}
def test_trig(tests=None):
- for t in nose_tests(testers, 'test/w3c/trig/manifest.ttl'):
+ for t in nose_tests(testers, "test/w3c/trig/manifest.ttl"):
if tests:
for test in tests:
if test in t[1].uri:
@@ -79,7 +79,7 @@ def test_trig(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_trig, 'rdflib_trig')
+ nose_tst_earl_report(test_trig, "rdflib_trig")
diff --git a/test/test_trix_parse.py b/test/test_trix_parse.py
index 1b0f9fb9..290ce0b6 100644
--- a/test/test_trix_parse.py
+++ b/test/test_trix_parse.py
@@ -6,7 +6,6 @@ import unittest
class TestTrixParse(unittest.TestCase):
-
def setUp(self):
pass
@@ -45,5 +44,5 @@ class TestTrixParse(unittest.TestCase):
# print "Parsed %d triples"%len(g)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_trix_serialize.py b/test/test_trix_serialize.py
index 3330234b..4fe78a18 100644
--- a/test/test_trix_serialize.py
+++ b/test/test_trix_serialize.py
@@ -9,7 +9,6 @@ from io import BytesIO
class TestTrixSerialize(unittest.TestCase):
-
def setUp(self):
pass
@@ -18,17 +17,17 @@ class TestTrixSerialize(unittest.TestCase):
def testSerialize(self):
- s1 = URIRef('store:1')
- r1 = URIRef('resource:1')
- r2 = URIRef('resource:2')
+ s1 = URIRef("store:1")
+ r1 = URIRef("resource:1")
+ r2 = URIRef("resource:2")
- label = URIRef('predicate:label')
+ label = URIRef("predicate:label")
g1 = Graph(identifier=s1)
g1.add((r1, label, Literal("label 1", lang="en")))
g1.add((r1, label, Literal("label 2")))
- s2 = URIRef('store:2')
+ s2 = URIRef("store:2")
g2 = Graph(identifier=s2)
g2.add((r2, label, Literal("label 3")))
@@ -37,13 +36,13 @@ class TestTrixSerialize(unittest.TestCase):
g.addN([(s, p, o, g1)])
for s, p, o in g2.triples((None, None, None)):
g.addN([(s, p, o, g2)])
- r3 = URIRef('resource:3')
+ r3 = URIRef("resource:3")
g.add((r3, label, Literal(4)))
- r = g.serialize(format='trix')
+ r = g.serialize(format="trix")
g3 = ConjunctiveGraph()
- g3.parse(BytesIO(r), format='trix')
+ g3.parse(BytesIO(r), format="trix")
for q in g3.quads((None, None, None)):
# TODO: Fix once getGraph/getContext is in conjunctive graph
@@ -87,12 +86,10 @@ class TestTrixSerialize(unittest.TestCase):
graph = ConjunctiveGraph()
graph.bind(None, "http://defaultnamespace")
- sg = graph.serialize(format='trix').decode('UTF-8')
- self.assertTrue(
- 'xmlns="http://defaultnamespace"' not in sg, sg)
- self.assertTrue(
- 'xmlns="http://www.w3.org/2004/03/trix/trix-1/' in sg, sg)
+ sg = graph.serialize(format="trix").decode("UTF-8")
+ self.assertTrue('xmlns="http://defaultnamespace"' not in sg, sg)
+ self.assertTrue('xmlns="http://www.w3.org/2004/03/trix/trix-1/' in sg, sg)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_tsvresults.py b/test/test_tsvresults.py
index 36d8e7fa..5c4d12d0 100644
--- a/test/test_tsvresults.py
+++ b/test/test_tsvresults.py
@@ -4,7 +4,6 @@ from rdflib.plugins.sparql.results.tsvresults import TSVResultParser
class TestTSVResults(unittest.TestCase):
-
def test_empty_tsvresults_bindings(self):
# check that optional bindings are ordered properly
source = """?s\t?p\t?o
diff --git a/test/test_turtle_serialize.py b/test/test_turtle_serialize.py
index c34c1d79..155cdffd 100644
--- a/test/test_turtle_serialize.py
+++ b/test/test_turtle_serialize.py
@@ -12,7 +12,7 @@ def testTurtleFinalDot():
u = URIRef("http://ex.org/bob.")
g.bind("ns", "http://ex.org/")
g.add((u, u, u))
- s = g.serialize(format='turtle')
+ s = g.serialize(format="turtle")
assert "ns:bob.".encode("latin-1") not in s
@@ -50,18 +50,23 @@ def testUnicodeEscaping():
assert len(triples) == 3
print(triples)
# Now check that was decoded into python values properly
- assert triples[0][2] == URIRef(u'http://example.com/aaa\xf3bbbb')
- assert triples[1][2] == URIRef(u'http://example.com/zzz\U00100000zzz')
- assert triples[2][2] == URIRef(u'http://example.com/aaa\xf3bbb')
+ assert triples[0][2] == URIRef(u"http://example.com/aaa\xf3bbbb")
+ assert triples[1][2] == URIRef(u"http://example.com/zzz\U00100000zzz")
+ assert triples[2][2] == URIRef(u"http://example.com/aaa\xf3bbb")
def test_turtle_valid_list():
- NS = Namespace('http://example.org/ns/')
+ NS = Namespace("http://example.org/ns/")
g = Graph()
- g.parse(data="""
+ g.parse(
+ data="""
@prefix : <{0}> .
:s :p (""), (0), (false) .
- """.format(NS), format='turtle')
+ """.format(
+ NS
+ ),
+ format="turtle",
+ )
turtle_serializer = TurtleSerializer(g)
@@ -70,24 +75,30 @@ def test_turtle_valid_list():
def test_turtle_namespace():
- graph = Graph()
- graph.bind('OBO', 'http://purl.obolibrary.org/obo/')
- graph.bind('GENO', 'http://purl.obolibrary.org/obo/GENO_')
- graph.bind('RO', 'http://purl.obolibrary.org/obo/RO_')
- graph.bind('RO_has_phenotype',
- 'http://purl.obolibrary.org/obo/RO_0002200')
- graph.add((URIRef('http://example.org'),
- URIRef('http://purl.obolibrary.org/obo/RO_0002200'),
- URIRef('http://purl.obolibrary.org/obo/GENO_0000385')))
- output = [val for val in
- graph.serialize(format='turtle').decode().splitlines()
- if not val.startswith('@prefix')]
- output = ' '.join(output)
- assert 'RO_has_phenotype:' in output
- assert 'GENO:0000385' in output
+ graph = Graph()
+ graph.bind("OBO", "http://purl.obolibrary.org/obo/")
+ graph.bind("GENO", "http://purl.obolibrary.org/obo/GENO_")
+ graph.bind("RO", "http://purl.obolibrary.org/obo/RO_")
+ graph.bind("RO_has_phenotype", "http://purl.obolibrary.org/obo/RO_0002200")
+ graph.add(
+ (
+ URIRef("http://example.org"),
+ URIRef("http://purl.obolibrary.org/obo/RO_0002200"),
+ URIRef("http://purl.obolibrary.org/obo/GENO_0000385"),
+ )
+ )
+ output = [
+ val
+ for val in graph.serialize(format="turtle").decode().splitlines()
+ if not val.startswith("@prefix")
+ ]
+ output = " ".join(output)
+ assert "RO_has_phenotype:" in output
+ assert "GENO:0000385" in output
if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_turtle_sort_issue613.py b/test/test_turtle_sort_issue613.py
index f81cba33..a26ede28 100644
--- a/test/test_turtle_sort_issue613.py
+++ b/test/test_turtle_sort_issue613.py
@@ -17,8 +17,8 @@ https://github.com/RDFLib/rdflib/issues/676
def test_sort_dates():
g = rdflib.Graph()
- y = '''@prefix ex: <http://ex.org> .
-ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, "2016-01-01T00:00:00Z"^^<http://www.w3.org/2001/XMLSchema#dateTime> . '''
+ y = """@prefix ex: <http://ex.org> .
+ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, "2016-01-01T00:00:00Z"^^<http://www.w3.org/2001/XMLSchema#dateTime> . """
p = g.parse(data=y, format="turtle")
p.serialize(format="turtle")
@@ -27,14 +27,14 @@ ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, "2
def test_sort_docfrag():
g = rdflib.Graph()
- y = '''@prefix ex: <http://ex.org> .
+ y = """@prefix ex: <http://ex.org> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
-ex:X ex:p "<h1>hi</h1>"^^rdf:HTML, "<h1>ho</h1>"^^rdf:HTML . '''
+ex:X ex:p "<h1>hi</h1>"^^rdf:HTML, "<h1>ho</h1>"^^rdf:HTML . """
p = g.parse(data=y, format="turtle")
p.serialize(format="turtle")
-if __name__ == '__main__':
+if __name__ == "__main__":
test_sort_docfrag()
diff --git a/test/test_turtle_w3c.py b/test/test_turtle_w3c.py
index 469ed023..b89ce66b 100644
--- a/test/test_turtle_w3c.py
+++ b/test/test_turtle_w3c.py
@@ -15,15 +15,15 @@ def turtle(test):
g = Graph()
try:
- base = 'http://www.w3.org/2013/TurtleTests/' + split_uri(test.action)[1]
+ base = "http://www.w3.org/2013/TurtleTests/" + split_uri(test.action)[1]
- g.parse(test.action, publicID=base, format='turtle')
+ g.parse(test.action, publicID=base, format="turtle")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
if test.result: # eval test
res = Graph()
- res.parse(test.result, format='nt')
+ res.parse(test.result, format="nt")
if verbose:
both, first, second = graph_diff(g, res)
@@ -39,9 +39,9 @@ def turtle(test):
print("NT Only")
for t in second:
print(t)
- raise Exception('Graphs do not match!')
+ raise Exception("Graphs do not match!")
- assert isomorphic(g, res), 'graphs must be the same'
+ assert isomorphic(g, res), "graphs must be the same"
except:
if test.syntax:
@@ -52,13 +52,12 @@ testers = {
RDFT.TestTurtlePositiveSyntax: turtle,
RDFT.TestTurtleNegativeSyntax: turtle,
RDFT.TestTurtleEval: turtle,
- RDFT.TestTurtleNegativeEval: turtle
+ RDFT.TestTurtleNegativeEval: turtle,
}
def test_turtle(tests=None):
- for t in nose_tests(testers,
- 'test/w3c/turtle/manifest.ttl'):
+ for t in nose_tests(testers, "test/w3c/turtle/manifest.ttl"):
if tests:
for test in tests:
if test in t[1].uri:
@@ -69,8 +68,8 @@ def test_turtle(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_turtle, 'rdflib_turtle')
+ nose_tst_earl_report(test_turtle, "rdflib_turtle")
diff --git a/test/test_util.py b/test/test_util.py
index 4184b659..89890c8d 100644
--- a/test/test_util.py
+++ b/test/test_util.py
@@ -54,11 +54,13 @@ n3source = """\
class TestUtilMisc(unittest.TestCase):
def setUp(self):
- self.x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ self.x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
def test_util_list2set(self):
- base = [Literal('foo'), self.x]
+ base = [Literal("foo"), self.x]
r = util.list2set(base + base)
self.assertTrue(r == base)
@@ -75,10 +77,11 @@ class TestUtilMisc(unittest.TestCase):
class TestUtilDateTime(unittest.TestCase):
-
def setUp(self):
- self.x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ self.x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
def test_util_date_time_tisnoneandnotz(self):
t = None
@@ -115,8 +118,10 @@ class TestUtilDateTime(unittest.TestCase):
def ablocaltime(t):
from time import gmtime
+
res = gmtime(t)
return res
+
util.localtime = ablocaltime
res = util.date_time(t, local_time_zone=True)
self.assertTrue(res is not t)
@@ -124,8 +129,10 @@ class TestUtilDateTime(unittest.TestCase):
class TestUtilTermConvert(unittest.TestCase):
def setUp(self):
- self.x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ self.x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
def test_util_to_term_sisNone(self):
s = None
@@ -145,12 +152,12 @@ class TestUtilTermConvert(unittest.TestCase):
self.assertEqual(str(res), s[1:-1])
def test_util_to_term_sisbnode(self):
- s = '_http%23%4F%4Fexample%33com'
+ s = "_http%23%4F%4Fexample%33com"
res = util.to_term(s)
self.assertTrue(isinstance(res, BNode))
def test_util_to_term_sisunknown(self):
- s = 'http://example.com'
+ s = "http://example.com"
self.assertRaises(Exception, util.to_term, s)
def test_util_to_term_sisnotstr(self):
@@ -185,7 +192,7 @@ class TestUtilTermConvert(unittest.TestCase):
self.assertTrue(isinstance(res, Literal))
def test_util_from_n3_expecturiref(self):
- s = '<http://example.org/schema>'
+ s = "<http://example.org/schema>"
res = util.from_n3(s, default=None, backend=None)
self.assertTrue(isinstance(res, URIRef))
@@ -198,89 +205,99 @@ class TestUtilTermConvert(unittest.TestCase):
s = '"michel"@fr^^xsd:fr'
res = util.from_n3(s, default=None, backend=None)
self.assertTrue(isinstance(res, Literal))
- self.assertEqual(res, Literal('michel',
- datatype=XSD['fr']))
+ self.assertEqual(res, Literal("michel", datatype=XSD["fr"]))
def test_util_from_n3_expectliteralanddtype(self):
s = '"true"^^xsd:boolean'
res = util.from_n3(s, default=None, backend=None)
- self.assertTrue(res.eq(Literal('true', datatype=XSD['boolean'])))
+ self.assertTrue(res.eq(Literal("true", datatype=XSD["boolean"])))
def test_util_from_n3_expectliteralwithdatatypefromint(self):
- s = '42'
+ s = "42"
res = util.from_n3(s)
self.assertEqual(res, Literal(42))
def test_util_from_n3_expectliteralwithdatatypefrombool(self):
- s = 'true'
+ s = "true"
res = util.from_n3(s)
self.assertEqual(res, Literal(True))
- s = 'false'
+ s = "false"
res = util.from_n3(s)
self.assertEqual(res, Literal(False))
def test_util_from_n3_expectliteralmultiline(self):
s = '"""multi\nline\nstring"""@en'
res = util.from_n3(s, default=None, backend=None)
- self.assertTrue(res, Literal('multi\nline\nstring', lang='en'))
+ self.assertTrue(res, Literal("multi\nline\nstring", lang="en"))
def test_util_from_n3_expectliteralwithescapedquote(self):
s = '"\\""'
res = util.from_n3(s, default=None, backend=None)
- self.assertTrue(res, Literal('\\"', lang='en'))
+ self.assertTrue(res, Literal('\\"', lang="en"))
def test_util_from_n3_expectliteralwithtrailingbackslash(self):
s = '"trailing\\\\"^^<http://www.w3.org/2001/XMLSchema#string>'
res = util.from_n3(s)
- self.assertTrue(res, Literal('trailing\\', datatype=XSD['string']))
+ self.assertTrue(res, Literal("trailing\\", datatype=XSD["string"]))
self.assertTrue(res.n3(), s)
def test_util_from_n3_expectpartialidempotencewithn3(self):
- for n3 in ('<http://ex.com/foo>',
- '"foo"@de',
- u'<http://ex.com/漢字>',
- u'<http://ex.com/a#あ>',
- # '"\\""', # exception as '\\"' --> '"' by orig parser as well
- '"""multi\n"line"\nstring"""@en'):
- self.assertEqual(util.from_n3(n3).n3(), n3,
- 'from_n3(%(n3e)r).n3() != %(n3e)r' % {'n3e': n3})
+ for n3 in (
+ "<http://ex.com/foo>",
+ '"foo"@de',
+ u"<http://ex.com/漢字>",
+ u"<http://ex.com/a#あ>",
+ # '"\\""', # exception as '\\"' --> '"' by orig parser as well
+ '"""multi\n"line"\nstring"""@en',
+ ):
+ self.assertEqual(
+ util.from_n3(n3).n3(),
+ n3,
+ "from_n3(%(n3e)r).n3() != %(n3e)r" % {"n3e": n3},
+ )
def test_util_from_n3_expectsameasn3parser(self):
def parse_n3(term_n3):
- ''' Disclaimer: Quick and dirty hack using the n3 parser. '''
- prepstr = ("@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n"
- "<urn:no_use> <urn:no_use> %s.\n" % term_n3)
+ """ Disclaimer: Quick and dirty hack using the n3 parser. """
+ prepstr = (
+ "@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n"
+ "<urn:no_use> <urn:no_use> %s.\n" % term_n3
+ )
g = ConjunctiveGraph()
- g.parse(data=prepstr, format='n3')
+ g.parse(data=prepstr, format="n3")
return [t for t in g.triples((None, None, None))][0][2]
for n3 in ( # "michel", # won't parse in original parser
# "_:michel", # BNodes won't be the same
'"michel"',
- '<http://example.org/schema>',
+ "<http://example.org/schema>",
'"michel"@fr',
# '"michel"@fr^^xsd:fr', # FIXME: invalid n3, orig parser will prefer datatype
# '"true"^^xsd:boolean', # FIXME: orig parser will expand xsd prefix
- '42',
- 'true',
- 'false',
+ "42",
+ "true",
+ "false",
'"""multi\nline\nstring"""@en',
- '<http://ex.com/foo>',
+ "<http://ex.com/foo>",
'"foo"@de',
'"\\""@en',
- '"""multi\n"line"\nstring"""@en'):
+ '"""multi\n"line"\nstring"""@en',
+ ):
res, exp = util.from_n3(n3), parse_n3(n3)
- self.assertEqual(res, exp,
- 'from_n3(%(n3e)r): %(res)r != parser.notation3: %(exp)r' % {
- 'res': res, 'exp': exp, 'n3e': n3})
+ self.assertEqual(
+ res,
+ exp,
+ "from_n3(%(n3e)r): %(res)r != parser.notation3: %(exp)r"
+ % {"res": res, "exp": exp, "n3e": n3},
+ )
def test_util_from_n3_expectquotedgraph(self):
- s = '{<http://example.com/schema>}'
+ s = "{<http://example.com/schema>}"
res = util.from_n3(s, default=None, backend="IOMemory")
self.assertTrue(isinstance(res, QuotedGraph))
def test_util_from_n3_expectgraph(self):
- s = '[<http://example.com/schema>]'
+ s = "[<http://example.com/schema>]"
res = util.from_n3(s, default=None, backend="IOMemory")
self.assertTrue(isinstance(res, Graph))
@@ -317,35 +334,17 @@ class TestUtilCheckers(unittest.TestCase):
def test_util_check_statement(self):
c = "http://example.com"
- self.assertRaises(
- SubjectTypeError,
- util.check_statement,
- (c, self.p, self.o))
- self.assertRaises(
- PredicateTypeError,
- util.check_statement,
- (self.s, c, self.o))
- self.assertRaises(
- ObjectTypeError,
- util.check_statement,
- (self.s, self.p, c))
+ self.assertRaises(SubjectTypeError, util.check_statement, (c, self.p, self.o))
+ self.assertRaises(PredicateTypeError, util.check_statement, (self.s, c, self.o))
+ self.assertRaises(ObjectTypeError, util.check_statement, (self.s, self.p, c))
res = util.check_statement((self.s, self.p, self.o))
self.assertTrue(res == None)
def test_util_check_pattern(self):
c = "http://example.com"
- self.assertRaises(
- SubjectTypeError,
- util.check_pattern,
- (c, self.p, self.o))
- self.assertRaises(
- PredicateTypeError,
- util.check_pattern,
- (self.s, c, self.o))
- self.assertRaises(
- ObjectTypeError,
- util.check_pattern,
- (self.s, self.p, c))
+ self.assertRaises(SubjectTypeError, util.check_pattern, (c, self.p, self.o))
+ self.assertRaises(PredicateTypeError, util.check_pattern, (self.s, c, self.o))
+ self.assertRaises(ObjectTypeError, util.check_pattern, (self.s, self.p, c))
res = util.check_pattern((self.s, self.p, self.o))
self.assertTrue(res == None)
diff --git a/test/test_wide_python.py b/test/test_wide_python.py
index feef4519..5463e798 100644
--- a/test/test_wide_python.py
+++ b/test/test_wide_python.py
@@ -1,14 +1,13 @@
-
def test_wide_python_build():
"""This test is meant to fail on narrow python builds (common on Mac OS X).
See https://github.com/RDFLib/rdflib/issues/456 for more information.
"""
- assert len(u'\U0010FFFF') == 1, (
- 'You are using a narrow Python build!\n'
- 'This means that your Python does not properly support chars > 16bit.\n'
+ assert len(u"\U0010FFFF") == 1, (
+ "You are using a narrow Python build!\n"
+ "This means that your Python does not properly support chars > 16bit.\n"
'On your system chars like c=u"\\U0010FFFF" will have a len(c)==2.\n'
- 'As this can cause hard to debug problems with string processing\n'
- '(slicing, regexp, ...) later on, we strongly advise to use a wide\n'
- 'Python build in production systems.'
+ "As this can cause hard to debug problems with string processing\n"
+ "(slicing, regexp, ...) later on, we strongly advise to use a wide\n"
+ "Python build in production systems."
)
diff --git a/test/test_xmlliterals.py b/test/test_xmlliterals.py
index b467e82a..fcc0ddf2 100644
--- a/test/test_xmlliterals.py
+++ b/test/test_xmlliterals.py
@@ -3,24 +3,24 @@ from rdflib import RDF, Graph, Literal
def testPythonRoundtrip():
- l1 = Literal('<msg>hello</msg>', datatype=RDF.XMLLiteral)
- assert l1.value is not None, 'xml must have been parsed'
- assert l1.datatype == RDF.XMLLiteral, 'literal must have right datatype'
+ l1 = Literal("<msg>hello</msg>", datatype=RDF.XMLLiteral)
+ assert l1.value is not None, "xml must have been parsed"
+ assert l1.datatype == RDF.XMLLiteral, "literal must have right datatype"
- l2 = Literal('<msg>good morning</msg>', datatype=RDF.XMLLiteral)
- assert l2.value is not None, 'xml must have been parsed'
- assert not l1.eq(l2), 'literals must NOT be equal'
+ l2 = Literal("<msg>good morning</msg>", datatype=RDF.XMLLiteral)
+ assert l2.value is not None, "xml must have been parsed"
+ assert not l1.eq(l2), "literals must NOT be equal"
l3 = Literal(l1.value)
- assert l1.eq(l3), 'roundtripped literals must be equal'
- assert l3.datatype == RDF.XMLLiteral, 'literal must have right datatype'
+ assert l1.eq(l3), "roundtripped literals must be equal"
+ assert l3.datatype == RDF.XMLLiteral, "literal must have right datatype"
- l4 = Literal('<msg >hello</msg>', datatype=RDF.XMLLiteral)
+ l4 = Literal("<msg >hello</msg>", datatype=RDF.XMLLiteral)
assert l1 == l4
assert l1.eq(l4)
rdflib.NORMALIZE_LITERALS = False
- l4 = Literal('<msg >hello</msg>', datatype=RDF.XMLLiteral)
+ l4 = Literal("<msg >hello</msg>", datatype=RDF.XMLLiteral)
assert l1 != l4
assert l1.eq(l4)
rdflib.NORMALIZE_LITERALS = True
@@ -49,9 +49,13 @@ def testRDFXMLParse():
def graph():
g = rdflib.Graph()
- g.add((rdflib.URIRef('http://example.org/a'),
- rdflib.URIRef('http://example.org/p'),
- rdflib.Literal('<msg>hei</hei>', datatype=RDF.XMLLiteral)))
+ g.add(
+ (
+ rdflib.URIRef("http://example.org/a"),
+ rdflib.URIRef("http://example.org/p"),
+ rdflib.Literal("<msg>hei</hei>", datatype=RDF.XMLLiteral),
+ )
+ )
return g
@@ -65,20 +69,20 @@ def roundtrip(fmt):
def testRoundtrip():
- roundtrip('xml')
- roundtrip('n3')
- roundtrip('nt')
+ roundtrip("xml")
+ roundtrip("n3")
+ roundtrip("nt")


def testHTML():
- l1 = Literal('<msg>hello</msg>', datatype=RDF.XMLLiteral)
- assert l1.value is not None, 'xml must have been parsed'
- assert l1.datatype == RDF.XMLLiteral, 'literal must have right datatype'
+ l1 = Literal("<msg>hello</msg>", datatype=RDF.XMLLiteral)
+ assert l1.value is not None, "xml must have been parsed"
+ assert l1.datatype == RDF.XMLLiteral, "literal must have right datatype"
- l2 = Literal('<msg>hello</msg>', datatype=RDF.HTML)
- assert l2.value is not None, 'xml must have been parsed'
- assert l2.datatype == RDF.HTML, 'literal must have right datatype'
+ l2 = Literal("<msg>hello</msg>", datatype=RDF.HTML)
+ assert l2.value is not None, "xml must have been parsed"
+ assert l2.datatype == RDF.HTML, "literal must have right datatype"
assert l1 != l2
assert not l1.eq(l2)
diff --git a/test/testutils.py b/test/testutils.py
index 20b060d3..03366cfb 100644
--- a/test/testutils.py
+++ b/test/testutils.py
@@ -65,6 +65,7 @@ def _parse_or_report(verbose, graph, *args, **kwargs):
def nose_tst_earl_report(generator, earl_report_name=None):
from optparse import OptionParser
+
p = OptionParser()
(options, args) = p.parse_args()
@@ -74,7 +75,7 @@ def nose_tst_earl_report(generator, earl_report_name=None):
for t in generator(args):
tests += 1
- print('Running ', t[1].uri)
+ print("Running ", t[1].uri)
try:
t[0](t[1])
add_test(t[1].uri, "passed")
@@ -93,11 +94,16 @@ def nose_tst_earl_report(generator, earl_report_name=None):
print_exc()
sys.stderr.write("%s\n" % t[1].uri)
- print("Ran %d tests, %d skipped, %d failed. "%(tests, skip, tests-skip-success))
+ print(
+ "Ran %d tests, %d skipped, %d failed. " % (tests, skip, tests - skip - success)
+ )
if earl_report_name:
now = isodate.datetime_isoformat(datetime.datetime.utcnow())
- earl_report = 'test_reports/%s-%s.ttl' % (earl_report_name, now.replace(":", ""))
+ earl_report = "test_reports/%s-%s.ttl" % (
+ earl_report_name,
+ now.replace(":", ""),
+ )
- report.serialize(earl_report, format='n3')
- report.serialize('test_reports/%s-latest.ttl'%earl_report_name, format='n3')
+ report.serialize(earl_report, format="n3")
+ report.serialize("test_reports/%s-latest.ttl" % earl_report_name, format="n3")
print("Wrote EARL-report to '%s'" % earl_report)
diff --git a/test/triple_store.py b/test/triple_store.py
index b9c5221a..f37bea33 100644
--- a/test/triple_store.py
+++ b/test/triple_store.py
@@ -6,8 +6,8 @@ from rdflib.graph import Graph
class GraphTest(unittest.TestCase):
- backend = 'default'
- path = 'store'
+ backend = "default"
+ path = "store"

def setUp(self):
self.store = Graph(store=self.backend)
diff --git a/test/type_check.py b/test/type_check.py
index 605f0916..19329a39 100644
--- a/test/type_check.py
+++ b/test/type_check.py
@@ -10,9 +10,11 @@ foo = URIRef("foo")
class TypeCheckCase(unittest.TestCase):
- unstable = True # TODO: until we decide if we want to add type checking back to rdflib
- backend = 'default'
- path = 'store'
+ unstable = (
+ True # TODO: until we decide if we want to add type checking back to rdflib
+ )
+ backend = "default"
+ path = "store"

def setUp(self):
self.store = Graph(backend=self.backend)
@@ -22,13 +24,10 @@ class TypeCheckCase(unittest.TestCase):
self.store.close()

def testSubjectTypeCheck(self):
- self.assertRaises(SubjectTypeError,
- self.store.add, (None, foo, foo))
+ self.assertRaises(SubjectTypeError, self.store.add, (None, foo, foo))

def testPredicateTypeCheck(self):
- self.assertRaises(PredicateTypeError,
- self.store.add, (foo, None, foo))
+ self.assertRaises(PredicateTypeError, self.store.add, (foo, None, foo))

def testObjectTypeCheck(self):
- self.assertRaises(ObjectTypeError,
- self.store.add, (foo, foo, None))
+ self.assertRaises(ObjectTypeError, self.store.add, (foo, foo, None))