author    Nicholas Car <nicholas.car@surroundaustralia.com>  2020-05-16 21:00:24 +1000
committer Nicholas Car <nicholas.car@surroundaustralia.com>  2020-05-16 21:00:24 +1000
commit    0be6f6039479ce29cf71b11e76be54e186130036 (patch)
tree      897d208525a7e8952fb496151db074d49dcdeb3a
parent    2a8d70824e1b4caf0c606074a44ac3a15fa72718 (diff)
download  rdflib-0be6f6039479ce29cf71b11e76be54e186130036.tar.gz
blacked all python files
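
The change set below is what running Black, the Python code formatter named in the commit message, produces over the whole tree. A minimal sketch of reproducing such a sweep from a checkout (the Black version and options actually used are not recorded on this page, so the bare default invocation is an assumption):

    # hypothetical helper script, not part of the rdflib tree
    import subprocess
    import sys

    # Black rewrites the files in place; check=True raises on failure.
    subprocess.run([sys.executable, "-m", "black", "."], check=True)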
-rw-r--r-- docs/plugintable.py | 17
-rw-r--r-- examples/conjunctive_graphs.py | 2
-rw-r--r-- examples/slice.py | 4
-rw-r--r-- examples/sparqlstore_example.py | 19
-rw-r--r-- examples/swap_primer.py | 2
-rw-r--r-- rdflib/__init__.py | 1
-rw-r--r-- rdflib/collection.py | 12
-rw-r--r-- rdflib/compare.py | 106
-rw-r--r-- rdflib/compat.py | 49
-rw-r--r-- rdflib/events.py | 7
-rw-r--r-- rdflib/exceptions.py | 46
-rw-r--r-- rdflib/extras/cmdlineutils.py | 18
-rw-r--r-- rdflib/extras/describer.py | 7
-rw-r--r-- rdflib/extras/external_graph_libs.py | 72
-rw-r--r-- rdflib/extras/infixowl.py | 1046
-rw-r--r-- rdflib/graph.py | 24
-rw-r--r-- rdflib/namespace.py | 394
-rw-r--r-- rdflib/parser.py | 81
-rw-r--r-- rdflib/paths.py | 48
-rw-r--r-- rdflib/plugin.py | 400
-rw-r--r-- rdflib/plugins/memory.py | 64
-rwxr-xr-x rdflib/plugins/parsers/notation3.py | 871
-rw-r--r-- rdflib/plugins/parsers/nquads.py | 16
-rw-r--r-- rdflib/plugins/parsers/nt.py | 2
-rw-r--r-- rdflib/plugins/parsers/ntriples.py | 53
-rw-r--r-- rdflib/plugins/parsers/rdfxml.py | 151
-rw-r--r-- rdflib/plugins/parsers/trig.py | 43
-rw-r--r-- rdflib/plugins/parsers/trix.py | 60
-rw-r--r-- rdflib/plugins/serializers/n3.py | 28
-rw-r--r-- rdflib/plugins/serializers/nquads.py | 32
-rw-r--r-- rdflib/plugins/serializers/nt.py | 34
-rw-r--r-- rdflib/plugins/serializers/rdfxml.py | 73
-rw-r--r-- rdflib/plugins/serializers/trig.py | 21
-rw-r--r-- rdflib/plugins/serializers/trix.py | 39
-rw-r--r-- rdflib/plugins/serializers/turtle.py | 104
-rw-r--r-- rdflib/plugins/serializers/xmlwriter.py | 19
-rw-r--r-- rdflib/plugins/sleepycat.py | 133
-rw-r--r-- rdflib/plugins/sparql/__init__.py | 2
-rw-r--r-- rdflib/plugins/sparql/aggregates.py | 11
-rw-r--r-- rdflib/plugins/sparql/algebra.py | 240
-rw-r--r-- rdflib/plugins/sparql/datatypes.py | 78
-rw-r--r-- rdflib/plugins/sparql/evaluate.py | 172
-rw-r--r-- rdflib/plugins/sparql/evalutils.py | 15
-rw-r--r-- rdflib/plugins/sparql/operators.py | 190
-rw-r--r-- rdflib/plugins/sparql/parser.py | 1123
-rw-r--r-- rdflib/plugins/sparql/parserutils.py | 24
-rw-r--r-- rdflib/plugins/sparql/processor.py | 13
-rw-r--r-- rdflib/plugins/sparql/results/csvresults.py | 24
-rw-r--r-- rdflib/plugins/sparql/results/graph.py | 10
-rw-r--r-- rdflib/plugins/sparql/results/jsonresults.py | 59
-rw-r--r-- rdflib/plugins/sparql/results/rdfresults.py | 25
-rw-r--r-- rdflib/plugins/sparql/results/tsvresults.py | 51
-rw-r--r-- rdflib/plugins/sparql/results/txtresults.py | 16
-rw-r--r-- rdflib/plugins/sparql/results/xmlresults.py | 150
-rw-r--r-- rdflib/plugins/sparql/sparql.py | 73
-rw-r--r-- rdflib/plugins/sparql/update.py | 37
-rw-r--r-- rdflib/plugins/stores/auditable.py | 62
-rw-r--r-- rdflib/plugins/stores/concurrent.py | 11
-rw-r--r-- rdflib/plugins/stores/regexmatching.py | 70
-rw-r--r-- rdflib/plugins/stores/sparqlconnector.py | 65
-rw-r--r-- rdflib/plugins/stores/sparqlstore.py | 253
-rw-r--r-- rdflib/query.py | 62
-rw-r--r-- rdflib/resource.py | 71
-rw-r--r-- rdflib/serializer.py | 3
-rw-r--r-- rdflib/store.py | 69
-rw-r--r-- rdflib/term.py | 466
-rw-r--r-- rdflib/tools/csv2rdf.py | 110
-rw-r--r-- rdflib/tools/graphisomorphism.py | 44
-rw-r--r-- rdflib/tools/rdf2dot.py | 109
-rw-r--r-- rdflib/tools/rdfpipe.py | 127
-rw-r--r-- rdflib/tools/rdfs2dot.py | 88
-rw-r--r-- rdflib/util.py | 117
-rw-r--r-- rdflib/void.py | 12
-rwxr-xr-x run_tests.py | 38
-rw-r--r-- setup.py | 81
-rw-r--r-- test/earl.py | 12
-rw-r--r-- test/manifest.py | 102
-rw-r--r-- test/store_performance.py | 14
-rw-r--r-- test/test_aggregate_graphs.py | 46
-rw-r--r-- test/test_auditable.py | 244
-rw-r--r-- test/test_batch_add.py | 33
-rw-r--r-- test/test_bnode_ncname.py | 16
-rw-r--r-- test/test_canonicalization.py | 435
-rw-r--r-- test/test_comparison.py | 7
-rw-r--r-- test/test_conjunctive_graph.py | 15
-rw-r--r-- test/test_conneg.py | 8
-rw-r--r-- test/test_conventions.py | 13
-rw-r--r-- test/test_core_sparqlstore.py | 5
-rw-r--r-- test/test_dataset.py | 94
-rw-r--r-- test/test_datetime.py | 38
-rw-r--r-- test/test_dawg.py | 206
-rw-r--r-- test/test_diff.py | 4
-rw-r--r-- test/test_duration.py | 8
-rw-r--r-- test/test_empty_xml_base.py | 30
-rw-r--r-- test/test_evaluate_bind.py | 28
-rw-r--r-- test/test_events.py | 15
-rw-r--r-- test/test_expressions.py | 156
-rw-r--r-- test/test_extras_external_graph_libs.py | 42
-rw-r--r-- test/test_finalnewline.py | 14
-rw-r--r-- test/test_graph.py | 42
-rw-r--r-- test/test_graph_context.py | 115
-rw-r--r-- test/test_graph_formula.py | 32
-rw-r--r-- test/test_graph_items.py | 7
-rw-r--r-- test/test_hex_binary.py | 11
-rw-r--r-- test/test_initbindings.py | 275
-rw-r--r-- test/test_iomemory.py | 3
-rw-r--r-- test/test_issue084.py | 91
-rw-r--r-- test/test_issue1003.py | 60
-rw-r--r-- test/test_issue160.py | 17
-rw-r--r-- test/test_issue161.py | 10
-rw-r--r-- test/test_issue184.py | 4
-rw-r--r-- test/test_issue190.py | 26
-rw-r--r-- test/test_issue200.py | 13
-rw-r--r-- test/test_issue209.py | 1
-rw-r--r-- test/test_issue223.py | 6
-rw-r--r-- test/test_issue247.py | 1
-rw-r--r-- test/test_issue248.py | 38
-rw-r--r-- test/test_issue274.py | 78
-rw-r--r-- test/test_issue363.py | 14
-rw-r--r-- test/test_issue379.py | 4
-rw-r--r-- test/test_issue381.py | 53
-rw-r--r-- test/test_issue432.py | 2
-rw-r--r-- test/test_issue446.py | 11
-rw-r--r-- test/test_issue492.py | 4
-rw-r--r-- test/test_issue523.py | 8
-rw-r--r-- test/test_issue532.py | 2
-rw-r--r-- test/test_issue545.py | 4
-rw-r--r-- test/test_issue554.py | 7
-rw-r--r-- test/test_issue563.py | 24
-rw-r--r-- test/test_issue579.py | 4
-rw-r--r-- test/test_issue604.py | 2
-rw-r--r-- test/test_issue655.py | 52
-rw-r--r-- test/test_issue715.py | 13
-rw-r--r-- test/test_issue733.py | 19
-rw-r--r-- test/test_issue920.py | 19
-rw-r--r-- test/test_issue923.py | 5
-rw-r--r-- test/test_issue953.py | 6
-rw-r--r-- test/test_issue_git_200.py | 3
-rw-r--r-- test/test_issue_git_336.py | 26
-rw-r--r-- test/test_literal.py | 56
-rw-r--r-- test/test_memory_store.py | 16
-rw-r--r-- test/test_mulpath_n3.py | 4
-rw-r--r-- test/test_n3.py | 84
-rw-r--r-- test/test_n3_suite.py | 22
-rw-r--r-- test/test_namespace.py | 85
-rw-r--r-- test/test_nodepickler.py | 13
-rw-r--r-- test/test_nquads.py | 23
-rw-r--r-- test/test_nquads_w3c.py | 13
-rw-r--r-- test/test_nt_misc.py | 19
-rw-r--r-- test/test_nt_suite.py | 52
-rw-r--r-- test/test_nt_w3c.py | 13
-rw-r--r-- test/test_parser.py | 11
-rw-r--r-- test/test_parser_helpers.py | 1
-rw-r--r-- test/test_prefixTypes.py | 11
-rw-r--r-- test/test_preflabel.py | 75
-rw-r--r-- test/test_prettyxml.py | 95
-rw-r--r-- test/test_rdf_lists.py | 20
-rw-r--r-- test/test_rdfxml.py | 32
-rw-r--r-- test/test_roundtrip.py | 37
-rw-r--r-- test/test_rules.py | 11
-rw-r--r-- test/test_seq.py | 8
-rw-r--r-- test/test_serializexml.py | 66
-rw-r--r-- test/test_slice.py | 38
-rw-r--r-- test/test_sparql.py | 41
-rw-r--r-- test/test_sparql_agg_distinct.py | 49
-rw-r--r-- test/test_sparql_agg_undef.py | 24
-rw-r--r-- test/test_sparql_construct_bindings.py | 21
-rw-r--r-- test/test_sparql_service.py | 32
-rw-r--r-- test/test_sparqlstore.py | 41
-rw-r--r-- test/test_sparqlupdatestore.py | 173
-rw-r--r-- test/test_swap_n3.py | 61
-rw-r--r-- test/test_term.py | 208
-rw-r--r-- test/test_trig.py | 113
-rw-r--r-- test/test_trig_w3c.py | 32
-rw-r--r-- test/test_trix_parse.py | 3
-rw-r--r-- test/test_trix_serialize.py | 27
-rw-r--r-- test/test_tsvresults.py | 1
-rw-r--r-- test/test_turtle_serialize.py | 55
-rw-r--r-- test/test_turtle_sort_issue613.py | 10
-rw-r--r-- test/test_turtle_w3c.py | 19
-rw-r--r-- test/test_util.py | 133
-rw-r--r-- test/test_wide_python.py | 13
-rw-r--r-- test/test_xmlliterals.py | 48
-rw-r--r-- test/testutils.py | 16
-rw-r--r-- test/triple_store.py | 4
-rw-r--r-- test/type_check.py | 17
186 files changed, 7386 insertions, 5526 deletions
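
Every hunk that follows is a mechanical consequence of Black's style at its defaults: double quotes for string literals, spaces around binary operators and inside non-trivial slices, wrapping long calls at 88 columns with trailing commas, and blank lines around nested definitions. A condensed before/after of the most common pattern, mirroring the docs/plugintable.py change below (the column widths are made up for the example):

    l1, l2 = 8, 20  # illustrative values only

    # before Black:  print("%-*s"%(l1,'Name'), "%-*s"%(l2, 'Class'))
    # after Black:
    print("%-*s" % (l1, "Name"), "%-*s" % (l2, "Class"))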
diff --git a/docs/plugintable.py b/docs/plugintable.py
index ddf0fe97..1d64c1a6 100644
--- a/docs/plugintable.py
+++ b/docs/plugintable.py
@@ -12,21 +12,24 @@ cls = sys.argv[1]
p = {}
for (name, kind), plugin in _plugins.items():
- if "/" in name: continue # skip duplicate entries for mimetypes
+ if "/" in name:
+ continue # skip duplicate entries for mimetypes
if cls == kind.__name__:
- p[name]="%s.%s"%(plugin.module_path, plugin.class_name)
+ p[name] = "%s.%s" % (plugin.module_path, plugin.class_name)
-l1=max(len(x) for x in p)
-l2=max(10+len(x) for x in p.values())
+
+l1 = max(len(x) for x in p)
+l2 = max(10 + len(x) for x in p.values())
def hr():
- print("="*l1,"="*l2)
+ print("=" * l1, "=" * l2)
+
hr()
-print("%-*s"%(l1,"Name"), "%-*s"%(l2, "Class"))
+print("%-*s" % (l1, "Name"), "%-*s" % (l2, "Class"))
hr()
for n in sorted(p):
- print("%-*s"%(l1,n), ":class:`~%s`"%p[n])
+ print("%-*s" % (l1, n), ":class:`~%s`" % p[n])
hr()
print()
diff --git a/examples/conjunctive_graphs.py b/examples/conjunctive_graphs.py
index 80282684..f714d9ff 100644
--- a/examples/conjunctive_graphs.py
+++ b/examples/conjunctive_graphs.py
@@ -53,7 +53,7 @@ if __name__ == "__main__":
# query the conjunction of all graphs
xx = None
- for x in g[mary: ns.loves / ns.hasCuteName]:
+ for x in g[mary : ns.loves / ns.hasCuteName]:
xx = x
print("Q: Who does Mary love?")
print("A: Mary loves {}".format(xx))
diff --git a/examples/slice.py b/examples/slice.py
index eaab1540..33aacf9b 100644
--- a/examples/slice.py
+++ b/examples/slice.py
@@ -18,9 +18,9 @@ if __name__ == "__main__":
graph.load("foaf.n3", format="n3")
- for person in graph[: RDF.type: FOAF.Person]:
- friends = list(graph[person: FOAF.knows * "+" / FOAF.name])
+ for person in graph[: RDF.type : FOAF.Person]:
+ friends = list(graph[person : FOAF.knows * "+" / FOAF.name])
if friends:
print("%s's circle of friends:" % graph.value(person, FOAF.name))
for name in friends:
diff --git a/examples/sparqlstore_example.py b/examples/sparqlstore_example.py
index 874d6183..936f6540 100644
--- a/examples/sparqlstore_example.py
+++ b/examples/sparqlstore_example.py
@@ -14,16 +14,21 @@ if __name__ == "__main__":
graph = Graph("SPARQLStore", identifier="http://dbpedia.org")
graph.open("http://dbpedia.org/sparql")
- pop = graph.value(
- URIRef("http://dbpedia.org/resource/Berlin"),
- dbo.populationTotal)
+ pop = graph.value(URIRef("http://dbpedia.org/resource/Berlin"), dbo.populationTotal)
- print("According to DBPedia, Berlin has a population of {0:,}".format(int(pop), ',d').replace(",", "."))
+ print(
+ "According to DBPedia, Berlin has a population of {0:,}".format(
+ int(pop), ",d"
+ ).replace(",", ".")
+ )
# using a SPARQLStore object directly
s = SPARQLStore(endpoint="http://dbpedia.org/sparql")
s.open(None)
pop = graph.value(
- URIRef("http://dbpedia.org/resource/Brisbane"),
- dbo.populationTotal)
- print("According to DBPedia, Brisbane has a population of " "{0:,}".format(int(pop), ',d'))
+ URIRef("http://dbpedia.org/resource/Brisbane"), dbo.populationTotal
+ )
+ print(
+ "According to DBPedia, Brisbane has a population of "
+ "{0:,}".format(int(pop), ",d")
+ )
diff --git a/examples/swap_primer.py b/examples/swap_primer.py
index e3adc298..35dc107c 100644
--- a/examples/swap_primer.py
+++ b/examples/swap_primer.py
@@ -58,7 +58,7 @@ if __name__ == "__main__":
# dataset from the example, and start
# with a fresh new graph.
- del(primer)
+ del primer
primer = ConjunctiveGraph()
# Lets start with a verbatim string straight from the primer text:
diff --git a/rdflib/__init__.py b/rdflib/__init__.py
index 457f78e9..96244acd 100644
--- a/rdflib/__init__.py
+++ b/rdflib/__init__.py
@@ -112,7 +112,6 @@ del _interactive_mode
del sys
-
NORMALIZE_LITERALS = True
"""
If True - Literals lexical forms are normalized when created.
diff --git a/rdflib/collection.py b/rdflib/collection.py
index 52eda4a9..6f715b15 100644
--- a/rdflib/collection.py
+++ b/rdflib/collection.py
@@ -7,7 +7,7 @@ from rdflib.term import BNode
from rdflib.term import Literal
-__all__ = ['Collection']
+__all__ = ["Collection"]
class Collection(object):
@@ -67,7 +67,7 @@ class Collection(object):
"2"^^<http://www.w3.org/2001/XMLSchema#integer>
"3"^^<http://www.w3.org/2001/XMLSchema#integer> )
"""
- return "( %s )" % (' '.join([i.n3() for i in self]))
+ return "( %s )" % (" ".join([i.n3() for i in self]))
def _get_container(self, index):
"""Gets the first, rest holding node at index."""
@@ -103,8 +103,7 @@ class Collection(object):
elif not newLink:
raise Exception("Malformed RDF Collection: %s" % self.uri)
else:
- assert len(newLink) == 1, \
- "Malformed RDF Collection: %s" % self.uri
+ assert len(newLink) == 1, "Malformed RDF Collection: %s" % self.uri
listName = newLink[0]
def __getitem__(self, key):
@@ -246,6 +245,7 @@ class Collection(object):
def test():
import doctest
+
doctest.testmod()
@@ -253,14 +253,14 @@ if __name__ == "__main__":
test()
from rdflib import Graph
+
g = Graph()
c = Collection(g, BNode())
assert len(c) == 0
- c = Collection(
- g, BNode(), [Literal("1"), Literal("2"), Literal("3"), Literal("4")])
+ c = Collection(g, BNode(), [Literal("1"), Literal("2"), Literal("3"), Literal("4")])
assert len(c) == 4
diff --git a/rdflib/compare.py b/rdflib/compare.py
index 818e758e..7fe93f52 100644
--- a/rdflib/compare.py
+++ b/rdflib/compare.py
@@ -83,8 +83,14 @@ from __future__ import print_function
# - Add warning and/or safety mechanism before working on large graphs?
# - use this in existing Graph.isomorphic?
-__all__ = ['IsomorphicGraph', 'to_isomorphic', 'isomorphic',
- 'to_canonical_graph', 'graph_diff', 'similar']
+__all__ = [
+ "IsomorphicGraph",
+ "to_isomorphic",
+ "isomorphic",
+ "to_canonical_graph",
+ "graph_diff",
+ "similar",
+]
from rdflib.graph import Graph, ConjunctiveGraph, ReadOnlyGraphAggregate
from rdflib.term import BNode, Node
@@ -94,8 +100,6 @@ from datetime import datetime
from collections import defaultdict
-
-
def _total_seconds(td):
result = td.days * 24 * 60 * 60
result += td.seconds
@@ -114,10 +118,11 @@ class _runtime(object):
def wrapped_f(*args, **kwargs):
start = datetime.now()
result = f(*args, **kwargs)
- if 'stats' in kwargs and kwargs['stats'] is not None:
- stats = kwargs['stats']
+ if "stats" in kwargs and kwargs["stats"] is not None:
+ stats = kwargs["stats"]
stats[self.label] = _total_seconds(datetime.now() - start)
return result
+
return wrapped_f
@@ -130,12 +135,13 @@ class _call_count(object):
self.label = f.__name__ + "_runtime"
def wrapped_f(*args, **kwargs):
- if 'stats' in kwargs and kwargs['stats'] is not None:
- stats = kwargs['stats']
+ if "stats" in kwargs and kwargs["stats"] is not None:
+ stats = kwargs["stats"]
if self.label not in stats:
stats[self.label] = 0
stats[self.label] += 1
return f(*args, **kwargs)
+
return wrapped_f
@@ -212,11 +218,12 @@ class Color:
return x.n3()
else:
return str(x)
+
if isinstance(color, Node):
return stringify(color)
value = 0
for triple in color:
- value += self.hashfunc(' '.join([stringify(x) for x in triple]))
+ value += self.hashfunc(" ".join([stringify(x) for x in triple]))
val = u"%x" % value
self._hash_cache[color] = val
return val
@@ -227,18 +234,16 @@ class Color:
new_color = list(self.color)
for node in W.nodes:
new_color += [
- (1, p, W.hash_color())
- for s, p, o in graph.triples((n, None, node))]
+ (1, p, W.hash_color()) for s, p, o in graph.triples((n, None, node))
+ ]
new_color += [
- (W.hash_color(), p, 3)
- for s, p, o in graph.triples((node, None, n))]
+ (W.hash_color(), p, 3) for s, p, o in graph.triples((node, None, n))
+ ]
new_color = tuple(new_color)
new_hash_color = self.hash_color(new_color)
if new_hash_color not in colors:
- c = Color(
- [], self.hashfunc, new_color,
- hash_cache=self._hash_cache)
+ c = Color([], self.hashfunc, new_color, hash_cache=self._hash_cache)
colors[new_hash_color] = c
colors[new_hash_color].nodes.append(n)
return colors.values()
@@ -248,12 +253,11 @@ class Color:
def copy(self):
return Color(
- self.nodes[:], self.hashfunc, self.color,
- hash_cache=self._hash_cache)
+ self.nodes[:], self.hashfunc, self.color, hash_cache=self._hash_cache
+ )
class _TripleCanonicalizer(object):
-
def __init__(self, graph, hashfunc=sha256):
self.graph = graph
@@ -261,6 +265,7 @@ class _TripleCanonicalizer(object):
h = hashfunc()
h.update(str(s).encode("utf8"))
return int(h.hexdigest(), 16)
+
self._hash_cache = {}
self.hashfunc = _hashfunc
@@ -292,12 +297,10 @@ class _TripleCanonicalizer(object):
self._neighbors[p].add(s)
self._neighbors[p].add(p)
if len(bnodes) > 0:
- return [
- Color(list(bnodes), self.hashfunc, hash_cache=self._hash_cache)
- ] + [
- Color([x], self.hashfunc, x, hash_cache=self._hash_cache)
- for x in others
- ]
+ return [Color(list(bnodes), self.hashfunc, hash_cache=self._hash_cache)] + [
+ Color([x], self.hashfunc, x, hash_cache=self._hash_cache)
+ for x in others
+ ]
else:
return []
@@ -306,8 +309,9 @@ class _TripleCanonicalizer(object):
new_color.append((len(color.nodes),))
color.nodes.remove(individual)
- c = Color([individual], self.hashfunc, tuple(new_color),
- hash_cache=self._hash_cache)
+ c = Color(
+ [individual], self.hashfunc, tuple(new_color), hash_cache=self._hash_cache
+ )
return c
def _get_candidates(self, coloring):
@@ -323,14 +327,16 @@ class _TripleCanonicalizer(object):
W = sequence.pop()
for c in coloring[:]:
if len(c.nodes) > 1 or isinstance(c.nodes[0], BNode):
- colors = sorted(c.distinguish(W, self.graph),
- key=lambda x: x.key(),
- reverse=True)
+ colors = sorted(
+ c.distinguish(W, self.graph),
+ key=lambda x: x.key(),
+ reverse=True,
+ )
coloring.remove(c)
coloring.extend(colors)
try:
si = sequence.index(c)
- sequence = sequence[:si] + colors + sequence[si+1:]
+ sequence = sequence[:si] + colors + sequence[si + 1 :]
except ValueError:
sequence = colors[1:] + sequence
combined_colors = []
@@ -349,9 +355,9 @@ class _TripleCanonicalizer(object):
def to_hash(self, stats=None):
result = 0
for triple in self.canonical_triples(stats=stats):
- result += self.hashfunc(' '.join([x.n3() for x in triple]))
+ result += self.hashfunc(" ".join([x.n3() for x in triple]))
if stats is not None:
- stats['graph_digest'] = "%x" % result
+ stats["graph_digest"] = "%x" % result
return result
def _experimental_path(self, coloring):
@@ -377,8 +383,8 @@ class _TripleCanonicalizer(object):
@_call_count("individuations")
def _traces(self, coloring, stats=None, depth=[0]):
- if stats is not None and 'prunings' not in stats:
- stats['prunings'] = 0
+ if stats is not None and "prunings" not in stats:
+ stats["prunings"] = 0
depth[0] += 1
candidates = self._get_candidates(coloring)
best = []
@@ -410,8 +416,8 @@ class _TripleCanonicalizer(object):
experimental_score = set([c.key() for c in experimental])
if last_coloring:
generator = self._create_generator(
- [last_coloring, experimental],
- generator)
+ [last_coloring, experimental], generator
+ )
last_coloring = experimental
if best_score is None or best_score < color_score:
best = [refined_coloring]
@@ -421,13 +427,13 @@ class _TripleCanonicalizer(object):
elif best_score > color_score:
# prune this branch.
if stats is not None:
- stats['prunings'] += 1
+ stats["prunings"] += 1
elif experimental_score != best_experimental_score:
best.append(refined_coloring)
else:
# prune this branch.
if stats is not None:
- stats['prunings'] += 1
+ stats["prunings"] += 1
discrete = [x for x in best if self._discrete(x)]
if len(discrete) == 0:
best_score = None
@@ -450,27 +456,31 @@ class _TripleCanonicalizer(object):
start_coloring = datetime.now()
coloring = self._initial_color()
if stats is not None:
- stats['triple_count'] = len(self.graph)
- stats['adjacent_nodes'] = max(0, len(coloring) - 1)
+ stats["triple_count"] = len(self.graph)
+ stats["adjacent_nodes"] = max(0, len(coloring) - 1)
coloring = self._refine(coloring, coloring[:])
if stats is not None:
- stats['initial_coloring_runtime'] = _total_seconds(datetime.now() - start_coloring)
- stats['initial_color_count'] = len(coloring)
+ stats["initial_coloring_runtime"] = _total_seconds(
+ datetime.now() - start_coloring
+ )
+ stats["initial_color_count"] = len(coloring)
if not self._discrete(coloring):
depth = [0]
coloring = self._traces(coloring, stats=stats, depth=depth)
if stats is not None:
- stats['tree_depth'] = depth[0]
+ stats["tree_depth"] = depth[0]
elif stats is not None:
- stats['individuations'] = 0
- stats['tree_depth'] = 0
+ stats["individuations"] = 0
+ stats["tree_depth"] = 0
if stats is not None:
- stats['color_count'] = len(coloring)
+ stats["color_count"] = len(coloring)
bnode_labels = dict([(c.nodes[0], c.hash_color()) for c in coloring])
if stats is not None:
- stats["canonicalize_triples_runtime"] = _total_seconds(datetime.now() - start_coloring)
+ stats["canonicalize_triples_runtime"] = _total_seconds(
+ datetime.now() - start_coloring
+ )
for triple in self.graph:
result = tuple(self._canonicalize_bnodes(triple, bnode_labels))
yield result
diff --git a/rdflib/compat.py b/rdflib/compat.py
index eb9221be..c058e8df 100644
--- a/rdflib/compat.py
+++ b/rdflib/compat.py
@@ -31,8 +31,7 @@ except ImportError:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
- raise Exception(
- "Failed to import ElementTree from any known place")
+ raise Exception("Failed to import ElementTree from any known place")
try:
etree_register_namespace = etree.register_namespace
@@ -44,18 +43,18 @@ except AttributeError:
etreenative._namespace_map[uri] = prefix
-def cast_bytes(s, enc='utf-8'):
+def cast_bytes(s, enc="utf-8"):
if isinstance(s, str):
return s.encode(enc)
return s
def ascii(stream):
- return codecs.getreader('ascii')(stream)
+ return codecs.getreader("ascii")(stream)
def bopen(*args, **kwargs):
- return open(*args, mode='rb', **kwargs)
+ return open(*args, mode="rb", **kwargs)
long_type = int
@@ -69,7 +68,7 @@ def sign(n):
return 0
-r_unicodeEscape = re.compile(r'(\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8})')
+r_unicodeEscape = re.compile(r"(\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8})")
def _unicodeExpand(s):
@@ -83,17 +82,19 @@ except ValueError:
narrow_build = True
if narrow_build:
+
def _unicodeExpand(s):
try:
- return r_unicodeEscape.sub(
- lambda m: chr(int(m.group(0)[2:], 16)), s)
+ return r_unicodeEscape.sub(lambda m: chr(int(m.group(0)[2:], 16)), s)
except ValueError:
warnings.warn(
- 'Encountered a unicode char > 0xFFFF in a narrow python build. '
- 'Trying to degrade gracefully, but this can cause problems '
- 'later when working with the string:\n%s' % s)
+ "Encountered a unicode char > 0xFFFF in a narrow python build. "
+ "Trying to degrade gracefully, but this can cause problems "
+ "later when working with the string:\n%s" % s
+ )
return r_unicodeEscape.sub(
- lambda m: codecs.decode(m.group(0), 'unicode_escape'), s)
+ lambda m: codecs.decode(m.group(0), "unicode_escape"), s
+ )
def decodeStringEscape(s):
@@ -101,14 +102,14 @@ def decodeStringEscape(s):
s is byte-string - replace \ escapes in string
"""
- s = s.replace('\\t', '\t')
- s = s.replace('\\n', '\n')
- s = s.replace('\\r', '\r')
- s = s.replace('\\b', '\b')
- s = s.replace('\\f', '\f')
+ s = s.replace("\\t", "\t")
+ s = s.replace("\\n", "\n")
+ s = s.replace("\\r", "\r")
+ s = s.replace("\\b", "\b")
+ s = s.replace("\\f", "\f")
s = s.replace('\\"', '"')
s = s.replace("\\'", "'")
- s = s.replace('\\\\', '\\')
+ s = s.replace("\\\\", "\\")
return s
# return _unicodeExpand(s) # hmm - string escape doesn't do unicode escaping
@@ -119,14 +120,14 @@ def decodeUnicodeEscape(s):
s is a unicode string
replace ``\\n`` and ``\\u00AC`` unicode escapes
"""
- s = s.replace('\\t', '\t')
- s = s.replace('\\n', '\n')
- s = s.replace('\\r', '\r')
- s = s.replace('\\b', '\b')
- s = s.replace('\\f', '\f')
+ s = s.replace("\\t", "\t")
+ s = s.replace("\\n", "\n")
+ s = s.replace("\\r", "\r")
+ s = s.replace("\\b", "\b")
+ s = s.replace("\\f", "\f")
s = s.replace('\\"', '"')
s = s.replace("\\'", "'")
- s = s.replace('\\\\', '\\')
+ s = s.replace("\\\\", "\\")
s = _unicodeExpand(s) # hmm - string escape doesn't do unicode escaping
diff --git a/rdflib/events.py b/rdflib/events.py
index 2c563c10..5451c6be 100644
--- a/rdflib/events.py
+++ b/rdflib/events.py
@@ -26,7 +26,7 @@ fired:
<rdflib.events.Event ['data', 'foo', 'used_by']>
"""
-__all__ = ['Event', 'Dispatcher']
+__all__ = ["Event", "Dispatcher"]
class Event(object):
@@ -47,7 +47,7 @@ class Event(object):
def __repr__(self):
attrs = sorted(self.__dict__.keys())
- return '<rdflib.events.Event %s>' % ([a for a in attrs],)
+ return "<rdflib.events.Event %s>" % ([a for a in attrs],)
class Dispatcher(object):
@@ -90,8 +90,9 @@ class Dispatcher(object):
def test():
import doctest
+
doctest.testmod()
-if __name__ == '__main__':
+if __name__ == "__main__":
test()
diff --git a/rdflib/exceptions.py b/rdflib/exceptions.py
index 85195a53..4e31c0b8 100644
--- a/rdflib/exceptions.py
+++ b/rdflib/exceptions.py
@@ -2,9 +2,15 @@
TODO:
"""
-__all__ = ['Error', 'TypeCheckError', 'SubjectTypeError',
- 'PredicateTypeError', 'ObjectTypeError', 'ContextTypeError',
- 'ParserError']
+__all__ = [
+ "Error",
+ "TypeCheckError",
+ "SubjectTypeError",
+ "PredicateTypeError",
+ "ObjectTypeError",
+ "ContextTypeError",
+ "ParserError",
+]
class Error(Exception):
@@ -29,8 +35,10 @@ class SubjectTypeError(TypeCheckError):
def __init__(self, node):
TypeCheckError.__init__(self, node)
- self.msg = "Subject must be instance of URIRef or BNode: %s(%s)" \
- % (self.node, self.type)
+ self.msg = "Subject must be instance of URIRef or BNode: %s(%s)" % (
+ self.node,
+ self.type,
+ )
class PredicateTypeError(TypeCheckError):
@@ -38,8 +46,10 @@ class PredicateTypeError(TypeCheckError):
def __init__(self, node):
TypeCheckError.__init__(self, node)
- self.msg = "Predicate must be a URIRef instance: %s(%s)" \
- % (self.node, self.type)
+ self.msg = "Predicate must be a URIRef instance: %s(%s)" % (
+ self.node,
+ self.type,
+ )
class ObjectTypeError(TypeCheckError):
@@ -48,9 +58,11 @@ class ObjectTypeError(TypeCheckError):
def __init__(self, node):
TypeCheckError.__init__(self, node)
- self.msg = "\
-Object must be instance of URIRef, Literal, or BNode: %s(%s)" % \
- (self.node, self.type)
+ self.msg = (
+ "\
+Object must be instance of URIRef, Literal, or BNode: %s(%s)"
+ % (self.node, self.type)
+ )
class ContextTypeError(TypeCheckError):
@@ -58,8 +70,10 @@ class ContextTypeError(TypeCheckError):
def __init__(self, node):
TypeCheckError.__init__(self, node)
- self.msg = "Context must be instance of URIRef or BNode: %s(%s)" \
- % (self.node, self.type)
+ self.msg = "Context must be instance of URIRef or BNode: %s(%s)" % (
+ self.node,
+ self.type,
+ )
class ParserError(Error):
@@ -77,5 +91,9 @@ class UniquenessError(Error):
"""A uniqueness assumption was made in the context, and that is not true"""
def __init__(self, values):
- Error.__init__(self, "\
-Uniqueness assumption is not fulfilled. Multiple values are: %s" % values)
+ Error.__init__(
+ self,
+ "\
+Uniqueness assumption is not fulfilled. Multiple values are: %s"
+ % values,
+ )
diff --git a/rdflib/extras/cmdlineutils.py b/rdflib/extras/cmdlineutils.py
index a771d4d7..9abb10ba 100644
--- a/rdflib/extras/cmdlineutils.py
+++ b/rdflib/extras/cmdlineutils.py
@@ -8,14 +8,16 @@ from rdflib.util import guess_format
def _help():
- sys.stderr.write("""
+ sys.stderr.write(
+ """
program.py [-f <format>] [-o <output>] [files...]
Read RDF files given on STDOUT - does something to the resulting graph
If no files are given, read from stdin
-o specifies file for output, if not given stdout is used
-f specifies parser to use, if not given it is guessed from extension
-""")
+"""
+ )
def main(target, _help=_help, options="", stdin=True):
@@ -57,11 +59,15 @@ def main(target, _help=_help, options="", stdin=True):
start1 = time.time()
sys.stderr.write("Loading %s as %s... " % (x, f))
g.load(x, format=f)
- sys.stderr.write("done.\t(%d triples\t%.2f seconds)\n" %
- (len(g) - size, time.time() - start1))
+ sys.stderr.write(
+ "done.\t(%d triples\t%.2f seconds)\n"
+ % (len(g) - size, time.time() - start1)
+ )
size = len(g)
- sys.stderr.write("Loaded a total of %d triples in %.2f seconds.\n" %
- (len(g), time.time() - start))
+ sys.stderr.write(
+ "Loaded a total of %d triples in %.2f seconds.\n"
+ % (len(g), time.time() - start)
+ )
target(g, out, args)
diff --git a/rdflib/extras/describer.py b/rdflib/extras/describer.py
index c7444776..cec3b602 100644
--- a/rdflib/extras/describer.py
+++ b/rdflib/extras/describer.py
@@ -119,7 +119,6 @@ from rdflib.term import URIRef
class Describer(object):
-
def __init__(self, graph=None, about=None, base=None):
if graph is None:
graph = Graph()
@@ -143,7 +142,7 @@ class Describer(object):
rdflib.term.URIRef(u'http://example.org/')
"""
- kws.setdefault('base', self.base)
+ kws.setdefault("base", self.base)
subject = cast_identifier(subject, **kws)
if self._subjects:
self._subjects[-1] = subject
@@ -195,7 +194,7 @@ class Describer(object):
"""
- kws.setdefault('base', self.base)
+ kws.setdefault("base", self.base)
p = cast_identifier(p)
o = cast_identifier(o, **kws)
self.graph.add((self._current(), p, o))
@@ -221,7 +220,7 @@ class Describer(object):
rdflib.term.Literal(u'Net')
"""
- kws.setdefault('base', self.base)
+ kws.setdefault("base", self.base)
p = cast_identifier(p)
s = cast_identifier(s, **kws)
self.graph.add((s, p, self._current()))
diff --git a/rdflib/extras/external_graph_libs.py b/rdflib/extras/external_graph_libs.py
index 873805b4..fa311490 100644
--- a/rdflib/extras/external_graph_libs.py
+++ b/rdflib/extras/external_graph_libs.py
@@ -17,18 +17,22 @@ see ../../test/test_extras_external_graph_libs.py for conditional tests
"""
import logging
+
logger = logging.getLogger(__name__)
-def _identity(x): return x
+def _identity(x):
+ return x
def _rdflib_to_networkx_graph(
- graph,
- nxgraph,
- calc_weights,
- edge_attrs,
- transform_s=_identity, transform_o=_identity):
+ graph,
+ nxgraph,
+ calc_weights,
+ edge_attrs,
+ transform_s=_identity,
+ transform_o=_identity,
+):
"""Helper method for multidigraph, digraph and graph.
Modifies nxgraph in-place!
@@ -50,6 +54,7 @@ def _rdflib_to_networkx_graph(
assert callable(transform_s)
assert callable(transform_o)
import networkx as nx
+
for s, p, o in graph:
ts, to = transform_s(s), transform_o(o) # apply possible transformations
data = nxgraph.get_edge_data(ts, to)
@@ -57,21 +62,20 @@ def _rdflib_to_networkx_graph(
# no edge yet, set defaults
data = edge_attrs(s, p, o)
if calc_weights:
- data['weight'] = 1
+ data["weight"] = 1
nxgraph.add_edge(ts, to, **data)
else:
# already have an edge, just update attributes
if calc_weights:
- data['weight'] += 1
- if 'triples' in data:
+ data["weight"] += 1
+ if "triples" in data:
d = edge_attrs(s, p, o)
- data['triples'].extend(d['triples'])
+ data["triples"].extend(d["triples"])
def rdflib_to_networkx_multidigraph(
- graph,
- edge_attrs=lambda s, p, o: {'key': p},
- **kwds):
+ graph, edge_attrs=lambda s, p, o: {"key": p}, **kwds
+):
"""Converts the given graph into a networkx.MultiDiGraph.
The subjects and objects are the later nodes of the MultiDiGraph.
@@ -116,16 +120,18 @@ def rdflib_to_networkx_multidigraph(
True
"""
import networkx as nx
+
mdg = nx.MultiDiGraph()
_rdflib_to_networkx_graph(graph, mdg, False, edge_attrs, **kwds)
return mdg
def rdflib_to_networkx_digraph(
- graph,
- calc_weights=True,
- edge_attrs=lambda s, p, o: {'triples': [(s, p, o)]},
- **kwds):
+ graph,
+ calc_weights=True,
+ edge_attrs=lambda s, p, o: {"triples": [(s, p, o)]},
+ **kwds
+):
"""Converts the given graph into a networkx.DiGraph.
As an rdflib.Graph() can contain multiple edges between nodes, by default
@@ -176,16 +182,18 @@ def rdflib_to_networkx_digraph(
False
"""
import networkx as nx
+
dg = nx.DiGraph()
_rdflib_to_networkx_graph(graph, dg, calc_weights, edge_attrs, **kwds)
return dg
def rdflib_to_networkx_graph(
- graph,
- calc_weights=True,
- edge_attrs=lambda s, p, o: {'triples': [(s, p, o)]},
- **kwds):
+ graph,
+ calc_weights=True,
+ edge_attrs=lambda s, p, o: {"triples": [(s, p, o)]},
+ **kwds
+):
"""Converts the given graph into a networkx.Graph.
As an rdflib.Graph() can contain multiple directed edges between nodes, by
@@ -236,6 +244,7 @@ def rdflib_to_networkx_graph(
False
"""
import networkx as nx
+
g = nx.Graph()
_rdflib_to_networkx_graph(graph, g, calc_weights, edge_attrs, **kwds)
return g
@@ -243,11 +252,11 @@ def rdflib_to_networkx_graph(
def rdflib_to_graphtool(
graph,
- v_prop_names=[str('term')],
- e_prop_names=[str('term')],
- transform_s=lambda s, p, o: {str('term'): s},
- transform_p=lambda s, p, o: {str('term'): p},
- transform_o=lambda s, p, o: {str('term'): o},
+ v_prop_names=[str("term")],
+ e_prop_names=[str("term")],
+ transform_s=lambda s, p, o: {str("term"): s},
+ transform_p=lambda s, p, o: {str("term"): p},
+ transform_o=lambda s, p, o: {str("term"): o},
):
"""Converts the given graph into a graph_tool.Graph().
@@ -306,12 +315,13 @@ def rdflib_to_graphtool(
True
"""
import graph_tool as gt
+
g = gt.Graph()
- vprops = [(vpn, g.new_vertex_property('object')) for vpn in v_prop_names]
+ vprops = [(vpn, g.new_vertex_property("object")) for vpn in v_prop_names]
for vpn, vprop in vprops:
g.vertex_properties[vpn] = vprop
- eprops = [(epn, g.new_edge_property('object')) for epn in e_prop_names]
+ eprops = [(epn, g.new_edge_property("object")) for epn in e_prop_names]
for epn, eprop in eprops:
g.edge_properties[epn] = eprop
node_to_vertex = {}
@@ -341,10 +351,12 @@ def rdflib_to_graphtool(
return g
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
import logging.config
+
logging.basicConfig(level=logging.DEBUG)
import nose
- nose.run(argv=[sys.argv[0], sys.argv[0], '-v', '--without-doctest'])
+
+ nose.run(argv=[sys.argv[0], sys.argv[0], "-v", "--without-doctest"])
diff --git a/rdflib/extras/infixowl.py b/rdflib/extras/infixowl.py
index f295f1f8..16dc616a 100644
--- a/rdflib/extras/infixowl.py
+++ b/rdflib/extras/infixowl.py
@@ -5,8 +5,6 @@ from __future__ import division
from __future__ import print_function
-
-
__doc__ = """
RDFLib Python binding for OWL Abstract Syntax
@@ -118,15 +116,7 @@ Python
import itertools
-from rdflib import (
- BNode,
- Literal,
- Namespace,
- RDF,
- RDFS,
- URIRef,
- Variable
-)
+from rdflib import BNode, Literal, Namespace, RDF, RDFS, URIRef, Variable
from rdflib.graph import Graph
from rdflib.collection import Collection
from rdflib.namespace import XSD as _XSD_NS
@@ -135,6 +125,7 @@ from rdflib.term import Identifier
from rdflib.util import first
import logging
+
logger = logging.getLogger(__name__)
@@ -148,43 +139,43 @@ operators can be defined.
"""
__all__ = [
- 'OWL_NS',
- 'nsBinds',
- 'ACE_NS',
- 'CLASS_RELATIONS',
- 'some',
- 'only',
- 'max',
- 'min',
- 'exactly',
- 'value',
- 'PropertyAbstractSyntax',
- 'AllClasses',
- 'AllDifferent',
- 'AllProperties',
- 'AnnotatableTerms',
- 'BooleanClass',
- 'Callable',
- 'CastClass',
- 'Class',
- 'ClassNamespaceFactory',
- 'classOrIdentifier',
- 'classOrTerm',
- 'CommonNSBindings',
- 'ComponentTerms',
- 'DeepClassClear',
- 'EnumeratedClass',
- 'generateQName',
- 'GetIdentifiedClasses',
- 'Individual',
- 'MalformedClass',
- 'manchesterSyntax',
- 'Ontology',
- 'OWLRDFListProxy',
- 'Property',
- 'propertyOrIdentifier',
- 'Restriction',
- 'termDeletionDecorator',
+ "OWL_NS",
+ "nsBinds",
+ "ACE_NS",
+ "CLASS_RELATIONS",
+ "some",
+ "only",
+ "max",
+ "min",
+ "exactly",
+ "value",
+ "PropertyAbstractSyntax",
+ "AllClasses",
+ "AllDifferent",
+ "AllProperties",
+ "AnnotatableTerms",
+ "BooleanClass",
+ "Callable",
+ "CastClass",
+ "Class",
+ "ClassNamespaceFactory",
+ "classOrIdentifier",
+ "classOrTerm",
+ "CommonNSBindings",
+ "ComponentTerms",
+ "DeepClassClear",
+ "EnumeratedClass",
+ "generateQName",
+ "GetIdentifiedClasses",
+ "Individual",
+ "MalformedClass",
+ "manchesterSyntax",
+ "Ontology",
+ "OWLRDFListProxy",
+ "Property",
+ "propertyOrIdentifier",
+ "Restriction",
+ "termDeletionDecorator",
]
# definition of an Infix operator class
@@ -218,18 +209,18 @@ class Infix:
OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
nsBinds = {
- 'skos': 'http://www.w3.org/2004/02/skos/core#',
- 'rdf': RDF,
- 'rdfs': RDFS,
- 'owl': OWL_NS,
- 'list': URIRef('http://www.w3.org/2000/10/swap/list#'),
- 'dc': "http://purl.org/dc/elements/1.1/",
+ "skos": "http://www.w3.org/2004/02/skos/core#",
+ "rdf": RDF,
+ "rdfs": RDFS,
+ "owl": OWL_NS,
+ "list": URIRef("http://www.w3.org/2000/10/swap/list#"),
+ "dc": "http://purl.org/dc/elements/1.1/",
}
def generateQName(graph, uri):
prefix, uri, localName = graph.compute_qname(classOrIdentifier(uri))
- return u':'.join([prefix, localName])
+ return u":".join([prefix, localName])
def classOrTerm(thing):
@@ -244,8 +235,9 @@ def classOrIdentifier(thing):
if isinstance(thing, (Property, Class)):
return thing.identifier
else:
- assert isinstance(thing, (URIRef, BNode)), \
+ assert isinstance(thing, (URIRef, BNode)), (
"Expecting a Class, Property, URIRef, or BNode.. not a %s" % thing
+ )
return thing
@@ -268,8 +260,9 @@ def manchesterSyntax(thing, store, boolean=None, transientList=False):
children = [manchesterSyntax(child, store) for child in thing]
else:
liveChildren = iter(Collection(store, thing))
- children = [manchesterSyntax(
- child, store) for child in Collection(store, thing)]
+ children = [
+ manchesterSyntax(child, store) for child in Collection(store, thing)
+ ]
if boolean == OWL_NS.intersectionOf:
childList = []
named = []
@@ -279,82 +272,77 @@ def manchesterSyntax(thing, store, boolean=None, transientList=False):
else:
childList.append(child)
if named:
+
def castToQName(x):
prefix, uri, localName = store.compute_qname(x)
- return ':'.join([prefix, localName])
+ return ":".join([prefix, localName])
if len(named) > 1:
- prefix = u'( ' + u' AND '.join(map(
- castToQName, named)) + u' )'
+ prefix = u"( " + u" AND ".join(map(castToQName, named)) + u" )"
else:
prefix = manchesterSyntax(named[0], store)
if childList:
- return str(prefix) + u' THAT ' + u' AND '.join(
- [str(manchesterSyntax(x, store)) for x in childList])
+ return (
+ str(prefix)
+ + u" THAT "
+ + u" AND ".join(
+ [str(manchesterSyntax(x, store)) for x in childList]
+ )
+ )
else:
return prefix
else:
- return u'( ' + u' AND '.join(
- [str(c) for c in children]) + u' )'
+ return u"( " + u" AND ".join([str(c) for c in children]) + u" )"
elif boolean == OWL_NS.unionOf:
- return u'( ' + u' OR '.join([str(c) for c in children]) + ' )'
+ return u"( " + u" OR ".join([str(c) for c in children]) + " )"
elif boolean == OWL_NS.oneOf:
- return u'{ ' + u' '.join([str(c) for c in children]) + ' }'
+ return u"{ " + u" ".join([str(c) for c in children]) + " }"
else:
assert boolean == OWL_NS.complementOf
- elif OWL_NS.Restriction in store.objects(
- subject=thing, predicate=RDF.type):
- prop = list(
- store.objects(subject=thing, predicate=OWL_NS.onProperty))[0]
+ elif OWL_NS.Restriction in store.objects(subject=thing, predicate=RDF.type):
+ prop = list(store.objects(subject=thing, predicate=OWL_NS.onProperty))[0]
prefix, uri, localName = store.compute_qname(prop)
- propString = u':'.join([prefix, localName])
+ propString = u":".join([prefix, localName])
label = first(store.objects(subject=prop, predicate=RDFS.label))
if label:
propString = "'%s'" % label
- for onlyClass in store.objects(
- subject=thing, predicate=OWL_NS.allValuesFrom):
- return u'( %s ONLY %s )' % (
- propString, manchesterSyntax(onlyClass, store))
+ for onlyClass in store.objects(subject=thing, predicate=OWL_NS.allValuesFrom):
+ return u"( %s ONLY %s )" % (propString, manchesterSyntax(onlyClass, store))
for val in store.objects(subject=thing, predicate=OWL_NS.hasValue):
- return u'( %s VALUE %s )' % (
- propString,
- manchesterSyntax(val, store))
- for someClass in store.objects(
- subject=thing, predicate=OWL_NS.someValuesFrom):
- return u'( %s SOME %s )' % (
- propString, manchesterSyntax(someClass, store))
- cardLookup = {OWL_NS.maxCardinality: 'MAX',
- OWL_NS.minCardinality: 'MIN',
- OWL_NS.cardinality: 'EQUALS'}
- for s, p, o in store.triples_choices(
- (thing, list(cardLookup.keys()), None)):
- return u'( %s %s %s )' % (
- propString, cardLookup[p], o)
+ return u"( %s VALUE %s )" % (propString, manchesterSyntax(val, store))
+ for someClass in store.objects(subject=thing, predicate=OWL_NS.someValuesFrom):
+ return u"( %s SOME %s )" % (propString, manchesterSyntax(someClass, store))
+ cardLookup = {
+ OWL_NS.maxCardinality: "MAX",
+ OWL_NS.minCardinality: "MIN",
+ OWL_NS.cardinality: "EQUALS",
+ }
+ for s, p, o in store.triples_choices((thing, list(cardLookup.keys()), None)):
+ return u"( %s %s %s )" % (propString, cardLookup[p], o)
compl = list(store.objects(subject=thing, predicate=OWL_NS.complementOf))
if compl:
- return '( NOT %s )' % (manchesterSyntax(compl[0], store))
+ return "( NOT %s )" % (manchesterSyntax(compl[0], store))
else:
- prolog = '\n'.join(
- ["PREFIX %s: <%s>" % (k, nsBinds[k]) for k in nsBinds])
- qstr = \
- prolog + \
- "\nSELECT ?p ?bool WHERE {?class a owl:Class; ?p ?bool ." + \
- "?bool rdf:first ?foo }"
+ prolog = "\n".join(["PREFIX %s: <%s>" % (k, nsBinds[k]) for k in nsBinds])
+ qstr = (
+ prolog
+ + "\nSELECT ?p ?bool WHERE {?class a owl:Class; ?p ?bool ."
+ + "?bool rdf:first ?foo }"
+ )
initb = {Variable("?class"): thing}
- for boolProp, col in \
- store.query(qstr, processor="sparql", initBindings=initb):
+ for boolProp, col in store.query(qstr, processor="sparql", initBindings=initb):
if not isinstance(thing, URIRef):
return manchesterSyntax(col, store, boolean=boolProp)
try:
prefix, uri, localName = store.compute_qname(thing)
- qname = u':'.join([prefix, localName])
+ qname = u":".join([prefix, localName])
except Exception:
if isinstance(thing, BNode):
return thing.n3()
return u"<" + thing + ">"
logger.debug(list(store.objects(subject=thing, predicate=RDF.type)))
raise
- return '[]' # +thing._id.encode('utf-8')+'</em>'
+ return "[]" # +thing._id.encode('utf-8')+'</em>'
label = first(Class(thing, graph=store).label)
if label:
return label
@@ -372,6 +360,7 @@ def termDeletionDecorator(prop):
def someFunc(func):
func.property = prop
return func
+
return someFunc
@@ -382,6 +371,7 @@ class TermDeletionHelper:
def __call__(self, f):
def _remover(inst):
inst.graph.remove((inst.identifier, self.prop, None))
+
return _remover
@@ -389,6 +379,7 @@ class Individual(object):
"""
A typed individual
"""
+
factoryGraph = Graph()
def serialize(self, graph):
@@ -404,9 +395,8 @@ class Individual(object):
self.qname = None
if not isinstance(self.identifier, BNode):
try:
- prefix, uri, localName = self.graph.compute_qname(
- self.identifier)
- self.qname = u':'.join([prefix, localName])
+ prefix, uri, localName = self.graph.compute_qname(self.identifier)
+ self.qname = u":".join([prefix, localName])
except:
pass
@@ -426,21 +416,18 @@ class Individual(object):
self.delete()
def _get_type(self):
- for _t in self.graph.objects(
- subject=self.identifier, predicate=RDF.type):
+ for _t in self.graph.objects(subject=self.identifier, predicate=RDF.type):
yield _t
def _set_type(self, kind):
if not kind:
return
if isinstance(kind, (Individual, Identifier)):
- self.graph.add(
- (self.identifier, RDF.type, classOrIdentifier(kind)))
+ self.graph.add((self.identifier, RDF.type, classOrIdentifier(kind)))
else:
for c in kind:
assert isinstance(c, (Individual, Identifier))
- self.graph.add(
- (self.identifier, RDF.type, classOrIdentifier(c)))
+ self.graph.add((self.identifier, RDF.type, classOrIdentifier(c)))
@TermDeletionHelper(RDF.type)
def _delete_type(self):
@@ -464,43 +451,43 @@ class Individual(object):
def _set_identifier(self, i):
assert i
if i != self.__identifier:
- oldStmtsOut = [(p, o) for s, p, o in self.graph.triples(
- (self.__identifier, None, None))]
- oldStmtsIn = [(s, p) for s, p, o in self.graph.triples(
- (None, None, self.__identifier))]
+ oldStmtsOut = [
+ (p, o)
+ for s, p, o in self.graph.triples((self.__identifier, None, None))
+ ]
+ oldStmtsIn = [
+ (s, p)
+ for s, p, o in self.graph.triples((None, None, self.__identifier))
+ ]
for p1, o1 in oldStmtsOut:
self.graph.remove((self.__identifier, p1, o1))
for s1, p1 in oldStmtsIn:
self.graph.remove((s1, p1, self.__identifier))
self.__identifier = i
- self.graph.addN(
- [(i, p1, o1, self.graph) for p1, o1 in oldStmtsOut])
+ self.graph.addN([(i, p1, o1, self.graph) for p1, o1 in oldStmtsOut])
self.graph.addN([(s1, p1, i, self.graph) for s1, p1 in oldStmtsIn])
if not isinstance(i, BNode):
try:
prefix, uri, localName = self.graph.compute_qname(i)
- self.qname = u':'.join([prefix, localName])
+ self.qname = u":".join([prefix, localName])
except:
pass
identifier = property(_get_identifier, _set_identifier)
def _get_sameAs(self):
- for _t in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.sameAs):
+ for _t in self.graph.objects(subject=self.identifier, predicate=OWL_NS.sameAs):
yield _t
def _set_sameAs(self, term):
# if not kind:
# return
if isinstance(term, (Individual, Identifier)):
- self.graph.add(
- (self.identifier, OWL_NS.sameAs, classOrIdentifier(term)))
+ self.graph.add((self.identifier, OWL_NS.sameAs, classOrIdentifier(term)))
else:
for c in term:
assert isinstance(c, (Individual, Identifier))
- self.graph.add(
- (self.identifier, OWL_NS.sameAs, classOrIdentifier(c)))
+ self.graph.add((self.identifier, OWL_NS.sameAs, classOrIdentifier(c)))
@TermDeletionHelper(OWL_NS.sameAs)
def _delete_sameAs(self):
@@ -509,7 +496,7 @@ class Individual(object):
sameAs = property(_get_sameAs, _set_sameAs, _delete_sameAs)
-ACE_NS = Namespace('http://attempto.ifi.uzh.ch/ace_lexicon#')
+ACE_NS = Namespace("http://attempto.ifi.uzh.ch/ace_lexicon#")
class AnnotatableTerms(Individual):
@@ -517,16 +504,13 @@ class AnnotatableTerms(Individual):
Terms in an OWL ontology with rdfs:label and rdfs:comment
"""
- def __init__(self,
- identifier,
- graph=None,
- nameAnnotation=None,
- nameIsLabel=False):
+ def __init__(self, identifier, graph=None, nameAnnotation=None, nameIsLabel=False):
super(AnnotatableTerms, self).__init__(identifier, graph)
if nameAnnotation:
self.setupACEAnnotations()
- self.PN_sgProp.extent = [(self.identifier,
- self.handleAnnotation(nameAnnotation))]
+ self.PN_sgProp.extent = [
+ (self.identifier, self.handleAnnotation(nameAnnotation))
+ ]
if nameIsLabel:
self.label = [nameAnnotation]
@@ -534,41 +518,42 @@ class AnnotatableTerms(Individual):
return val if isinstance(val, Literal) else Literal(val)
def setupACEAnnotations(self):
- self.graph.bind('ace', ACE_NS, override=False)
+ self.graph.bind("ace", ACE_NS, override=False)
# PN_sg singular form of a proper name ()
- self.PN_sgProp = Property(ACE_NS.PN_sg,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.PN_sgProp = Property(
+ ACE_NS.PN_sg, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# CN_sg singular form of a common noun
- self.CN_sgProp = Property(ACE_NS.CN_sg,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.CN_sgProp = Property(
+ ACE_NS.CN_sg, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# CN_pl plural form of a common noun
- self.CN_plProp = Property(ACE_NS.CN_pl,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.CN_plProp = Property(
+ ACE_NS.CN_pl, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# singular form of a transitive verb
- self.TV_sgProp = Property(ACE_NS.TV_sg,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.TV_sgProp = Property(
+ ACE_NS.TV_sg, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# plural form of a transitive verb
- self.TV_plProp = Property(ACE_NS.TV_pl,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.TV_plProp = Property(
+ ACE_NS.TV_pl, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# past participle form a transitive verb
- self.TV_vbgProp = Property(ACE_NS.TV_vbg,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.TV_vbgProp = Property(
+ ACE_NS.TV_vbg, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
def _get_comment(self):
for comment in self.graph.objects(
- subject=self.identifier, predicate=RDFS.comment):
+ subject=self.identifier, predicate=RDFS.comment
+ ):
yield comment
def _set_comment(self, comment):
@@ -587,8 +572,7 @@ class AnnotatableTerms(Individual):
comment = property(_get_comment, _set_comment, _del_comment)
def _get_seeAlso(self):
- for sA in self.graph.objects(
- subject=self.identifier, predicate=RDFS.seeAlso):
+ for sA in self.graph.objects(subject=self.identifier, predicate=RDFS.seeAlso):
yield sA
def _set_seeAlso(self, seeAlsos):
@@ -600,11 +584,11 @@ class AnnotatableTerms(Individual):
@TermDeletionHelper(RDFS.seeAlso)
def _del_seeAlso(self):
pass
+
seeAlso = property(_get_seeAlso, _set_seeAlso, _del_seeAlso)
def _get_label(self):
- for label in self.graph.objects(
- subject=self.identifier, predicate=RDFS.label):
+ for label in self.graph.objects(subject=self.identifier, predicate=RDFS.label):
yield label
def _set_label(self, label):
@@ -636,8 +620,7 @@ class AnnotatableTerms(Individual):
class Ontology(AnnotatableTerms):
""" The owl ontology metadata"""
- def __init__(self,
- identifier=None, imports=None, comment=None, graph=None):
+ def __init__(self, identifier=None, imports=None, comment=None, graph=None):
super(Ontology, self).__init__(identifier, graph)
self.imports = imports and imports or []
self.comment = comment and comment or []
@@ -649,16 +632,17 @@ class Ontology(AnnotatableTerms):
def _get_imports(self):
for owl in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS['imports']):
+ subject=self.identifier, predicate=OWL_NS["imports"]
+ ):
yield owl
def _set_imports(self, other):
if not other:
return
for o in other:
- self.graph.add((self.identifier, OWL_NS['imports'], o))
+ self.graph.add((self.identifier, OWL_NS["imports"], o))
- @TermDeletionHelper(OWL_NS['imports'])
+ @TermDeletionHelper(OWL_NS["imports"])
def _del_imports(self):
pass
@@ -676,25 +660,32 @@ def AllClasses(graph):
def AllProperties(graph):
prevProps = set()
for s, p, o in graph.triples_choices(
- (None, RDF.type, [OWL_NS.SymmetricProperty,
- OWL_NS.FunctionalProperty,
- OWL_NS.InverseFunctionalProperty,
- OWL_NS.TransitiveProperty,
- OWL_NS.DatatypeProperty,
- OWL_NS.ObjectProperty,
- OWL_NS.AnnotationProperty])):
- if o in [OWL_NS.SymmetricProperty,
- OWL_NS.InverseFunctionalProperty,
- OWL_NS.TransitiveProperty,
- OWL_NS.ObjectProperty]:
+ (
+ None,
+ RDF.type,
+ [
+ OWL_NS.SymmetricProperty,
+ OWL_NS.FunctionalProperty,
+ OWL_NS.InverseFunctionalProperty,
+ OWL_NS.TransitiveProperty,
+ OWL_NS.DatatypeProperty,
+ OWL_NS.ObjectProperty,
+ OWL_NS.AnnotationProperty,
+ ],
+ )
+ ):
+ if o in [
+ OWL_NS.SymmetricProperty,
+ OWL_NS.InverseFunctionalProperty,
+ OWL_NS.TransitiveProperty,
+ OWL_NS.ObjectProperty,
+ ]:
bType = OWL_NS.ObjectProperty
else:
bType = OWL_NS.DatatypeProperty
if s not in prevProps:
prevProps.add(s)
- yield Property(s,
- graph=graph,
- baseType=bType)
+ yield Property(s, graph=graph, baseType=bType)
class ClassNamespaceFactory(Namespace):
@@ -711,20 +702,22 @@ class ClassNamespaceFactory(Namespace):
return self.term(name)
-CLASS_RELATIONS = set(
- OWL_NS.resourceProperties
-).difference([OWL_NS.onProperty,
- OWL_NS.allValuesFrom,
- OWL_NS.hasValue,
- OWL_NS.someValuesFrom,
- OWL_NS.inverseOf,
- OWL_NS.imports,
- OWL_NS.versionInfo,
- OWL_NS.backwardCompatibleWith,
- OWL_NS.incompatibleWith,
- OWL_NS.unionOf,
- OWL_NS.intersectionOf,
- OWL_NS.oneOf])
+CLASS_RELATIONS = set(OWL_NS.resourceProperties).difference(
+ [
+ OWL_NS.onProperty,
+ OWL_NS.allValuesFrom,
+ OWL_NS.hasValue,
+ OWL_NS.someValuesFrom,
+ OWL_NS.inverseOf,
+ OWL_NS.imports,
+ OWL_NS.versionInfo,
+ OWL_NS.backwardCompatibleWith,
+ OWL_NS.incompatibleWith,
+ OWL_NS.unionOf,
+ OWL_NS.intersectionOf,
+ OWL_NS.oneOf,
+ ]
+)
def ComponentTerms(cls):
@@ -736,10 +729,8 @@ def ComponentTerms(cls):
try:
cls = CastClass(cls, Individual.factoryGraph)
for s, p, innerClsId in cls.factoryGraph.triples_choices(
- (cls.identifier,
- [OWL_NS.allValuesFrom,
- OWL_NS.someValuesFrom],
- None)):
+ (cls.identifier, [OWL_NS.allValuesFrom, OWL_NS.someValuesFrom], None)
+ ):
innerCls = Class(innerClsId, skipOWLClassMembership=True)
if isinstance(innerClsId, BNode):
for _c in ComponentTerms(innerCls):
@@ -766,13 +757,10 @@ def ComponentTerms(cls):
else:
yield innerCls
for s, p, o in cls.factoryGraph.triples_choices(
- (classOrIdentifier(cls),
- CLASS_RELATIONS,
- None)
+ (classOrIdentifier(cls), CLASS_RELATIONS, None)
):
if isinstance(o, BNode):
- for _c in ComponentTerms(
- CastClass(o, Individual.factoryGraph)):
+ for _c in ComponentTerms(CastClass(o, Individual.factoryGraph)):
yield _c
else:
yield innerCls
@@ -823,29 +811,29 @@ def DeepClassClear(classToPrune):
>>> list(g.triples((otherClass.identifier, None, None))) #doctest: +SKIP
[]
"""
+
def deepClearIfBNode(_class):
if isinstance(classOrIdentifier(_class), BNode):
DeepClassClear(_class)
+
classToPrune = CastClass(classToPrune, Individual.factoryGraph)
for c in classToPrune.subClassOf:
deepClearIfBNode(c)
classToPrune.graph.remove((classToPrune.identifier, RDFS.subClassOf, None))
for c in classToPrune.equivalentClass:
deepClearIfBNode(c)
- classToPrune.graph.remove(
- (classToPrune.identifier, OWL_NS.equivalentClass, None))
+ classToPrune.graph.remove((classToPrune.identifier, OWL_NS.equivalentClass, None))
inverseClass = classToPrune.complementOf
if inverseClass:
- classToPrune.graph.remove(
- (classToPrune.identifier, OWL_NS.complementOf, None))
+ classToPrune.graph.remove((classToPrune.identifier, OWL_NS.complementOf, None))
deepClearIfBNode(inverseClass)
if isinstance(classToPrune, BooleanClass):
for c in classToPrune:
deepClearIfBNode(c)
classToPrune.clear()
- classToPrune.graph.remove((classToPrune.identifier,
- classToPrune._operator,
- None))
+ classToPrune.graph.remove(
+ (classToPrune.identifier, classToPrune._operator, None)
+ )
class MalformedClass(Exception):
@@ -858,40 +846,36 @@ class MalformedClass(Exception):
def CastClass(c, graph=None):
graph = graph is None and c.factoryGraph or graph
- for kind in graph.objects(subject=classOrIdentifier(c),
- predicate=RDF.type):
+ for kind in graph.objects(subject=classOrIdentifier(c), predicate=RDF.type):
if kind == OWL_NS.Restriction:
- kwArgs = {'identifier': classOrIdentifier(c),
- 'graph': graph}
- for s, p, o in graph.triples((classOrIdentifier(c),
- None,
- None)):
+ kwArgs = {"identifier": classOrIdentifier(c), "graph": graph}
+ for s, p, o in graph.triples((classOrIdentifier(c), None, None)):
if p != RDF.type:
if p == OWL_NS.onProperty:
- kwArgs['onProperty'] = o
+ kwArgs["onProperty"] = o
else:
if p not in Restriction.restrictionKinds:
continue
kwArgs[str(p.split(OWL_NS)[-1])] = o
- if not set([str(i.split(OWL_NS)[-1])
- for i in Restriction.restrictionKinds]
- ).intersection(kwArgs):
+ if not set(
+ [str(i.split(OWL_NS)[-1]) for i in Restriction.restrictionKinds]
+ ).intersection(kwArgs):
raise MalformedClass("Malformed owl:Restriction")
return Restriction(**kwArgs)
else:
- for s, p, o in graph.triples_choices((classOrIdentifier(c),
- [OWL_NS.intersectionOf,
- OWL_NS.unionOf,
- OWL_NS.oneOf],
- None)):
+ for s, p, o in graph.triples_choices(
+ (
+ classOrIdentifier(c),
+ [OWL_NS.intersectionOf, OWL_NS.unionOf, OWL_NS.oneOf],
+ None,
+ )
+ ):
if p == OWL_NS.oneOf:
return EnumeratedClass(classOrIdentifier(c), graph=graph)
else:
- return BooleanClass(
- classOrIdentifier(c), operator=p, graph=graph)
+ return BooleanClass(classOrIdentifier(c), operator=p, graph=graph)
# assert (classOrIdentifier(c),RDF.type,OWL_NS.Class) in graph
- return Class(
- classOrIdentifier(c), graph=graph, skipOWLClassMembership=True)
+ return Class(classOrIdentifier(c), graph=graph, skipOWLClassMembership=True)
class Class(AnnotatableTerms):
@@ -947,28 +931,37 @@ class Class(AnnotatableTerms):
CN_plProp = nounAnnotations
if CN_sgProp:
- self.CN_sgProp.extent = [(self.identifier,
- self.handleAnnotation(CN_sgProp))]
+ self.CN_sgProp.extent = [
+ (self.identifier, self.handleAnnotation(CN_sgProp))
+ ]
if CN_plProp:
- self.CN_plProp.extent = [(self.identifier,
- self.handleAnnotation(CN_plProp))]
-
- def __init__(self, identifier=None, subClassOf=None, equivalentClass=None,
- disjointWith=None, complementOf=None, graph=None,
- skipOWLClassMembership=False, comment=None,
- nounAnnotations=None,
- nameAnnotation=None,
- nameIsLabel=False):
- super(Class, self).__init__(identifier, graph,
- nameAnnotation, nameIsLabel)
+ self.CN_plProp.extent = [
+ (self.identifier, self.handleAnnotation(CN_plProp))
+ ]
+
+ def __init__(
+ self,
+ identifier=None,
+ subClassOf=None,
+ equivalentClass=None,
+ disjointWith=None,
+ complementOf=None,
+ graph=None,
+ skipOWLClassMembership=False,
+ comment=None,
+ nounAnnotations=None,
+ nameAnnotation=None,
+ nameIsLabel=False,
+ ):
+ super(Class, self).__init__(identifier, graph, nameAnnotation, nameIsLabel)
if nounAnnotations:
self.setupNounAnnotations(nounAnnotations)
- if not skipOWLClassMembership \
- and (self.identifier, RDF.type, OWL_NS.Class) \
- not in self.graph and \
- (self.identifier, RDF.type, OWL_NS.Restriction) \
- not in self.graph:
+ if (
+ not skipOWLClassMembership
+ and (self.identifier, RDF.type, OWL_NS.Class) not in self.graph
+ and (self.identifier, RDF.type, OWL_NS.Restriction) not in self.graph
+ ):
self.graph.add((self.identifier, RDF.type, OWL_NS.Class))
self.subClassOf = subClassOf and subClassOf or []
@@ -979,9 +972,9 @@ class Class(AnnotatableTerms):
self.comment = comment and comment or []
def _get_extent(self, graph=None):
- for member in (
- graph is None and self.graph or graph).subjects(
- predicate=RDF.type, object=self.identifier):
+ for member in (graph is None and self.graph or graph).subjects(
+ predicate=RDF.type, object=self.identifier
+ ):
yield member
def _set_extent(self, other):
@@ -1003,7 +996,7 @@ class Class(AnnotatableTerms):
annotation = property(_get_annotation, lambda x: x)
def _get_extentQuery(self):
- return (Variable('CLASS'), RDF.type, self.identifier)
+ return (Variable("CLASS"), RDF.type, self.identifier)
def _set_extentQuery(self, other):
pass
@@ -1030,8 +1023,7 @@ class Class(AnnotatableTerms):
def __isub__(self, other):
assert isinstance(other, Class)
- self.graph.remove(
- (classOrIdentifier(other), RDFS.subClassOf, self.identifier))
+ self.graph.remove((classOrIdentifier(other), RDFS.subClassOf, self.identifier))
return self
def __invert__(self):
@@ -1046,7 +1038,8 @@ class Class(AnnotatableTerms):
this class and 'other' and return it
"""
return BooleanClass(
- operator=OWL_NS.unionOf, members=[self, other], graph=self.graph)
+ operator=OWL_NS.unionOf, members=[self, other], graph=self.graph
+ )
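
The `__or__` overload above means two `Class` wrappers can be combined with Python's `|` operator to mint an anonymous owl:unionOf class. A minimal sketch against rdflib.extras.infixowl (the `EX` namespace is hypothetical):

    from rdflib import Graph, Namespace
    from rdflib.extras.infixowl import Class

    EX = Namespace("http://example.org/")
    g = Graph()
    # "|" delegates to Class.__or__, which builds a BooleanClass
    # over owl:unionOf with both operands as members
    parent = Class(EX.Mother, graph=g) | Class(EX.Father, graph=g)
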
def __and__(self, other):
"""
@@ -1074,22 +1067,20 @@ class Class(AnnotatableTerms):
True
"""
return BooleanClass(
- operator=OWL_NS.intersectionOf,
- members=[self, other], graph=self.graph)
+ operator=OWL_NS.intersectionOf, members=[self, other], graph=self.graph
+ )
def _get_subClassOf(self):
for anc in self.graph.objects(
- subject=self.identifier, predicate=RDFS.subClassOf):
- yield Class(anc,
- graph=self.graph,
- skipOWLClassMembership=True)
+ subject=self.identifier, predicate=RDFS.subClassOf
+ ):
+ yield Class(anc, graph=self.graph, skipOWLClassMembership=True)
def _set_subClassOf(self, other):
if not other:
return
for sc in other:
- self.graph.add(
- (self.identifier, RDFS.subClassOf, classOrIdentifier(sc)))
+ self.graph.add((self.identifier, RDFS.subClassOf, classOrIdentifier(sc)))
@TermDeletionHelper(RDFS.subClassOf)
def _del_subClassOf(self):
@@ -1099,45 +1090,48 @@ class Class(AnnotatableTerms):
def _get_equivalentClass(self):
for ec in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.equivalentClass):
+ subject=self.identifier, predicate=OWL_NS.equivalentClass
+ ):
yield Class(ec, graph=self.graph)
def _set_equivalentClass(self, other):
if not other:
return
for sc in other:
- self.graph.add((self.identifier,
- OWL_NS.equivalentClass, classOrIdentifier(sc)))
+ self.graph.add(
+ (self.identifier, OWL_NS.equivalentClass, classOrIdentifier(sc))
+ )
@TermDeletionHelper(OWL_NS.equivalentClass)
def _del_equivalentClass(self):
pass
equivalentClass = property(
- _get_equivalentClass, _set_equivalentClass, _del_equivalentClass)
+ _get_equivalentClass, _set_equivalentClass, _del_equivalentClass
+ )
def _get_disjointWith(self):
for dc in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.disjointWith):
+ subject=self.identifier, predicate=OWL_NS.disjointWith
+ ):
yield Class(dc, graph=self.graph)
def _set_disjointWith(self, other):
if not other:
return
for c in other:
- self.graph.add(
- (self.identifier, OWL_NS.disjointWith, classOrIdentifier(c)))
+ self.graph.add((self.identifier, OWL_NS.disjointWith, classOrIdentifier(c)))
@TermDeletionHelper(OWL_NS.disjointWith)
def _del_disjointWith(self):
pass
- disjointWith = property(
- _get_disjointWith, _set_disjointWith, _del_disjointWith)
+ disjointWith = property(_get_disjointWith, _set_disjointWith, _del_disjointWith)
def _get_complementOf(self):
- comp = list(self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.complementOf))
+ comp = list(
+ self.graph.objects(subject=self.identifier, predicate=OWL_NS.complementOf)
+ )
if not comp:
return None
elif len(comp) == 1:
@@ -1148,15 +1142,13 @@ class Class(AnnotatableTerms):
def _set_complementOf(self, other):
if not other:
return
- self.graph.add(
- (self.identifier, OWL_NS.complementOf, classOrIdentifier(other)))
+ self.graph.add((self.identifier, OWL_NS.complementOf, classOrIdentifier(other)))
@TermDeletionHelper(OWL_NS.complementOf)
def _del_complementOf(self):
pass
- complementOf = property(
- _get_complementOf, _set_complementOf, _del_complementOf)
+ complementOf = property(_get_complementOf, _set_complementOf, _del_complementOf)
def _get_parents(self):
"""
@@ -1187,24 +1179,22 @@ class Class(AnnotatableTerms):
[Class: ex:Parent , Class: ex:Male ]
"""
- for parent in itertools.chain(self.subClassOf,
- self.equivalentClass):
+ for parent in itertools.chain(self.subClassOf, self.equivalentClass):
yield parent
link = first(self.factoryGraph.subjects(RDF.first, self.identifier))
if link:
- listSiblings = list(self.factoryGraph.transitive_subjects(RDF.rest,
- link))
+ listSiblings = list(self.factoryGraph.transitive_subjects(RDF.rest, link))
if listSiblings:
collectionHead = listSiblings[-1]
else:
collectionHead = link
- for disjCls in self.factoryGraph.subjects(
- OWL_NS.unionOf, collectionHead):
+ for disjCls in self.factoryGraph.subjects(OWL_NS.unionOf, collectionHead):
if isinstance(disjCls, URIRef):
yield Class(disjCls, skipOWLClassMembership=True)
for rdfList in self.factoryGraph.objects(
- self.identifier, OWL_NS.intersectionOf):
+ self.identifier, OWL_NS.intersectionOf
+ ):
for member in OWLRDFListProxy([rdfList], graph=self.factoryGraph):
if isinstance(member, URIRef):
yield Class(member, skipOWLClassMembership=True)
@@ -1217,10 +1207,8 @@ class Class(AnnotatableTerms):
# sc = list(self.subClassOf)
ec = list(self.equivalentClass)
for boolClass, p, rdfList in self.graph.triples_choices(
- (self.identifier,
- [OWL_NS.intersectionOf,
- OWL_NS.unionOf],
- None)):
+ (self.identifier, [OWL_NS.intersectionOf, OWL_NS.unionOf], None)
+ ):
ec.append(manchesterSyntax(rdfList, self.graph, boolean=p))
for e in ec:
return False
@@ -1229,8 +1217,7 @@ class Class(AnnotatableTerms):
return True
def subSumpteeIds(self):
- for s in self.graph.subjects(
- predicate=RDFS.subClassOf, object=self.identifier):
+ for s in self.graph.subjects(predicate=RDFS.subClassOf, object=self.identifier):
yield s
# def __iter__(self):
@@ -1246,62 +1233,80 @@ class Class(AnnotatableTerms):
sc = list(self.subClassOf)
ec = list(self.equivalentClass)
for boolClass, p, rdfList in self.graph.triples_choices(
- (self.identifier,
- [OWL_NS.intersectionOf,
- OWL_NS.unionOf],
- None)):
+ (self.identifier, [OWL_NS.intersectionOf, OWL_NS.unionOf], None)
+ ):
ec.append(manchesterSyntax(rdfList, self.graph, boolean=p))
dc = list(self.disjointWith)
c = self.complementOf
if c:
dc.append(c)
- klassKind = ''
+ klassKind = ""
label = list(self.graph.objects(self.identifier, RDFS.label))
- label = label and '(' + label[0] + ')' or ''
+ label = label and "(" + label[0] + ")" or ""
if sc:
if full:
- scJoin = '\n '
+ scJoin = "\n "
else:
- scJoin = ', '
+ scJoin = ", "
necStatements = [
- isinstance(s, Class) and isinstance(self.identifier, BNode)
- and repr(CastClass(s, self.graph)) or
+ isinstance(s, Class)
+ and isinstance(self.identifier, BNode)
+ and repr(CastClass(s, self.graph))
+ or
# repr(BooleanClass(classOrIdentifier(s),
# operator=None,
# graph=self.graph)) or
- manchesterSyntax(classOrIdentifier(s), self.graph) for s in sc]
+ manchesterSyntax(classOrIdentifier(s), self.graph)
+ for s in sc
+ ]
if necStatements:
klassKind = "Primitive Type %s" % label
- exprs.append("SubClassOf: %s" % scJoin.join(
- [str(n) for n in necStatements]))
+ exprs.append(
+ "SubClassOf: %s" % scJoin.join([str(n) for n in necStatements])
+ )
if full:
exprs[-1] = "\n " + exprs[-1]
if ec:
nec_SuffStatements = [
- isinstance(s, str) and s
- or manchesterSyntax(classOrIdentifier(s), self.graph) for s in ec]
+ isinstance(s, str)
+ and s
+ or manchesterSyntax(classOrIdentifier(s), self.graph)
+ for s in ec
+ ]
if nec_SuffStatements:
klassKind = "A Defined Class %s" % label
- exprs.append("EquivalentTo: %s" % ', '.join(nec_SuffStatements))
+ exprs.append("EquivalentTo: %s" % ", ".join(nec_SuffStatements))
if full:
exprs[-1] = "\n " + exprs[-1]
if dc:
- exprs.append("DisjointWith %s\n" % '\n '.join(
- [manchesterSyntax(classOrIdentifier(s), self.graph)
- for s in dc]))
+ exprs.append(
+ "DisjointWith %s\n"
+ % "\n ".join(
+ [manchesterSyntax(classOrIdentifier(s), self.graph) for s in dc]
+ )
+ )
if full:
exprs[-1] = "\n " + exprs[-1]
descr = list(self.graph.objects(self.identifier, RDFS.comment))
if full and normalization:
- klassDescr = klassKind and '\n ## %s ##' % klassKind +\
- (descr and "\n %s" % descr[0] or '') + \
- ' . '.join(exprs) or ' . '.join(exprs)
+ klassDescr = (
+ klassKind
+ and "\n ## %s ##" % klassKind
+ + (descr and "\n %s" % descr[0] or "")
+ + " . ".join(exprs)
+ or " . ".join(exprs)
+ )
else:
- klassDescr = full and (descr and "\n %s" %
- descr[0] or '') or '' + ' . '.join(exprs)
- return (isinstance(self.identifier, BNode) and
- "Some Class " or
- "Class: %s " % self.qname) + klassDescr
+ klassDescr = (
+ full
+ and (descr and "\n %s" % descr[0] or "")
+ or "" + " . ".join(exprs)
+ )
+ return (
+ isinstance(self.identifier, BNode)
+ and "Some Class "
+ or "Class: %s " % self.qname
+ ) + klassDescr
class OWLRDFListProxy(object):
@@ -1315,10 +1320,10 @@ class OWLRDFListProxy(object):
if member not in self._rdfList:
self._rdfList.append(classOrIdentifier(member))
else:
- self._rdfList = Collection(self.graph, BNode(),
- [classOrIdentifier(m) for m in members])
- self.graph.add(
- (self.identifier, self._operator, self._rdfList.uri))
+ self._rdfList = Collection(
+ self.graph, BNode(), [classOrIdentifier(m) for m in members]
+ )
+ self.graph.add((self.identifier, self._operator, self._rdfList.uri))
def __eq__(self, other):
"""
@@ -1412,6 +1417,7 @@ class EnumeratedClass(OWLRDFListProxy, Class):
<BLANKLINE>
<BLANKLINE>
"""
+
_operator = OWL_NS.oneOf
def isPrimitive(self):
@@ -1420,16 +1426,16 @@ class EnumeratedClass(OWLRDFListProxy, Class):
def __init__(self, identifier=None, members=None, graph=None):
Class.__init__(self, identifier, graph=graph)
members = members and members or []
- rdfList = list(self.graph.objects(
- predicate=OWL_NS.oneOf, subject=self.identifier))
+ rdfList = list(
+ self.graph.objects(predicate=OWL_NS.oneOf, subject=self.identifier)
+ )
OWLRDFListProxy.__init__(self, rdfList, members)
def __repr__(self):
"""
Returns the Manchester Syntax equivalent for this class
"""
- return manchesterSyntax(
- self._rdfList.uri, self.graph, boolean=self._operator)
+ return manchesterSyntax(self._rdfList.uri, self.graph, boolean=self._operator)
def serialize(self, graph):
clonedList = Collection(graph, BNode())
@@ -1475,10 +1481,11 @@ class BooleanClassExtentHelper:
def _getExtent():
for c in Individual.factoryGraph.subjects(self.operator):
yield BooleanClass(c, operator=self.operator)
+
return _getExtent
-class Callable():
+class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
@@ -1490,38 +1497,40 @@ class BooleanClass(OWLRDFListProxy, Class):
owl:complementOf is an attribute of Class, however
"""
+
@BooleanClassExtentHelper(OWL_NS.intersectionOf)
@Callable
def getIntersections():
pass
+
getIntersections = Callable(getIntersections)
@BooleanClassExtentHelper(OWL_NS.unionOf)
@Callable
def getUnions():
pass
+
getUnions = Callable(getUnions)
- def __init__(self, identifier=None, operator=OWL_NS.intersectionOf,
- members=None, graph=None):
+ def __init__(
+ self, identifier=None, operator=OWL_NS.intersectionOf, members=None, graph=None
+ ):
if operator is None:
props = []
- for s, p, o in graph.triples_choices((identifier,
- [OWL_NS.intersectionOf,
- OWL_NS.unionOf],
- None)):
+ for s, p, o in graph.triples_choices(
+ (identifier, [OWL_NS.intersectionOf, OWL_NS.unionOf], None)
+ ):
props.append(p)
operator = p
assert len(props) == 1, repr(props)
Class.__init__(self, identifier, graph=graph)
- assert operator in [OWL_NS.intersectionOf,
- OWL_NS.unionOf], str(operator)
+ assert operator in [OWL_NS.intersectionOf, OWL_NS.unionOf], str(operator)
self._operator = operator
- rdfList = list(
- self.graph.objects(predicate=operator, subject=self.identifier))
- assert not members or not rdfList, \
- "This is a previous boolean class description!" + \
- repr(Collection(self.graph, rdfList[0]).n3())
+ rdfList = list(self.graph.objects(predicate=operator, subject=self.identifier))
+ assert not members or not rdfList, (
+ "This is a previous boolean class description!"
+ + repr(Collection(self.graph, rdfList[0]).n3())
+ )
OWLRDFListProxy.__init__(self, rdfList, members)
def copy(self):
@@ -1529,7 +1538,8 @@ class BooleanClass(OWLRDFListProxy, Class):
Create a copy of this class
"""
copyOfClass = BooleanClass(
- operator=self._operator, members=list(self), graph=self.graph)
+ operator=self._operator, members=list(self), graph=self.graph
+ )
return copyOfClass
def serialize(self, graph):
@@ -1573,8 +1583,7 @@ class BooleanClass(OWLRDFListProxy, Class):
The new operator is already being used!
"""
- assert newOperator != self._operator, \
- "The new operator is already being used!"
+ assert newOperator != self._operator, "The new operator is already being used!"
self.graph.remove((self.identifier, self._operator, self._rdfList.uri))
self.graph.add((self.identifier, newOperator, self._rdfList.uri))
self._operator = newOperator
@@ -1583,8 +1592,7 @@ class BooleanClass(OWLRDFListProxy, Class):
"""
Returns the Manchester Syntax equivalent for this class
"""
- return manchesterSyntax(
- self._rdfList.uri, self.graph, boolean=self._operator)
+ return manchesterSyntax(self._rdfList.uri, self.graph, boolean=self._operator)
def __or__(self, other):
"""
@@ -1613,30 +1621,37 @@ class Restriction(Class):
{ individualRestrictionComponent } ')'
"""
- restrictionKinds = [OWL_NS.allValuesFrom,
- OWL_NS.someValuesFrom,
- OWL_NS.hasValue,
- OWL_NS.maxCardinality,
- OWL_NS.minCardinality]
-
- def __init__(self,
- onProperty,
- graph=Graph(),
- allValuesFrom=None,
- someValuesFrom=None,
- value=None,
- cardinality=None,
- maxCardinality=None,
- minCardinality=None,
- identifier=None):
- super(Restriction, self).__init__(identifier,
- graph=graph,
- skipOWLClassMembership=True)
- if (self.identifier,
+ restrictionKinds = [
+ OWL_NS.allValuesFrom,
+ OWL_NS.someValuesFrom,
+ OWL_NS.hasValue,
+ OWL_NS.maxCardinality,
+ OWL_NS.minCardinality,
+ ]
+
+ def __init__(
+ self,
+ onProperty,
+ graph=Graph(),
+ allValuesFrom=None,
+ someValuesFrom=None,
+ value=None,
+ cardinality=None,
+ maxCardinality=None,
+ minCardinality=None,
+ identifier=None,
+ ):
+ super(Restriction, self).__init__(
+ identifier, graph=graph, skipOWLClassMembership=True
+ )
+ if (
+ self.identifier,
OWL_NS.onProperty,
- propertyOrIdentifier(onProperty)) not in graph:
- graph.add((self.identifier, OWL_NS.onProperty,
- propertyOrIdentifier(onProperty)))
+ propertyOrIdentifier(onProperty),
+ ) not in graph:
+ graph.add(
+ (self.identifier, OWL_NS.onProperty, propertyOrIdentifier(onProperty))
+ )
self.onProperty = onProperty
restrTypes = [
(allValuesFrom, OWL_NS.allValuesFrom),
@@ -1644,7 +1659,8 @@ class Restriction(Class):
(value, OWL_NS.hasValue),
(cardinality, OWL_NS.cardinality),
(maxCardinality, OWL_NS.maxCardinality),
- (minCardinality, OWL_NS.minCardinality)]
+ (minCardinality, OWL_NS.minCardinality),
+ ]
validRestrProps = [(i, oTerm) for (i, oTerm) in restrTypes if i]
assert len(validRestrProps)
restrictionRange, restrictionType = validRestrProps.pop()
@@ -1654,13 +1670,11 @@ class Restriction(Class):
elif isinstance(restrictionRange, Class):
self.restrictionRange = classOrIdentifier(restrictionRange)
else:
- self.restrictionRange = first(self.graph.objects(self.identifier,
- restrictionType))
- if (self.identifier,
- restrictionType,
- self.restrictionRange) not in self.graph:
- self.graph.add(
- (self.identifier, restrictionType, self.restrictionRange))
+ self.restrictionRange = first(
+ self.graph.objects(self.identifier, restrictionType)
+ )
+ if (self.identifier, restrictionType, self.restrictionRange) not in self.graph:
+ self.graph.add((self.identifier, restrictionType, self.restrictionRange))
assert self.restrictionRange is not None, Class(self.identifier)
if (self.identifier, RDF.type, OWL_NS.Restriction) not in self.graph:
self.graph.add((self.identifier, RDF.type, OWL_NS.Restriction))
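
Putting the constructor above to work: at least one restriction keyword must be supplied (the `assert len(validRestrProps)`), and the owl:onProperty and restriction-kind triples are added to the graph on instantiation. A minimal sketch (hypothetical `EX` terms):

    from rdflib import Graph, Namespace
    from rdflib.extras.infixowl import Class, Restriction

    EX = Namespace("http://example.org/")
    g = Graph()
    # an anonymous class of things with some ex:hasParent from ex:Person
    r = Restriction(
        EX.hasParent, graph=g, someValuesFrom=Class(EX.Person, graph=g)
    )
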
@@ -1690,8 +1704,7 @@ class Restriction(Class):
[rdflib.term.URIRef(
u'http://www.w3.org/2002/07/owl#DatatypeProperty')]
"""
- Property(
- self.onProperty, graph=self.graph, baseType=None).serialize(graph)
+ Property(self.onProperty, graph=self.graph, baseType=None).serialize(graph)
for s, p, o in self.graph.triples((self.identifier, None, None)):
graph.add((s, p, o))
if p in [OWL_NS.allValuesFrom, OWL_NS.someValuesFrom]:
@@ -1710,18 +1723,20 @@ class Restriction(Class):
"""
assert isinstance(other, Class), repr(other) + repr(type(other))
if isinstance(other, Restriction):
- return other.onProperty == self.onProperty and \
- other.restrictionRange == self.restrictionRange
+ return (
+ other.onProperty == self.onProperty
+ and other.restrictionRange == self.restrictionRange
+ )
else:
return False
def _get_onProperty(self):
- return list(self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.onProperty))[0]
+ return list(
+ self.graph.objects(subject=self.identifier, predicate=OWL_NS.onProperty)
+ )[0]
def _set_onProperty(self, prop):
- triple = (
- self.identifier, OWL_NS.onProperty, propertyOrIdentifier(prop))
+ triple = (self.identifier, OWL_NS.onProperty, propertyOrIdentifier(prop))
if not prop:
return
elif triple in self.graph:
@@ -1737,13 +1752,13 @@ class Restriction(Class):
def _get_allValuesFrom(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.allValuesFrom):
+ subject=self.identifier, predicate=OWL_NS.allValuesFrom
+ ):
return Class(i, graph=self.graph)
return None
def _set_allValuesFrom(self, other):
- triple = (
- self.identifier, OWL_NS.allValuesFrom, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.allValuesFrom, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1755,18 +1770,17 @@ class Restriction(Class):
def _del_allValuesFrom(self):
pass
- allValuesFrom = property(
- _get_allValuesFrom, _set_allValuesFrom, _del_allValuesFrom)
+ allValuesFrom = property(_get_allValuesFrom, _set_allValuesFrom, _del_allValuesFrom)
def _get_someValuesFrom(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.someValuesFrom):
+ subject=self.identifier, predicate=OWL_NS.someValuesFrom
+ ):
return Class(i, graph=self.graph)
return None
def _set_someValuesFrom(self, other):
- triple = (
- self.identifier, OWL_NS.someValuesFrom, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.someValuesFrom, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1779,11 +1793,11 @@ class Restriction(Class):
pass
someValuesFrom = property(
- _get_someValuesFrom, _set_someValuesFrom, _del_someValuesFrom)
+ _get_someValuesFrom, _set_someValuesFrom, _del_someValuesFrom
+ )
def _get_hasValue(self):
- for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.hasValue):
+ for i in self.graph.objects(subject=self.identifier, predicate=OWL_NS.hasValue):
return Class(i, graph=self.graph)
return None
@@ -1804,13 +1818,13 @@ class Restriction(Class):
def _get_cardinality(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.cardinality):
+ subject=self.identifier, predicate=OWL_NS.cardinality
+ ):
return Class(i, graph=self.graph)
return None
def _set_cardinality(self, other):
- triple = (
- self.identifier, OWL_NS.cardinality, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.cardinality, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1822,18 +1836,17 @@ class Restriction(Class):
def _del_cardinality(self):
pass
- cardinality = property(
- _get_cardinality, _set_cardinality, _del_cardinality)
+ cardinality = property(_get_cardinality, _set_cardinality, _del_cardinality)
def _get_maxCardinality(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.maxCardinality):
+ subject=self.identifier, predicate=OWL_NS.maxCardinality
+ ):
return Class(i, graph=self.graph)
return None
def _set_maxCardinality(self, other):
- triple = (
- self.identifier, OWL_NS.maxCardinality, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.maxCardinality, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1846,17 +1859,18 @@ class Restriction(Class):
pass
maxCardinality = property(
- _get_maxCardinality, _set_maxCardinality, _del_maxCardinality)
+ _get_maxCardinality, _set_maxCardinality, _del_maxCardinality
+ )
def _get_minCardinality(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.minCardinality):
+ subject=self.identifier, predicate=OWL_NS.minCardinality
+ ):
return Class(i, graph=self.graph)
return None
def _set_minCardinality(self, other):
- triple = (
- self.identifier, OWL_NS.minCardinality, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.minCardinality, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1869,12 +1883,13 @@ class Restriction(Class):
pass
minCardinality = property(
- _get_minCardinality, _set_minCardinality, _del_minCardinality)
+ _get_minCardinality, _set_minCardinality, _del_minCardinality
+ )
def restrictionKind(self):
- for p in self.graph.triple_choices((self.identifier,
- self.restrictionKinds,
- None)):
+        for s, p, o in self.graph.triples_choices(
+            (self.identifier, self.restrictionKinds, None)
+        ):
return p.split(OWL_NS)[-1]
raise
@@ -1884,24 +1899,28 @@ class Restriction(Class):
"""
return manchesterSyntax(self.identifier, self.graph)
+
### Infix Operators ###
-some = Infix(lambda prop, _class: Restriction(prop, graph=_class.graph,
- someValuesFrom=_class))
-only = Infix(lambda prop, _class: Restriction(prop, graph=_class.graph,
- allValuesFrom=_class))
-max = Infix(lambda prop, _class: Restriction(prop, graph=prop.graph,
- maxCardinality=_class))
-min = Infix(lambda prop, _class: Restriction(prop, graph=prop.graph,
- minCardinality=_class))
-exactly = Infix(lambda prop, _class: Restriction(prop, graph=prop.graph,
- cardinality=_class))
-value = Infix(
- lambda prop, _class: Restriction(prop, graph=prop.graph, value=_class))
-
-PropertyAbstractSyntax =\
- """
+some = Infix(
+ lambda prop, _class: Restriction(prop, graph=_class.graph, someValuesFrom=_class)
+)
+only = Infix(
+ lambda prop, _class: Restriction(prop, graph=_class.graph, allValuesFrom=_class)
+)
+max = Infix(
+ lambda prop, _class: Restriction(prop, graph=prop.graph, maxCardinality=_class)
+)
+min = Infix(
+ lambda prop, _class: Restriction(prop, graph=prop.graph, minCardinality=_class)
+)
+exactly = Infix(
+ lambda prop, _class: Restriction(prop, graph=prop.graph, cardinality=_class)
+)
+value = Infix(lambda prop, _class: Restriction(prop, graph=prop.graph, value=_class))
+
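
These Infix wrappers let restrictions be written in a Manchester-like surface syntax, with the operator placed between two `|` characters. A minimal sketch (hypothetical `EX` terms); note that `max` and `min` shadow Python built-ins if star-imported:

    from rdflib import Graph, Namespace
    from rdflib.extras.infixowl import Class, Property, some, only

    EX = Namespace("http://example.org/")
    g = Graph()
    knows = Property(EX.knows, graph=g)
    person = Class(EX.Person, graph=g)
    r1 = knows | some | person   # owl:someValuesFrom restriction
    r2 = knows | only | person   # owl:allValuesFrom restriction
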
+PropertyAbstractSyntax = """
%s( %s { %s }
%s
{ 'super(' datavaluedPropertyID ')'} ['Functional']
@@ -1932,33 +1951,40 @@ class Property(AnnotatableTerms):
TV_plProp = verbAnnotations
TV_vbg = verbAnnotations
if TV_sgProp:
- self.TV_sgProp.extent = [(self.identifier,
- self.handleAnnotation(TV_sgProp))]
+ self.TV_sgProp.extent = [
+ (self.identifier, self.handleAnnotation(TV_sgProp))
+ ]
if TV_plProp:
- self.TV_plProp.extent = [(self.identifier,
- self.handleAnnotation(TV_plProp))]
+ self.TV_plProp.extent = [
+ (self.identifier, self.handleAnnotation(TV_plProp))
+ ]
if TV_vbg:
- self.TV_vbgProp.extent = [(self.identifier,
- self.handleAnnotation(TV_vbg))]
+ self.TV_vbgProp.extent = [(self.identifier, self.handleAnnotation(TV_vbg))]
def __init__(
- self, identifier=None, graph=None, baseType=OWL_NS.ObjectProperty,
- subPropertyOf=None, domain=None, range=None, inverseOf=None,
- otherType=None, equivalentProperty=None,
+ self,
+ identifier=None,
+ graph=None,
+ baseType=OWL_NS.ObjectProperty,
+ subPropertyOf=None,
+ domain=None,
+ range=None,
+ inverseOf=None,
+ otherType=None,
+ equivalentProperty=None,
comment=None,
verbAnnotations=None,
nameAnnotation=None,
- nameIsLabel=False):
- super(Property, self).__init__(identifier, graph,
- nameAnnotation, nameIsLabel)
+ nameIsLabel=False,
+ ):
+ super(Property, self).__init__(identifier, graph, nameAnnotation, nameIsLabel)
if verbAnnotations:
self.setupVerbAnnotations(verbAnnotations)
assert not isinstance(self.identifier, BNode)
if baseType is None:
            # None given, determine via introspection
- self._baseType = first(
- Individual(self.identifier, graph=self.graph).type)
+ self._baseType = first(Individual(self.identifier, graph=self.graph).type)
else:
if (self.identifier, RDF.type, baseType) not in self.graph:
self.graph.add((self.identifier, RDF.type, baseType))
@@ -1972,16 +1998,15 @@ class Property(AnnotatableTerms):
def serialize(self, graph):
for fact in self.graph.triples((self.identifier, None, None)):
graph.add(fact)
- for p in itertools.chain(self.subPropertyOf,
- self.inverseOf):
+ for p in itertools.chain(self.subPropertyOf, self.inverseOf):
p.serialize(graph)
- for c in itertools.chain(self.domain,
- self.range):
+ for c in itertools.chain(self.domain, self.range):
CastClass(c, self.graph).serialize(graph)
def _get_extent(self, graph=None):
for triple in (graph is None and self.graph or graph).triples(
- (None, self.identifier, None)):
+ (None, self.identifier, None)
+ ):
yield triple
def _set_extent(self, other):
@@ -1995,36 +2020,44 @@ class Property(AnnotatableTerms):
def __repr__(self):
rt = []
if OWL_NS.ObjectProperty in self.type:
- rt.append('ObjectProperty( %s annotation(%s)'
- % (self.qname, first(self.comment) and
- first(self.comment) or ''))
+ rt.append(
+ "ObjectProperty( %s annotation(%s)"
+ % (self.qname, first(self.comment) and first(self.comment) or "")
+ )
if first(self.inverseOf):
twoLinkInverse = first(first(self.inverseOf).inverseOf)
- if twoLinkInverse \
- and twoLinkInverse.identifier == self.identifier:
+ if twoLinkInverse and twoLinkInverse.identifier == self.identifier:
inverseRepr = first(self.inverseOf).qname
else:
inverseRepr = repr(first(self.inverseOf))
- rt.append(" inverseOf( %s )%s" % (
- inverseRepr,
- OWL_NS.SymmetricProperty in self.type and
- ' Symmetric' or
- ''))
+ rt.append(
+ " inverseOf( %s )%s"
+ % (
+ inverseRepr,
+ OWL_NS.SymmetricProperty in self.type and " Symmetric" or "",
+ )
+ )
for s, p, roleType in self.graph.triples_choices(
- (self.identifier,
- RDF.type,
- [OWL_NS.FunctionalProperty,
- OWL_NS.InverseFunctionalProperty,
- OWL_NS.TransitiveProperty])):
+ (
+ self.identifier,
+ RDF.type,
+ [
+ OWL_NS.FunctionalProperty,
+ OWL_NS.InverseFunctionalProperty,
+ OWL_NS.TransitiveProperty,
+ ],
+ )
+ ):
rt.append(str(roleType.split(OWL_NS)[-1]))
else:
- rt.append('DatatypeProperty( %s %s'
- % (self.qname, first(self.comment) and
- first(self.comment) or
- ''))
- for s, p, roleType in self.graph.triples((
- self.identifier, RDF.type, OWL_NS.FunctionalProperty)):
- rt.append(' Functional')
+ rt.append(
+ "DatatypeProperty( %s %s"
+ % (self.qname, first(self.comment) and first(self.comment) or "")
+ )
+ for s, p, roleType in self.graph.triples(
+ (self.identifier, RDF.type, OWL_NS.FunctionalProperty)
+ ):
+ rt.append(" Functional")
def canonicalName(term, g):
normalizedName = classOrIdentifier(term)
@@ -2032,55 +2065,71 @@ class Property(AnnotatableTerms):
return term
elif normalizedName.startswith(_XSD_NS):
return str(term)
- elif first(g.triples_choices((
- normalizedName,
- [OWL_NS.unionOf,
- OWL_NS.intersectionOf], None))):
+ elif first(
+ g.triples_choices(
+ (normalizedName, [OWL_NS.unionOf, OWL_NS.intersectionOf], None)
+ )
+ ):
return repr(term)
else:
return str(term.qname)
- rt.append(' '.join([" super( %s )" % canonicalName(
- superP, self.graph)
- for superP in self.subPropertyOf]))
- rt.append(' '.join([" domain( %s )" % canonicalName(
- domain, self.graph)
- for domain in self.domain]))
- rt.append(' '.join([" range( %s )" % canonicalName(
- range, self.graph)
- for range in self.range]))
- rt = '\n'.join([expr for expr in rt if expr])
- rt += '\n)'
- return str(rt).encode('utf-8')
+
+ rt.append(
+ " ".join(
+ [
+ " super( %s )" % canonicalName(superP, self.graph)
+ for superP in self.subPropertyOf
+ ]
+ )
+ )
+ rt.append(
+ " ".join(
+ [
+ " domain( %s )" % canonicalName(domain, self.graph)
+ for domain in self.domain
+ ]
+ )
+ )
+ rt.append(
+ " ".join(
+ [
+ " range( %s )" % canonicalName(range, self.graph)
+ for range in self.range
+ ]
+ )
+ )
+ rt = "\n".join([expr for expr in rt if expr])
+ rt += "\n)"
+ return str(rt).encode("utf-8")
def _get_subPropertyOf(self):
for anc in self.graph.objects(
- subject=self.identifier, predicate=RDFS.subPropertyOf):
+ subject=self.identifier, predicate=RDFS.subPropertyOf
+ ):
yield Property(anc, graph=self.graph, baseType=None)
def _set_subPropertyOf(self, other):
if not other:
return
for sP in other:
- self.graph.add(
- (self.identifier, RDFS.subPropertyOf, classOrIdentifier(sP)))
+ self.graph.add((self.identifier, RDFS.subPropertyOf, classOrIdentifier(sP)))
@TermDeletionHelper(RDFS.subPropertyOf)
def _del_subPropertyOf(self):
pass
- subPropertyOf = property(
- _get_subPropertyOf, _set_subPropertyOf, _del_subPropertyOf)
+ subPropertyOf = property(_get_subPropertyOf, _set_subPropertyOf, _del_subPropertyOf)
def _get_inverseOf(self):
for anc in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.inverseOf):
+ subject=self.identifier, predicate=OWL_NS.inverseOf
+ ):
yield Property(anc, graph=self.graph, baseType=None)
def _set_inverseOf(self, other):
if not other:
return
- self.graph.add(
- (self.identifier, OWL_NS.inverseOf, classOrIdentifier(other)))
+ self.graph.add((self.identifier, OWL_NS.inverseOf, classOrIdentifier(other)))
@TermDeletionHelper(OWL_NS.inverseOf)
def _del_inverseOf(self):
@@ -2089,20 +2138,17 @@ class Property(AnnotatableTerms):
inverseOf = property(_get_inverseOf, _set_inverseOf, _del_inverseOf)
def _get_domain(self):
- for dom in self.graph.objects(
- subject=self.identifier, predicate=RDFS.domain):
+ for dom in self.graph.objects(subject=self.identifier, predicate=RDFS.domain):
yield Class(dom, graph=self.graph)
def _set_domain(self, other):
if not other:
return
if isinstance(other, (Individual, Identifier)):
- self.graph.add(
- (self.identifier, RDFS.domain, classOrIdentifier(other)))
+ self.graph.add((self.identifier, RDFS.domain, classOrIdentifier(other)))
else:
for dom in other:
- self.graph.add(
- (self.identifier, RDFS.domain, classOrIdentifier(dom)))
+ self.graph.add((self.identifier, RDFS.domain, classOrIdentifier(dom)))
@TermDeletionHelper(RDFS.domain)
def _del_domain(self):
@@ -2111,20 +2157,17 @@ class Property(AnnotatableTerms):
domain = property(_get_domain, _set_domain, _del_domain)
def _get_range(self):
- for ran in self.graph.objects(
- subject=self.identifier, predicate=RDFS.range):
+ for ran in self.graph.objects(subject=self.identifier, predicate=RDFS.range):
yield Class(ran, graph=self.graph)
def _set_range(self, ranges):
if not ranges:
return
if isinstance(ranges, (Individual, Identifier)):
- self.graph.add(
- (self.identifier, RDFS.range, classOrIdentifier(ranges)))
+ self.graph.add((self.identifier, RDFS.range, classOrIdentifier(ranges)))
else:
for range in ranges:
- self.graph.add(
- (self.identifier, RDFS.range, classOrIdentifier(range)))
+ self.graph.add((self.identifier, RDFS.range, classOrIdentifier(range)))
@TermDeletionHelper(RDFS.range)
def _del_range(self):
@@ -2144,9 +2187,9 @@ def CommonNSBindings(graph, additionalNS={}):
    Takes a graph and binds the common namespaces (rdf, rdfs & owl)
"""
namespace_manager = NamespaceManager(graph)
- namespace_manager.bind('rdfs', RDFS)
- namespace_manager.bind('rdf', RDF)
- namespace_manager.bind('owl', OWL_NS)
+ namespace_manager.bind("rdfs", RDFS)
+ namespace_manager.bind("rdf", RDF)
+ namespace_manager.bind("owl", OWL_NS)
for prefix, uri in list(additionalNS.items()):
namespace_manager.bind(prefix, uri, override=False)
graph.namespace_manager = namespace_manager
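
A quick usage sketch for the helper above (the "ex" binding is hypothetical):

    from rdflib import Graph, Namespace
    from rdflib.extras.infixowl import CommonNSBindings

    g = Graph()
    CommonNSBindings(g, additionalNS={"ex": Namespace("http://example.org/")})
    # g now serializes with the rdf:, rdfs:, owl: and ex: prefixes bound
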
@@ -2154,8 +2197,9 @@ def CommonNSBindings(graph, additionalNS={}):
def test():
import doctest
+
doctest.testmod()
-if __name__ == '__main__':
+if __name__ == "__main__":
test()
diff --git a/rdflib/graph.py b/rdflib/graph.py
index 7bfb38aa..77d1fbc7 100644
--- a/rdflib/graph.py
+++ b/rdflib/graph.py
@@ -293,7 +293,9 @@ class Graph(Node):
For more on named graphs, see: http://www.w3.org/2004/03/trix/
"""
- def __init__(self, store="default", identifier=None, namespace_manager=None, base=None):
+ def __init__(
+ self, store="default", identifier=None, namespace_manager=None, base=None
+ ):
super(Graph, self).__init__()
self.base = base
self.__identifier = identifier or BNode()
@@ -1496,7 +1498,9 @@ class ConjunctiveGraph(Graph):
identifier must be a URIRef or BNode.
"""
- return Graph(store=self.store, identifier=identifier, namespace_manager=self, base=base)
+ return Graph(
+ store=self.store, identifier=identifier, namespace_manager=self, base=base
+ )
def remove_context(self, context):
"""Removes the given context from the graph"""
@@ -1665,7 +1669,9 @@ class Dataset(ConjunctiveGraph):
if not self.store.graph_aware:
raise Exception("DataSet must be backed by a graph-aware store!")
self.default_context = Graph(
- store=self.store, identifier=DATASET_DEFAULT_GRAPH_ID, base=default_graph_base
+ store=self.store,
+ identifier=DATASET_DEFAULT_GRAPH_ID,
+ base=default_graph_base,
)
self.default_union = default_union
@@ -2014,7 +2020,7 @@ def _assertnode(*terms):
class BatchAddGraph(object):
- '''
+ """
Wrapper around graph that turns calls to :meth:`add` (and optionally, :meth:`addN`)
into calls to :meth:`~rdflib.graph.Graph.addN`.
@@ -2031,7 +2037,7 @@ class BatchAddGraph(object):
to :meth:`reset`
:ivar batch: The current buffer of triples
- '''
+ """
def __init__(self, graph, batch_size=1000, batch_addn=False):
if not batch_size or batch_size < 2:
@@ -2043,18 +2049,18 @@ class BatchAddGraph(object):
self.reset()
def reset(self):
- '''
+ """
Manually clear the buffered triples and reset the count to zero
- '''
+ """
self.batch = []
self.count = 0
def add(self, triple_or_quad):
- '''
+ """
Add a triple to the buffer
:param triple: The triple to add
- '''
+ """
if len(self.batch) >= self.__batch_size:
self.graph.addN(self.batch)
self.batch = []
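
As the docstring notes, BatchAddGraph buffers single add() calls and forwards them to addN() in chunks. A minimal sketch, assuming the context-manager support the class ships with (which flushes any remainder on exit):

    from rdflib import Graph, URIRef
    from rdflib.graph import BatchAddGraph

    g = Graph()
    p = URIRef("http://example.org/p")
    with BatchAddGraph(g, batch_size=500) as batch:
        for i in range(2000):
            s = URIRef("http://example.org/s%d" % i)
            batch.add((s, p, URIRef("http://example.org/o")))
    # the triples reach the store via a handful of addN() calls, not 2000 add()s
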
diff --git a/rdflib/namespace.py b/rdflib/namespace.py
index 03623c17..92016319 100644
--- a/rdflib/namespace.py
+++ b/rdflib/namespace.py
@@ -8,8 +8,6 @@ import os
from unicodedata import category
-
-
from urllib.request import pathname2url
from urllib.parse import urldefrag
from urllib.parse import urljoin
@@ -73,10 +71,23 @@ The following namespaces are available by directly importing from rdflib:
"""
__all__ = [
- 'is_ncname', 'split_uri', 'Namespace',
- 'ClosedNamespace', 'NamespaceManager',
- 'XMLNS', 'RDF', 'RDFS', 'XSD', 'OWL',
- 'SKOS', 'DOAP', 'FOAF', 'DC', 'DCTERMS', 'VOID']
+ "is_ncname",
+ "split_uri",
+ "Namespace",
+ "ClosedNamespace",
+ "NamespaceManager",
+ "XMLNS",
+ "RDF",
+ "RDFS",
+ "XSD",
+ "OWL",
+ "SKOS",
+ "DOAP",
+ "FOAF",
+ "DC",
+ "DCTERMS",
+ "VOID",
+]
logger = logging.getLogger(__name__)
@@ -99,16 +110,16 @@ class Namespace(str):
try:
rt = str.__new__(cls, value)
except UnicodeDecodeError:
- rt = str.__new__(cls, value, 'utf-8')
+ rt = str.__new__(cls, value, "utf-8")
return rt
@property
def title(self):
- return URIRef(self + 'title')
+ return URIRef(self + "title")
def term(self, name):
# need to handle slices explicitly because of __getitem__ override
- return URIRef(self + (name if isinstance(name, str) else ''))
+ return URIRef(self + (name if isinstance(name, str) else ""))
def __getitem__(self, key, default=None):
return self.term(key)
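
In practice the term/__getitem__ pair means vocabulary URIs can be minted by attribute or by key access (key access covers names that collide with str methods):

    from rdflib import Namespace, URIRef

    EX = Namespace("http://example.org/")
    assert EX.name == URIRef("http://example.org/name")
    assert EX["items"] == URIRef("http://example.org/items")
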
@@ -140,7 +151,7 @@ class URIPattern(str):
try:
rt = str.__new__(cls, value)
except UnicodeDecodeError:
- rt = str.__new__(cls, value, 'utf-8')
+ rt = str.__new__(cls, value, "utf-8")
return rt
def __mod__(self, *args, **kwargs):
@@ -169,9 +180,7 @@ class ClosedNamespace(object):
def term(self, name):
uri = self.__uris.get(name)
if uri is None:
- raise KeyError(
- "term '{}' not in namespace '{}'".format(name, self.uri)
- )
+ raise KeyError("term '{}' not in namespace '{}'".format(name, self.uri))
else:
return uri
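
Unlike the open Namespace class, term lookups here are checked against the fixed term list, so typos fail fast. A quick sketch using the FOAF namespace defined below:

    from rdflib.namespace import FOAF

    print(FOAF.name)  # http://xmlns.com/foaf/0.1/name
    try:
        FOAF.doesNotExist
    except KeyError as e:
        print(e)  # term 'doesNotExist' not in namespace 'http://xmlns.com/foaf/0.1/'
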
@@ -204,31 +213,49 @@ class _RDFNamespace(ClosedNamespace):
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
terms=[
# Syntax Names
- "RDF", "Description", "ID", "about", "parseType",
- "resource", "li", "nodeID", "datatype",
-
+ "RDF",
+ "Description",
+ "ID",
+ "about",
+ "parseType",
+ "resource",
+ "li",
+ "nodeID",
+ "datatype",
# RDF Classes
- "Seq", "Bag", "Alt", "Statement", "Property",
- "List", "PlainLiteral",
-
+ "Seq",
+ "Bag",
+ "Alt",
+ "Statement",
+ "Property",
+ "List",
+ "PlainLiteral",
# RDF Properties
- "subject", "predicate", "object", "type",
- "value", "first", "rest",
+ "subject",
+ "predicate",
+ "object",
+ "type",
+ "value",
+ "first",
+ "rest",
# and _n where n is a non-negative integer
-
# RDF Resources
"nil",
-
# Added in RDF 1.1
- "XMLLiteral", "HTML", "langString",
-
+ "XMLLiteral",
+ "HTML",
+ "langString",
# Added in JSON-LD 1.1
- "JSON", "CompoundLiteral", "language", "direction"]
+ "JSON",
+ "CompoundLiteral",
+ "language",
+ "direction",
+ ],
)
def term(self, name):
# Container membership properties
- if name.startswith('_'):
+ if name.startswith("_"):
try:
i = int(name[1:])
except ValueError:
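
The `_`-prefix branch above is what makes the container membership properties (rdf:_1, rdf:_2, ...) resolvable without listing them as terms:

    from rdflib import URIRef
    from rdflib.namespace import RDF

    assert RDF._1 == URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#_1")
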
@@ -246,79 +273,219 @@ RDF = _RDFNamespace()
RDFS = ClosedNamespace(
uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"),
terms=[
- "Resource", "Class", "subClassOf", "subPropertyOf", "comment", "label",
- "domain", "range", "seeAlso", "isDefinedBy", "Literal", "Container",
- "ContainerMembershipProperty", "member", "Datatype"]
+ "Resource",
+ "Class",
+ "subClassOf",
+ "subPropertyOf",
+ "comment",
+ "label",
+ "domain",
+ "range",
+ "seeAlso",
+ "isDefinedBy",
+ "Literal",
+ "Container",
+ "ContainerMembershipProperty",
+ "member",
+ "Datatype",
+ ],
)
-OWL = Namespace('http://www.w3.org/2002/07/owl#')
+OWL = Namespace("http://www.w3.org/2002/07/owl#")
XSD = Namespace(_XSD_PFX)
-CSVW = Namespace('http://www.w3.org/ns/csvw#')
-DC = Namespace('http://purl.org/dc/elements/1.1/')
-DCAT = Namespace('http://www.w3.org/ns/dcat#')
-DCTERMS = Namespace('http://purl.org/dc/terms/')
-DOAP = Namespace('http://usefulinc.com/ns/doap#')
+CSVW = Namespace("http://www.w3.org/ns/csvw#")
+DC = Namespace("http://purl.org/dc/elements/1.1/")
+DCAT = Namespace("http://www.w3.org/ns/dcat#")
+DCTERMS = Namespace("http://purl.org/dc/terms/")
+DOAP = Namespace("http://usefulinc.com/ns/doap#")
FOAF = ClosedNamespace(
- uri=URIRef('http://xmlns.com/foaf/0.1/'),
+ uri=URIRef("http://xmlns.com/foaf/0.1/"),
terms=[
# all taken from http://xmlns.com/foaf/spec/
- 'Agent', 'Person', 'name', 'title', 'img',
- 'depiction', 'depicts', 'familyName',
- 'givenName', 'knows', 'based_near', 'age', 'made',
- 'maker', 'primaryTopic', 'primaryTopicOf', 'Project', 'Organization',
- 'Group', 'member', 'Document', 'Image', 'nick',
- 'mbox', 'homepage', 'weblog', 'openid', 'jabberID',
- 'mbox_sha1sum', 'interest', 'topic_interest', 'topic', 'page',
- 'workplaceHomepage', 'workInfoHomepage', 'schoolHomepage', 'publications', 'currentProject',
- 'pastProject', 'account', 'OnlineAccount', 'accountName', 'accountServiceHomepage',
- 'PersonalProfileDocument', 'tipjar', 'sha1', 'thumbnail', 'logo'
- ]
+ "Agent",
+ "Person",
+ "name",
+ "title",
+ "img",
+ "depiction",
+ "depicts",
+ "familyName",
+ "givenName",
+ "knows",
+ "based_near",
+ "age",
+ "made",
+ "maker",
+ "primaryTopic",
+ "primaryTopicOf",
+ "Project",
+ "Organization",
+ "Group",
+ "member",
+ "Document",
+ "Image",
+ "nick",
+ "mbox",
+ "homepage",
+ "weblog",
+ "openid",
+ "jabberID",
+ "mbox_sha1sum",
+ "interest",
+ "topic_interest",
+ "topic",
+ "page",
+ "workplaceHomepage",
+ "workInfoHomepage",
+ "schoolHomepage",
+ "publications",
+ "currentProject",
+ "pastProject",
+ "account",
+ "OnlineAccount",
+ "accountName",
+ "accountServiceHomepage",
+ "PersonalProfileDocument",
+ "tipjar",
+ "sha1",
+ "thumbnail",
+ "logo",
+ ],
)
-ODRL2 = Namespace('http://www.w3.org/ns/odrl/2/')
-ORG = Namespace('http://www.w3.org/ns/org#')
+ODRL2 = Namespace("http://www.w3.org/ns/odrl/2/")
+ORG = Namespace("http://www.w3.org/ns/org#")
PROV = ClosedNamespace(
- uri=URIRef('http://www.w3.org/ns/prov#'),
+ uri=URIRef("http://www.w3.org/ns/prov#"),
terms=[
- 'Entity', 'Activity', 'Agent', 'wasGeneratedBy', 'wasDerivedFrom',
- 'wasAttributedTo', 'startedAtTime', 'used', 'wasInformedBy', 'endedAtTime',
- 'wasAssociatedWith', 'actedOnBehalfOf', 'Collection', 'EmptyCollection', 'Bundle',
- 'Person', 'SoftwareAgent', 'Organization', 'Location', 'alternateOf',
- 'specializationOf', 'generatedAtTime', 'hadPrimarySource', 'value', 'wasQuotedFrom',
- 'wasRevisionOf', 'invalidatedAtTime', 'wasInvalidatedBy', 'hadMember', 'wasStartedBy',
- 'wasEndedBy', 'invalidated', 'influenced', 'atLocation', 'generated',
- 'Influence', 'EntityInfluence', 'Usage', 'Start', 'End',
- 'Derivation', 'PrimarySource', 'Quotation', 'Revision', 'ActivityInfluence',
- 'Generation', 'Communication', 'Invalidation', 'AgentInfluence',
- 'Attribution', 'Association', 'Plan', 'Delegation', 'InstantaneousEvent',
- 'Role', 'wasInfluencedBy', 'qualifiedInfluence', 'qualifiedGeneration', 'qualifiedDerivation',
- 'qualifiedPrimarySource', 'qualifiedQuotation', 'qualifiedRevision', 'qualifiedAttribution',
- 'qualifiedInvalidation', 'qualifiedStart', 'qualifiedUsage', 'qualifiedCommunication', 'qualifiedAssociation',
- 'qualifiedEnd', 'qualifiedDelegation', 'influencer', 'entity', 'hadUsage', 'hadGeneration',
- 'activity', 'agent', 'hadPlan', 'hadActivity', 'atTime', 'hadRole'
- ]
+ "Entity",
+ "Activity",
+ "Agent",
+ "wasGeneratedBy",
+ "wasDerivedFrom",
+ "wasAttributedTo",
+ "startedAtTime",
+ "used",
+ "wasInformedBy",
+ "endedAtTime",
+ "wasAssociatedWith",
+ "actedOnBehalfOf",
+ "Collection",
+ "EmptyCollection",
+ "Bundle",
+ "Person",
+ "SoftwareAgent",
+ "Organization",
+ "Location",
+ "alternateOf",
+ "specializationOf",
+ "generatedAtTime",
+ "hadPrimarySource",
+ "value",
+ "wasQuotedFrom",
+ "wasRevisionOf",
+ "invalidatedAtTime",
+ "wasInvalidatedBy",
+ "hadMember",
+ "wasStartedBy",
+ "wasEndedBy",
+ "invalidated",
+ "influenced",
+ "atLocation",
+ "generated",
+ "Influence",
+ "EntityInfluence",
+ "Usage",
+ "Start",
+ "End",
+ "Derivation",
+ "PrimarySource",
+ "Quotation",
+ "Revision",
+ "ActivityInfluence",
+ "Generation",
+ "Communication",
+ "Invalidation",
+ "AgentInfluence",
+ "Attribution",
+ "Association",
+ "Plan",
+ "Delegation",
+ "InstantaneousEvent",
+ "Role",
+ "wasInfluencedBy",
+ "qualifiedInfluence",
+ "qualifiedGeneration",
+ "qualifiedDerivation",
+ "qualifiedPrimarySource",
+ "qualifiedQuotation",
+ "qualifiedRevision",
+ "qualifiedAttribution",
+ "qualifiedInvalidation",
+ "qualifiedStart",
+ "qualifiedUsage",
+ "qualifiedCommunication",
+ "qualifiedAssociation",
+ "qualifiedEnd",
+ "qualifiedDelegation",
+ "influencer",
+ "entity",
+ "hadUsage",
+ "hadGeneration",
+ "activity",
+ "agent",
+ "hadPlan",
+ "hadActivity",
+ "atTime",
+ "hadRole",
+ ],
)
-PROF = Namespace('http://www.w3.org/ns/dx/prof/')
-SDO = Namespace('https://schema.org/')
-SH = Namespace('http://www.w3.org/ns/shacl#')
+PROF = Namespace("http://www.w3.org/ns/dx/prof/")
+SDO = Namespace("https://schema.org/")
+SH = Namespace("http://www.w3.org/ns/shacl#")
SKOS = ClosedNamespace(
- uri=URIRef('http://www.w3.org/2004/02/skos/core#'),
+ uri=URIRef("http://www.w3.org/2004/02/skos/core#"),
terms=[
# all taken from https://www.w3.org/TR/skos-reference/#L1302
- 'Concept', 'ConceptScheme', 'inScheme', 'hasTopConcept', 'topConceptOf',
- 'altLabel', 'hiddenLabel', 'prefLabel', 'notation', 'changeNote',
- 'definition', 'editorialNote', 'example', 'historyNote', 'note',
- 'scopeNote', 'broader', 'broaderTransitive', 'narrower', 'narrowerTransitive',
- 'related', 'semanticRelation', 'Collection', 'OrderedCollection', 'member',
- 'memberList', 'broadMatch', 'closeMatch', 'exactMatch', 'mappingRelation',
- 'narrowMatch', 'relatedMatch'
- ]
+ "Concept",
+ "ConceptScheme",
+ "inScheme",
+ "hasTopConcept",
+ "topConceptOf",
+ "altLabel",
+ "hiddenLabel",
+ "prefLabel",
+ "notation",
+ "changeNote",
+ "definition",
+ "editorialNote",
+ "example",
+ "historyNote",
+ "note",
+ "scopeNote",
+ "broader",
+ "broaderTransitive",
+ "narrower",
+ "narrowerTransitive",
+ "related",
+ "semanticRelation",
+ "Collection",
+ "OrderedCollection",
+ "member",
+ "memberList",
+ "broadMatch",
+ "closeMatch",
+ "exactMatch",
+ "mappingRelation",
+ "narrowMatch",
+ "relatedMatch",
+ ],
)
-SOSA = Namespace('http://www.w3.org/ns/ssn/')
-SSN = Namespace('http://www.w3.org/ns/sosa/')
-TIME = Namespace('http://www.w3.org/2006/time#')
-VOID = Namespace('http://rdfs.org/ns/void#')
+SOSA = Namespace("http://www.w3.org/ns/sosa/")
+SSN = Namespace("http://www.w3.org/ns/ssn/")
+TIME = Namespace("http://www.w3.org/2006/time#")
+VOID = Namespace("http://rdfs.org/ns/void#")
class NamespaceManager(object):
@@ -376,6 +543,7 @@ class NamespaceManager(object):
def __get_store(self):
return self.graph.store
+
store = property(__get_store)
def qname(self, uri):
@@ -387,10 +555,10 @@ class NamespaceManager(object):
def qname_strict(self, uri):
prefix, namespace, name = self.compute_qname_strict(uri)
- if prefix == '':
+ if prefix == "":
return name
else:
- return ':'.join((prefix, name))
+ return ":".join((prefix, name))
def normalizeUri(self, rdfTerm):
"""
@@ -415,13 +583,15 @@ class NamespaceManager(object):
return "<%s>" % rdfTerm
else:
qNameParts = self.compute_qname(rdfTerm)
- return ':'.join([qNameParts[0], qNameParts[-1]])
+ return ":".join([qNameParts[0], qNameParts[-1]])
def compute_qname(self, uri, generate=True):
if not _is_valid_uri(uri):
raise ValueError(
- '"{}" does not look like a valid URI, cannot serialize this. Did you want to urlencode it?'.format(uri)
+ '"{}" does not look like a valid URI, cannot serialize this. Did you want to urlencode it?'.format(
+ uri
+ )
)
if uri not in self.__cache:
@@ -439,7 +609,7 @@ class NamespaceManager(object):
pl_namespace = get_longest_namespace(self.__strie[namespace], uri)
if pl_namespace is not None:
namespace = pl_namespace
- name = uri[len(namespace):]
+ name = uri[len(namespace) :]
namespace = URIRef(namespace)
prefix = self.store.prefix(namespace) # warning multiple prefixes problem
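
The cached qname machinery above is what drives prefixed names during serialization. A minimal sketch:

    from rdflib import Graph, URIRef

    g = Graph()
    g.bind("foaf", "http://xmlns.com/foaf/0.1/")
    nm = g.namespace_manager
    print(nm.qname(URIRef("http://xmlns.com/foaf/0.1/name")))  # foaf:name
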
@@ -471,32 +641,38 @@ class NamespaceManager(object):
try:
namespace, name = split_uri(uri, NAME_START_CATEGORIES)
except ValueError as e:
- message = ('This graph cannot be serialized to a strict format '
- 'because there is no valid way to shorten {}'.format(uri))
+ message = (
+ "This graph cannot be serialized to a strict format "
+ "because there is no valid way to shorten {}".format(uri)
+ )
raise ValueError(message)
# omitted for strict since NCNames cannot be empty
- #namespace = URIRef(uri)
- #prefix = self.store.prefix(namespace)
- #if not prefix:
- #raise e
+ # namespace = URIRef(uri)
+ # prefix = self.store.prefix(namespace)
+ # if not prefix:
+ # raise e
if namespace not in self.__strie:
insert_strie(self.__strie, self.__trie, namespace)
# omitted for strict
- #if self.__strie[namespace]:
- #pl_namespace = get_longest_namespace(self.__strie[namespace], uri)
- #if pl_namespace is not None:
- #namespace = pl_namespace
- #name = uri[len(namespace):]
+ # if self.__strie[namespace]:
+ # pl_namespace = get_longest_namespace(self.__strie[namespace], uri)
+ # if pl_namespace is not None:
+ # namespace = pl_namespace
+ # name = uri[len(namespace):]
namespace = URIRef(namespace)
- prefix = self.store.prefix(namespace) # warning multiple prefixes problem
+ prefix = self.store.prefix(
+ namespace
+ ) # warning multiple prefixes problem
if prefix is None:
if not generate:
raise KeyError(
- "No known prefix for {} and generate=False".format(namespace)
+ "No known prefix for {} and generate=False".format(
+ namespace
+ )
)
num = 1
while 1:
@@ -522,7 +698,7 @@ class NamespaceManager(object):
namespace = URIRef(str(namespace))
        # TODO: when documenting, explain in which cases override applies
if prefix is None:
- prefix = ''
+ prefix = ""
bound_namespace = self.store.namespace(prefix)
# Check if the bound_namespace contains a URI
# and if so convert it into a URIRef for comparison
@@ -581,6 +757,7 @@ class NamespaceManager(object):
result = "%s#" % result
return URIRef(result)
+
# From: http://www.w3.org/TR/REC-xml#NT-CombiningChar
#
# * Name start characters must have one of the categories Ll, Lu, Lo,
@@ -616,7 +793,7 @@ class NamespaceManager(object):
NAME_START_CATEGORIES = ["Ll", "Lu", "Lo", "Lt", "Nl"]
-SPLIT_START_CATEGORIES = NAME_START_CATEGORIES + ['Nd']
+SPLIT_START_CATEGORIES = NAME_START_CATEGORIES + ["Nd"]
NAME_CATEGORIES = NAME_START_CATEGORIES + ["Mc", "Me", "Mn", "Lm", "Nd"]
ALLOWED_NAME_CHARS = [u"\u00B7", u"\u0387", u"-", u".", u"_", u":"]
@@ -635,7 +812,7 @@ def is_ncname(name):
for i in range(1, len(name)):
c = name[i]
if not category(c) in NAME_CATEGORIES:
- if c != ':' and c in ALLOWED_NAME_CHARS:
+ if c != ":" and c in ALLOWED_NAME_CHARS:
continue
return 0
# if in compatibility area
@@ -670,6 +847,7 @@ def split_uri(uri, split_start=SPLIT_START_CATEGORIES):
break
raise ValueError("Can't split '{}'".format(uri))
+
def insert_trie(trie, value): # aka get_subtrie_or_insert
""" Insert a value into the trie if it is not already contained in the trie.
Return the subtree for the value regardless of whether it is a new value
@@ -684,16 +862,20 @@ def insert_trie(trie, value): # aka get_subtrie_or_insert
if not multi_check:
trie[value] = {}
multi_check = True # there can be multiple longer existing prefixes
- dict_ = trie.pop(key) # does not break strie since key<->dict_ remains unchanged
+ dict_ = trie.pop(
+ key
+ ) # does not break strie since key<->dict_ remains unchanged
trie[value][key] = dict_
if value not in trie:
trie[value] = {}
return trie[value]
+
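
The trie keeps longer namespaces nested under their shortest existing prefix, which is what lets compute_qname pick the longest match. A small sketch of the invariant:

    from rdflib.namespace import insert_trie

    trie = {}
    insert_trie(trie, "http://example.org/")
    insert_trie(trie, "http://example.org/vocab/")
    # the longer namespace ends up nested under the shorter one
    assert trie == {"http://example.org/": {"http://example.org/vocab/": {}}}
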
def insert_strie(strie, trie, value):
if value not in strie:
strie[value] = insert_trie(trie, value)
+
def get_longest_namespace(trie, value):
for key in trie:
if value.startswith(key):
diff --git a/rdflib/parser.py b/rdflib/parser.py
index 39a6bdcc..9e501c03 100644
--- a/rdflib/parser.py
+++ b/rdflib/parser.py
@@ -19,7 +19,6 @@ import sys
from io import BytesIO
-
from urllib.request import pathname2url
from urllib.request import Request
from urllib.request import url2pathname
@@ -33,12 +32,15 @@ from rdflib.term import URIRef
from rdflib.namespace import Namespace
__all__ = [
- 'Parser', 'InputSource', 'StringInputSource',
- 'URLInputSource', 'FileInputSource']
+ "Parser",
+ "InputSource",
+ "StringInputSource",
+ "URLInputSource",
+ "FileInputSource",
+]
class Parser(object):
-
def __init__(self):
pass
@@ -58,7 +60,7 @@ class InputSource(xmlreader.InputSource, object):
def close(self):
f = self.getByteStream()
- if f and hasattr(f, 'close'):
+ if f and hasattr(f, "close"):
f.close()
@@ -77,8 +79,7 @@ class StringInputSource(InputSource):
headers = {
- 'User-agent':
- 'rdflib-%s (http://rdflib.net/; eikeon@eikeon.com)' % __version__
+ "User-agent": "rdflib-%s (http://rdflib.net/; eikeon@eikeon.com)" % __version__
}
@@ -93,28 +94,30 @@ class URLInputSource(InputSource):
# copy headers to change
myheaders = dict(headers)
- if format == 'application/rdf+xml':
- myheaders['Accept'] = 'application/rdf+xml, */*;q=0.1'
- elif format == 'n3':
- myheaders['Accept'] = 'text/n3, */*;q=0.1'
- elif format == 'turtle':
- myheaders['Accept'] = 'text/turtle,application/x-turtle, */*;q=0.1'
- elif format == 'nt':
- myheaders['Accept'] = 'text/plain, */*;q=0.1'
- elif format == 'json-ld':
- myheaders['Accept'] = (
- 'application/ld+json, application/json;q=0.9, */*;q=0.1')
+ if format == "application/rdf+xml":
+ myheaders["Accept"] = "application/rdf+xml, */*;q=0.1"
+ elif format == "n3":
+ myheaders["Accept"] = "text/n3, */*;q=0.1"
+ elif format == "turtle":
+ myheaders["Accept"] = "text/turtle,application/x-turtle, */*;q=0.1"
+ elif format == "nt":
+ myheaders["Accept"] = "text/plain, */*;q=0.1"
+ elif format == "json-ld":
+ myheaders[
+ "Accept"
+ ] = "application/ld+json, application/json;q=0.9, */*;q=0.1"
else:
- myheaders['Accept'] = (
- 'application/rdf+xml,text/rdf+n3;q=0.9,' +
- 'application/xhtml+xml;q=0.5, */*;q=0.1')
+ myheaders["Accept"] = (
+ "application/rdf+xml,text/rdf+n3;q=0.9,"
+ + "application/xhtml+xml;q=0.5, */*;q=0.1"
+ )
req = Request(system_id, None, myheaders)
file = urlopen(req)
# Fix for issue 130 https://github.com/RDFLib/rdflib/issues/130
- self.url = file.geturl() # in case redirections took place
+ self.url = file.geturl() # in case redirections took place
self.setPublicId(self.url)
- self.content_type = file.info().get('content-type')
+ self.content_type = file.info().get("content-type")
if self.content_type is not None:
self.content_type = self.content_type.split(";", 1)[0]
self.setByteStream(file)
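
The format-to-Accept mapping above is exercised whenever a graph is parsed from a URL. A sketch (the URL is hypothetical):

    from rdflib import Graph

    g = Graph()
    # format="turtle" makes URLInputSource send
    # Accept: text/turtle,application/x-turtle, */*;q=0.1
    g.parse("http://example.org/data.ttl", format="turtle")
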
@@ -126,7 +129,6 @@ class URLInputSource(InputSource):
class FileInputSource(InputSource):
-
def __init__(self, file):
base = urljoin("file:", pathname2url(os.getcwd()))
system_id = URIRef(urljoin("file:", pathname2url(file.name)), base=base)
@@ -139,23 +141,27 @@ class FileInputSource(InputSource):
return repr(self.file)
-def create_input_source(source=None, publicID=None,
- location=None, file=None, data=None, format=None):
+def create_input_source(
+ source=None, publicID=None, location=None, file=None, data=None, format=None
+):
"""
Return an appropriate InputSource instance for the given
parameters.
"""
# test that exactly one of source, location, file, and data is not None.
- if sum((
- source is not None,
- location is not None,
- file is not None,
- data is not None,
- )) != 1:
- raise ValueError(
- 'exactly one of source, location, file or data must be given'
+ if (
+ sum(
+ (
+ source is not None,
+ location is not None,
+ file is not None,
+ data is not None,
+ )
)
+ != 1
+ ):
+ raise ValueError("exactly one of source, location, file or data must be given")
input_source = None
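
The exactly-one check means callers must pick a single input channel. A quick sketch of both sides of the rule:

    from rdflib.parser import create_input_source

    src = create_input_source(data="<urn:a> <urn:b> <urn:c> .", format="turtle")
    try:
        create_input_source(data="x", location="http://example.org/x.ttl")
    except ValueError as e:
        print(e)  # exactly one of source, location, file or data must be given
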
@@ -174,8 +180,9 @@ def create_input_source(source=None, publicID=None,
elif hasattr(f, "name"):
input_source.setSystemId(f.name)
else:
- raise Exception("Unexpected type '%s' for source '%s'" %
- (type(source), source))
+ raise Exception(
+ "Unexpected type '%s' for source '%s'" % (type(source), source)
+ )
absolute_location = None # Further to fix for issue 130
@@ -200,7 +207,7 @@ def create_input_source(source=None, publicID=None,
if data is not None:
if isinstance(data, str):
- data = data.encode('utf-8')
+ data = data.encode("utf-8")
input_source = StringInputSource(data)
auto_close = True
diff --git a/rdflib/paths.py b/rdflib/paths.py
index 28d3e603..d2fccd22 100644
--- a/rdflib/paths.py
+++ b/rdflib/paths.py
@@ -1,6 +1,3 @@
-
-
-
__doc__ = """
This module implements the SPARQL 1.1 Property path operators, as
@@ -191,9 +188,9 @@ from rdflib.term import URIRef, Node
# property paths
-ZeroOrMore = '*'
-OneOrMore = '+'
-ZeroOrOne = '?'
+ZeroOrMore = "*"
+OneOrMore = "+"
+ZeroOrOne = "?"
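
These modifier constants combine with the monkey-patched operators on URIRef (see the end of this module) so SPARQL 1.1 property paths can be written in plain Python. A sketch, assuming a populated graph g:

    from rdflib import Graph, URIRef
    from rdflib.namespace import FOAF
    from rdflib.paths import ZeroOrMore

    g = Graph()
    tim = URIRef("http://example.org/tim")
    # foaf:knows*: zero or more hops; ~p, p1 / p2 and p1 | p2 also work
    for reachable in g.objects(tim, FOAF.knows * ZeroOrMore):
        print(reachable)
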
class Path(object):
@@ -208,14 +205,16 @@ class Path(object):
def __lt__(self, other):
if not isinstance(other, (Path, Node)):
- raise TypeError('unorderable types: %s() < %s()' % (
- repr(self), repr(other)))
+ raise TypeError(
+ "unorderable types: %s() < %s()" % (repr(self), repr(other))
+ )
return repr(self) < repr(other)
def __le__(self, other):
if not isinstance(other, (Path, Node)):
- raise TypeError('unorderable types: %s() < %s()' % (
- repr(self), repr(other)))
+ raise TypeError(
+ "unorderable types: %s() < %s()" % (repr(self), repr(other))
+ )
return repr(self) <= repr(other)
def __ne__(self, other):
@@ -229,7 +228,6 @@ class Path(object):
class InvPath(Path):
-
def __init__(self, arg):
self.arg = arg
@@ -241,7 +239,7 @@ class InvPath(Path):
return "Path(~%s)" % (self.arg,)
def n3(self):
- return '^%s' % self.arg.n3()
+ return "^%s" % self.arg.n3()
class SequencePath(Path):
@@ -285,7 +283,7 @@ class SequencePath(Path):
return "Path(%s)" % " / ".join(str(x) for x in self.args)
def n3(self):
- return '/'.join(a.n3() for a in self.args)
+ return "/".join(a.n3() for a in self.args)
class AlternativePath(Path):
@@ -306,7 +304,7 @@ class AlternativePath(Path):
return "Path(%s)" % " | ".join(str(x) for x in self.args)
def n3(self):
- return '|'.join(a.n3() for a in self.args)
+ return "|".join(a.n3() for a in self.args)
class MulPath(Path):
@@ -324,7 +322,7 @@ class MulPath(Path):
self.zero = False
self.more = True
else:
- raise Exception('Unknown modifier %s' % mod)
+ raise Exception("Unknown modifier %s" % mod)
def eval(self, graph, subj=None, obj=None, first=True):
if self.zero and first:
@@ -387,7 +385,7 @@ class MulPath(Path):
f = list(_fwd(s, None, set()))
for s1, o1 in f:
assert s1 == s
- yield(s1, o1)
+ yield (s1, o1)
done = set() # the spec does by defn. not allow duplicates
if subj:
@@ -410,7 +408,7 @@ class MulPath(Path):
return "Path(%s%s)" % (self.path, self.mod)
def n3(self):
- return '%s%s' % (self.path.n3(), self.mod)
+ return "%s%s" % (self.path.n3(), self.mod)
class NegatedPath(Path):
@@ -421,8 +419,9 @@ class NegatedPath(Path):
self.args = arg.args
else:
raise Exception(
- 'Can only negate URIRefs, InvPaths or ' +
- 'AlternativePaths, not: %s' % (arg,))
+ "Can only negate URIRefs, InvPaths or "
+ + "AlternativePaths, not: %s" % (arg,)
+ )
def eval(self, graph, subj=None, obj=None):
for s, p, o in graph.triples((subj, None, obj)):
@@ -434,7 +433,7 @@ class NegatedPath(Path):
if (o, a.arg, s) in graph:
break
else:
- raise Exception('Invalid path in NegatedPath: %s' % a)
+ raise Exception("Invalid path in NegatedPath: %s" % a)
else:
yield s, o
@@ -442,7 +441,7 @@ class NegatedPath(Path):
return "Path(! %s)" % ",".join(str(x) for x in self.args)
def n3(self):
- return '!(%s)' % ('|'.join(self.args))
+ return "!(%s)" % ("|".join(self.args))
class PathList(list):
@@ -454,7 +453,7 @@ def path_alternative(self, other):
alternative path
"""
if not isinstance(other, (URIRef, Path)):
- raise Exception('Only URIRefs or Paths can be in paths!')
+ raise Exception("Only URIRefs or Paths can be in paths!")
return AlternativePath(self, other)
@@ -463,7 +462,7 @@ def path_sequence(self, other):
sequence path
"""
if not isinstance(other, (URIRef, Path)):
- raise Exception('Only URIRefs or Paths can be in paths!')
+ raise Exception("Only URIRefs or Paths can be in paths!")
return SequencePath(self, other)
@@ -492,9 +491,10 @@ def neg_path(p):
return NegatedPath(p)
-if __name__ == '__main__':
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
else:
# monkey patch
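
Once the monkey patch applied here is in effect, the operators compose directly on URIRefs; a sketch under assumed example data:

    from rdflib import Graph, Namespace
    from rdflib.paths import ZeroOrMore

    EX = Namespace("http://example.org/")
    g = Graph()
    g.parse(
        data="@prefix ex: <http://example.org/> . "
             "ex:a ex:knows ex:b . ex:b ex:knows ex:c .",
        format="turtle",
    )

    path = EX.knows * ZeroOrMore          # MulPath: zero or more 'knows' steps
    print(list(g.objects(EX.a, path)))    # ex:a, ex:b, ex:c
    inv = ~EX.knows                       # InvPath
    seq = EX.knows / EX.knows             # SequencePath
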
diff --git a/rdflib/plugin.py b/rdflib/plugin.py
index 2653b188..cc5b6d35 100644
--- a/rdflib/plugin.py
+++ b/rdflib/plugin.py
@@ -31,22 +31,27 @@ from __future__ import print_function
from rdflib.store import Store
from rdflib.parser import Parser
from rdflib.serializer import Serializer
-from rdflib.query import ResultParser, ResultSerializer, \
- Processor, Result, UpdateProcessor
+from rdflib.query import (
+ ResultParser,
+ ResultSerializer,
+ Processor,
+ Result,
+ UpdateProcessor,
+)
from rdflib.exceptions import Error
-__all__ = [
- 'register', 'get', 'plugins', 'PluginException', 'Plugin', 'PKGPlugin']
+__all__ = ["register", "get", "plugins", "PluginException", "Plugin", "PKGPlugin"]
-entry_points = {'rdf.plugins.store': Store,
- 'rdf.plugins.serializer': Serializer,
- 'rdf.plugins.parser': Parser,
- 'rdf.plugins.resultparser': ResultParser,
- 'rdf.plugins.resultserializer': ResultSerializer,
- 'rdf.plugins.queryprocessor': Processor,
- 'rdf.plugins.queryresult': Result,
- 'rdf.plugins.updateprocessor': UpdateProcessor
- }
+entry_points = {
+ "rdf.plugins.store": Store,
+ "rdf.plugins.serializer": Serializer,
+ "rdf.plugins.parser": Parser,
+ "rdf.plugins.resultparser": ResultParser,
+ "rdf.plugins.resultserializer": ResultSerializer,
+ "rdf.plugins.queryprocessor": Processor,
+ "rdf.plugins.queryresult": Result,
+ "rdf.plugins.updateprocessor": UpdateProcessor,
+}
_plugins = {}
@@ -56,7 +61,6 @@ class PluginException(Error):
class Plugin(object):
-
def __init__(self, name, kind, module_path, class_name):
self.name = name
self.kind = kind
@@ -72,7 +76,6 @@ class Plugin(object):
class PKGPlugin(Plugin):
-
def __init__(self, name, kind, ep):
self.name = name
self.kind = kind
@@ -102,8 +105,7 @@ def get(name, kind):
try:
p = _plugins[(name, kind)]
except KeyError:
- raise PluginException(
- "No plugin registered for (%s, %s)" % (name, kind))
+ raise PluginException("No plugin registered for (%s, %s)" % (name, kind))
return p.getClass()
@@ -125,202 +127,182 @@ def plugins(name=None, kind=None):
Pass in name and kind to filter... else leave None to match all.
"""
for p in _plugins.values():
- if (name is None or name == p.name) and (
- kind is None or kind == p.kind):
+ if (name is None or name == p.name) and (kind is None or kind == p.kind):
yield p
+register("default", Store, "rdflib.plugins.memory", "IOMemory")
+register("IOMemory", Store, "rdflib.plugins.memory", "IOMemory")
+register("Auditable", Store, "rdflib.plugins.stores.auditable", "AuditableStore")
+register("Concurrent", Store, "rdflib.plugins.stores.concurrent", "ConcurrentStore")
+register("Sleepycat", Store, "rdflib.plugins.sleepycat", "Sleepycat")
+register("SPARQLStore", Store, "rdflib.plugins.stores.sparqlstore", "SPARQLStore")
+register(
+ "SPARQLUpdateStore", Store, "rdflib.plugins.stores.sparqlstore", "SPARQLUpdateStore"
+)
+
+register(
+ "application/rdf+xml",
+ Serializer,
+ "rdflib.plugins.serializers.rdfxml",
+ "XMLSerializer",
+)
+register("xml", Serializer, "rdflib.plugins.serializers.rdfxml", "XMLSerializer")
+register("text/n3", Serializer, "rdflib.plugins.serializers.n3", "N3Serializer")
+register("n3", Serializer, "rdflib.plugins.serializers.n3", "N3Serializer")
+register(
+ "text/turtle", Serializer, "rdflib.plugins.serializers.turtle", "TurtleSerializer"
+)
+register("turtle", Serializer, "rdflib.plugins.serializers.turtle", "TurtleSerializer")
+register("ttl", Serializer, "rdflib.plugins.serializers.turtle", "TurtleSerializer")
+register("trig", Serializer, "rdflib.plugins.serializers.trig", "TrigSerializer")
+register(
+ "application/n-triples", Serializer, "rdflib.plugins.serializers.nt", "NTSerializer"
+)
+register("ntriples", Serializer, "rdflib.plugins.serializers.nt", "NTSerializer")
+register("nt", Serializer, "rdflib.plugins.serializers.nt", "NTSerializer")
+register("nt11", Serializer, "rdflib.plugins.serializers.nt", "NT11Serializer")
+
+register(
+ "pretty-xml", Serializer, "rdflib.plugins.serializers.rdfxml", "PrettyXMLSerializer"
+)
+register("trix", Serializer, "rdflib.plugins.serializers.trix", "TriXSerializer")
+register(
+ "application/trix", Serializer, "rdflib.plugins.serializers.trix", "TriXSerializer"
+)
+register(
+ "application/n-quads",
+ Serializer,
+ "rdflib.plugins.serializers.nquads",
+ "NQuadsSerializer",
+)
+register("nquads", Serializer, "rdflib.plugins.serializers.nquads", "NQuadsSerializer")
+
+register("application/rdf+xml", Parser, "rdflib.plugins.parsers.rdfxml", "RDFXMLParser")
+register("xml", Parser, "rdflib.plugins.parsers.rdfxml", "RDFXMLParser")
+register("text/n3", Parser, "rdflib.plugins.parsers.notation3", "N3Parser")
+register("n3", Parser, "rdflib.plugins.parsers.notation3", "N3Parser")
+register("text/turtle", Parser, "rdflib.plugins.parsers.notation3", "TurtleParser")
+register("turtle", Parser, "rdflib.plugins.parsers.notation3", "TurtleParser")
+register("ttl", Parser, "rdflib.plugins.parsers.notation3", "TurtleParser")
+register("application/n-triples", Parser, "rdflib.plugins.parsers.nt", "NTParser")
+register("ntriples", Parser, "rdflib.plugins.parsers.nt", "NTParser")
+register("nt", Parser, "rdflib.plugins.parsers.nt", "NTParser")
+register("nt11", Parser, "rdflib.plugins.parsers.nt", "NTParser")
+register("application/n-quads", Parser, "rdflib.plugins.parsers.nquads", "NQuadsParser")
+register("nquads", Parser, "rdflib.plugins.parsers.nquads", "NQuadsParser")
+register("application/trix", Parser, "rdflib.plugins.parsers.trix", "TriXParser")
+register("trix", Parser, "rdflib.plugins.parsers.trix", "TriXParser")
+register("trig", Parser, "rdflib.plugins.parsers.trig", "TrigParser")
+
+
+register("sparql", Result, "rdflib.plugins.sparql.processor", "SPARQLResult")
+register("sparql", Processor, "rdflib.plugins.sparql.processor", "SPARQLProcessor")
+
+register(
+ "sparql",
+ UpdateProcessor,
+ "rdflib.plugins.sparql.processor",
+ "SPARQLUpdateProcessor",
+)
+
+
+register(
+ "xml",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.xmlresults",
+ "XMLResultSerializer",
+)
+register(
+ "application/sparql-results+xml",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.xmlresults",
+ "XMLResultSerializer",
+)
+register(
+ "txt",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.txtresults",
+ "TXTResultSerializer",
+)
+register(
+ "json",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.jsonresults",
+ "JSONResultSerializer",
+)
+register(
+ "application/sparql-results+json",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.jsonresults",
+ "JSONResultSerializer",
+)
+register(
+ "csv",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.csvresults",
+ "CSVResultSerializer",
+)
+register(
+ "text/csv",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.csvresults",
+ "CSVResultSerializer",
+)
+
+register(
+ "xml", ResultParser, "rdflib.plugins.sparql.results.xmlresults", "XMLResultParser"
+)
+register(
+ "application/sparql-results+xml",
+ ResultParser,
+ "rdflib.plugins.sparql.results.xmlresults",
+ "XMLResultParser",
+)
+register(
+ "application/sparql-results+xml; charset=UTF-8",
+ ResultParser,
+ "rdflib.plugins.sparql.results.xmlresults",
+ "XMLResultParser",
+)
+
+register(
+ "application/rdf+xml",
+ ResultParser,
+ "rdflib.plugins.sparql.results.graph",
+ "GraphResultParser",
+)
+
+
+register(
+ "json",
+ ResultParser,
+ "rdflib.plugins.sparql.results.jsonresults",
+ "JSONResultParser",
+)
+register(
+ "application/sparql-results+json",
+ ResultParser,
+ "rdflib.plugins.sparql.results.jsonresults",
+ "JSONResultParser",
+)
+
register(
- 'default', Store,
- 'rdflib.plugins.memory', 'IOMemory')
-register(
- 'IOMemory', Store,
- 'rdflib.plugins.memory', 'IOMemory')
-register(
- 'Auditable', Store,
- 'rdflib.plugins.stores.auditable', 'AuditableStore')
-register(
- 'Concurrent', Store,
- 'rdflib.plugins.stores.concurrent', 'ConcurrentStore')
-register(
- 'Sleepycat', Store,
- 'rdflib.plugins.sleepycat', 'Sleepycat')
-register(
- 'SPARQLStore', Store,
- 'rdflib.plugins.stores.sparqlstore', 'SPARQLStore')
-register(
- 'SPARQLUpdateStore', Store,
- 'rdflib.plugins.stores.sparqlstore', 'SPARQLUpdateStore')
-
-register(
- 'application/rdf+xml', Serializer,
- 'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
-register(
- 'xml', Serializer,
- 'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
-register(
- 'text/n3', Serializer,
- 'rdflib.plugins.serializers.n3', 'N3Serializer')
-register(
- 'n3', Serializer,
- 'rdflib.plugins.serializers.n3', 'N3Serializer')
-register(
- 'text/turtle', Serializer,
- 'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
-register(
- 'turtle', Serializer,
- 'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
-register(
- 'ttl', Serializer,
- 'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
-register(
- 'trig', Serializer,
- 'rdflib.plugins.serializers.trig', 'TrigSerializer')
-register(
- 'application/n-triples', Serializer,
- 'rdflib.plugins.serializers.nt', 'NTSerializer')
-register(
- 'ntriples', Serializer,
- 'rdflib.plugins.serializers.nt', 'NTSerializer')
-register(
- 'nt', Serializer,
- 'rdflib.plugins.serializers.nt', 'NTSerializer')
-register(
- 'nt11', Serializer,
- 'rdflib.plugins.serializers.nt', 'NT11Serializer')
-
-register(
- 'pretty-xml', Serializer,
- 'rdflib.plugins.serializers.rdfxml', 'PrettyXMLSerializer')
-register(
- 'trix', Serializer,
- 'rdflib.plugins.serializers.trix', 'TriXSerializer')
-register(
- 'application/trix', Serializer,
- 'rdflib.plugins.serializers.trix', 'TriXSerializer')
-register(
- 'application/n-quads', Serializer,
- 'rdflib.plugins.serializers.nquads', 'NQuadsSerializer')
-register(
- 'nquads', Serializer,
- 'rdflib.plugins.serializers.nquads', 'NQuadsSerializer')
-
-register(
- 'application/rdf+xml', Parser,
- 'rdflib.plugins.parsers.rdfxml', 'RDFXMLParser')
-register(
- 'xml', Parser,
- 'rdflib.plugins.parsers.rdfxml', 'RDFXMLParser')
-register(
- 'text/n3', Parser,
- 'rdflib.plugins.parsers.notation3', 'N3Parser')
-register(
- 'n3', Parser,
- 'rdflib.plugins.parsers.notation3', 'N3Parser')
-register(
- 'text/turtle', Parser,
- 'rdflib.plugins.parsers.notation3', 'TurtleParser')
-register(
- 'turtle', Parser,
- 'rdflib.plugins.parsers.notation3', 'TurtleParser')
-register(
- 'ttl', Parser,
- 'rdflib.plugins.parsers.notation3', 'TurtleParser')
-register(
- 'application/n-triples', Parser,
- 'rdflib.plugins.parsers.nt', 'NTParser')
-register(
- 'ntriples', Parser,
- 'rdflib.plugins.parsers.nt', 'NTParser')
-register(
- 'nt', Parser,
- 'rdflib.plugins.parsers.nt', 'NTParser')
-register(
- 'nt11', Parser,
- 'rdflib.plugins.parsers.nt', 'NTParser')
-register(
- 'application/n-quads', Parser,
- 'rdflib.plugins.parsers.nquads', 'NQuadsParser')
-register(
- 'nquads', Parser,
- 'rdflib.plugins.parsers.nquads', 'NQuadsParser')
-register(
- 'application/trix', Parser,
- 'rdflib.plugins.parsers.trix', 'TriXParser')
-register(
- 'trix', Parser,
- 'rdflib.plugins.parsers.trix', 'TriXParser')
-register(
- 'trig', Parser,
- 'rdflib.plugins.parsers.trig', 'TrigParser')
-
-
-register(
- 'sparql', Result,
- 'rdflib.plugins.sparql.processor', 'SPARQLResult')
-register(
- 'sparql', Processor,
- 'rdflib.plugins.sparql.processor', 'SPARQLProcessor')
-
-register(
- 'sparql', UpdateProcessor,
- 'rdflib.plugins.sparql.processor', 'SPARQLUpdateProcessor')
-
-
-register(
- 'xml', ResultSerializer,
- 'rdflib.plugins.sparql.results.xmlresults', 'XMLResultSerializer')
-register(
- 'application/sparql-results+xml', ResultSerializer,
- 'rdflib.plugins.sparql.results.xmlresults', 'XMLResultSerializer')
-register(
- 'txt', ResultSerializer,
- 'rdflib.plugins.sparql.results.txtresults', 'TXTResultSerializer')
-register(
- 'json', ResultSerializer,
- 'rdflib.plugins.sparql.results.jsonresults', 'JSONResultSerializer')
-register(
- 'application/sparql-results+json', ResultSerializer,
- 'rdflib.plugins.sparql.results.jsonresults', 'JSONResultSerializer')
-register(
- 'csv', ResultSerializer,
- 'rdflib.plugins.sparql.results.csvresults', 'CSVResultSerializer')
-register(
- 'text/csv', ResultSerializer,
- 'rdflib.plugins.sparql.results.csvresults', 'CSVResultSerializer')
-
-register(
- 'xml', ResultParser,
- 'rdflib.plugins.sparql.results.xmlresults', 'XMLResultParser')
-register(
- 'application/sparql-results+xml', ResultParser,
- 'rdflib.plugins.sparql.results.xmlresults', 'XMLResultParser')
-register(
- 'application/sparql-results+xml; charset=UTF-8', ResultParser,
- 'rdflib.plugins.sparql.results.xmlresults', 'XMLResultParser')
-
-register(
- 'application/rdf+xml', ResultParser,
- 'rdflib.plugins.sparql.results.graph', 'GraphResultParser')
-
-
-register(
- 'json', ResultParser,
- 'rdflib.plugins.sparql.results.jsonresults', 'JSONResultParser')
+ "csv", ResultParser, "rdflib.plugins.sparql.results.csvresults", "CSVResultParser"
+)
register(
- 'application/sparql-results+json', ResultParser,
- 'rdflib.plugins.sparql.results.jsonresults', 'JSONResultParser')
+ "text/csv",
+ ResultParser,
+ "rdflib.plugins.sparql.results.csvresults",
+ "CSVResultParser",
+)
register(
- 'csv', ResultParser,
- 'rdflib.plugins.sparql.results.csvresults', 'CSVResultParser')
-register(
- 'text/csv', ResultParser,
- 'rdflib.plugins.sparql.results.csvresults', 'CSVResultParser')
-
-register(
- 'tsv', ResultParser,
- 'rdflib.plugins.sparql.results.tsvresults', 'TSVResultParser')
-register(
- 'text/tab-separated-values', ResultParser,
- 'rdflib.plugins.sparql.results.tsvresults', 'TSVResultParser')
+ "tsv", ResultParser, "rdflib.plugins.sparql.results.tsvresults", "TSVResultParser"
+)
+register(
+ "text/tab-separated-values",
+ ResultParser,
+ "rdflib.plugins.sparql.results.tsvresults",
+ "TSVResultParser",
+)
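
The registrations above are resolved lazily via plugin.get; a brief sketch of both directions (the third-party module path is hypothetical):

    from rdflib import plugin
    from rdflib.parser import Parser
    from rdflib.serializer import Serializer

    # the class is imported here, not at register() time:
    TurtleSerializer = plugin.get("turtle", Serializer)
    # third-party registration follows the same (name, kind, module_path, class_name) shape:
    plugin.register("myformat", Parser, "mypkg.parsers", "MyFormatParser")  # hypothetical module
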
diff --git a/rdflib/plugins/memory.py b/rdflib/plugins/memory.py
index 1c1161e9..c102799f 100644
--- a/rdflib/plugins/memory.py
+++ b/rdflib/plugins/memory.py
@@ -6,7 +6,7 @@ import random
from rdflib.store import Store, NO_STORE, VALID_STORE
-__all__ = ['Memory', 'IOMemory']
+__all__ = ["Memory", "IOMemory"]
ANY = Any = None
@@ -96,14 +96,12 @@ class Memory(Store):
if predicate in subjectDictionary:
if object != ANY: # subject+predicate+object is given
if object in subjectDictionary[predicate]:
- yield (subject, predicate, object), \
- self.__contexts()
+ yield (subject, predicate, object), self.__contexts()
else: # given object not found
pass
else: # subject+predicate is given, object unbound
for o in subjectDictionary[predicate].keys():
- yield (subject, predicate, o), \
- self.__contexts()
+ yield (subject, predicate, o), self.__contexts()
else: # given predicate not found
pass
else: # subject given, predicate unbound
@@ -196,6 +194,7 @@ class IOMemory(Store):
slow.
"""
+
context_aware = True
formula_aware = True
graph_aware = True
@@ -222,11 +221,12 @@ class IOMemory(Store):
self.__obj2int = {None: None} # maps objects to integer keys
# Indexes for each triple part, and a list of contexts for each triple
- self.__subjectIndex = {} # key: sid val: set(enctriples)
+ self.__subjectIndex = {} # key: sid val: set(enctriples)
self.__predicateIndex = {} # key: pid val: set(enctriples)
- self.__objectIndex = {} # key: oid val: set(enctriples)
- self.__tripleContexts = {
- } # key: enctriple val: {cid1: quoted, cid2: quoted ...}
+ self.__objectIndex = {} # key: oid val: set(enctriples)
+ self.__tripleContexts = (
+ {}
+ ) # key: enctriple val: {cid1: quoted, cid2: quoted ...}
self.__contextTriples = {None: set()} # key: cid val: set(enctriples)
# all contexts used in store (unencoded)
@@ -294,16 +294,20 @@ class IOMemory(Store):
del self.__tripleContexts[enctriple]
- if not req_cid is None and \
- req_cid in self.__contextTriples and \
- len(self.__contextTriples[req_cid]) == 0:
+ if (
+ not req_cid is None
+ and req_cid in self.__contextTriples
+ and len(self.__contextTriples[req_cid]) == 0
+ ):
# all triples are removed out of this context
# and it's not the default context so delete it
del self.__contextTriples[req_cid]
- if triplepat == (None, None, None) and \
- context in self.__all_contexts and \
- not self.graph_aware:
+ if (
+ triplepat == (None, None, None)
+ and context in self.__all_contexts
+ and not self.graph_aware
+ ):
# remove the whole context
self.__all_contexts.remove(context)
@@ -322,9 +326,11 @@ class IOMemory(Store):
# optimize "triple in graph" case (all parts given)
if sid is not None and pid is not None and oid is not None:
- if sid in self.__subjectIndex and \
- enctriple in self.__subjectIndex[sid] and \
- self.__tripleHasContext(enctriple, cid):
+ if (
+ sid in self.__subjectIndex
+ and enctriple in self.__subjectIndex[sid]
+ and self.__tripleHasContext(enctriple, cid)
+ ):
return ((triplein, self.__contexts(enctriple)) for i in [0])
else:
return self.__emptygen()
@@ -353,9 +359,11 @@ class IOMemory(Store):
else:
enctriples = sets[0].copy()
- return ((self.__decodeTriple(enctriple), self.__contexts(enctriple))
- for enctriple in enctriples
- if self.__tripleHasContext(enctriple, cid))
+ return (
+ (self.__decodeTriple(enctriple), self.__contexts(enctriple))
+ for enctriple in enctriples
+ if self.__tripleHasContext(enctriple, cid)
+ )
def contexts(self, triple=None):
if triple is None or triple == (None, None, None):
@@ -402,8 +410,7 @@ class IOMemory(Store):
if enctriple not in self.__tripleContexts:
# triple exists with default ctx info
# start with a copy of the default ctx info
- self.__tripleContexts[
- enctriple] = self.__defaultContexts.copy()
+ self.__tripleContexts[enctriple] = self.__defaultContexts.copy()
self.__tripleContexts[enctriple][cid] = quoted
if not quoted:
@@ -446,12 +453,11 @@ class IOMemory(Store):
def __tripleHasContext(self, enctriple, cid):
"""return True iff the triple exists in the given context"""
ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts)
- return (cid in ctxs)
+ return cid in ctxs
def __removeTripleContext(self, enctriple, cid):
"""remove the context from the triple"""
- ctxs = self.__tripleContexts.get(
- enctriple, self.__defaultContexts).copy()
+ ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts).copy()
del ctxs[cid]
if ctxs == self.__defaultContexts:
del self.__tripleContexts[enctriple]
@@ -491,7 +497,11 @@ class IOMemory(Store):
def __contexts(self, enctriple):
"""return a generator for all the non-quoted contexts
(unencoded) the encoded triple appears in"""
- return (self.__int2obj.get(cid) for cid in self.__getTripleContexts(enctriple, skipQuoted=True) if cid is not None)
+ return (
+ self.__int2obj.get(cid)
+ for cid in self.__getTripleContexts(enctriple, skipQuoted=True)
+ if cid is not None
+ )
def __emptygen(self):
"""return an empty generator"""
diff --git a/rdflib/plugins/parsers/notation3.py b/rdflib/plugins/parsers/notation3.py
index 44a25adc..3bc2169f 100755
--- a/rdflib/plugins/parsers/notation3.py
+++ b/rdflib/plugins/parsers/notation3.py
@@ -48,9 +48,17 @@ from rdflib.graph import QuotedGraph, ConjunctiveGraph, Graph
from rdflib.compat import long_type
from rdflib.compat import narrow_build
-__all__ = ['BadSyntax', 'N3Parser', 'TurtleParser',
- "splitFragP", "join", "base",
- "runNamespace", "uniqueURI", "hexify"]
+__all__ = [
+ "BadSyntax",
+ "N3Parser",
+ "TurtleParser",
+ "splitFragP",
+ "join",
+ "base",
+ "runNamespace",
+ "uniqueURI",
+ "hexify",
+]
from rdflib.parser import Parser
@@ -74,7 +82,7 @@ def splitFragP(uriref, punct=0):
if i >= 0:
return uriref[:i], uriref[i:]
else:
- return uriref, ''
+ return uriref, ""
def join(here, there):
@@ -112,65 +120,67 @@ def join(here, there):
u'http://example.org/#Andr\\xe9'
"""
-# assert(here.find("#") < 0), \
-# "Base may not contain hash: '%s'" % here # why must caller splitFrag?
+ # assert(here.find("#") < 0), \
+ # "Base may not contain hash: '%s'" % here # why must caller splitFrag?
- slashl = there.find('/')
- colonl = there.find(':')
+ slashl = there.find("/")
+ colonl = there.find(":")
# join(base, 'foo:/') -- absolute
if colonl >= 0 and (slashl < 0 or colonl < slashl):
return there
- bcolonl = here.find(':')
- assert(bcolonl >= 0), \
- "Base uri '%s' is not absolute" % here # else it's not absolute
+ bcolonl = here.find(":")
+ assert bcolonl >= 0, (
+ "Base uri '%s' is not absolute" % here
+ ) # else it's not absolute
path, frag = splitFragP(there)
if not path:
return here + frag
# join('mid:foo@example', '../foo') bzzt
- if here[bcolonl + 1:bcolonl + 2] != '/':
+ if here[bcolonl + 1 : bcolonl + 2] != "/":
raise ValueError(
- ("Base <%s> has no slash after "
- "colon - with relative '%s'.") % (here, there))
+ ("Base <%s> has no slash after " "colon - with relative '%s'.")
+ % (here, there)
+ )
- if here[bcolonl + 1:bcolonl + 3] == '//':
- bpath = here.find('/', bcolonl + 3)
+ if here[bcolonl + 1 : bcolonl + 3] == "//":
+ bpath = here.find("/", bcolonl + 3)
else:
bpath = bcolonl + 1
# join('http://xyz', 'foo')
if bpath < 0:
bpath = len(here)
- here = here + '/'
+ here = here + "/"
# join('http://xyz/', '//abc') => 'http://abc'
- if there[:2] == '//':
- return here[:bcolonl + 1] + there
+ if there[:2] == "//":
+ return here[: bcolonl + 1] + there
# join('http://xyz/', '/abc') => 'http://xyz/abc'
- if there[:1] == '/':
+ if there[:1] == "/":
return here[:bpath] + there
- slashr = here.rfind('/')
+ slashr = here.rfind("/")
while 1:
- if path[:2] == './':
+ if path[:2] == "./":
path = path[2:]
- if path == '.':
- path = ''
- elif path[:3] == '../' or path == '..':
+ if path == ".":
+ path = ""
+ elif path[:3] == "../" or path == "..":
path = path[3:]
- i = here.rfind('/', bpath, slashr)
+ i = here.rfind("/", bpath, slashr)
if i >= 0:
- here = here[:i + 1]
+ here = here[: i + 1]
slashr = i
else:
break
- return here[:slashr + 1] + path + frag
+ return here[: slashr + 1] + path + frag
def base():
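
In the doctest style of join's docstring above, a few assumed-but-representative resolutions:

    from rdflib.plugins.parsers.notation3 import join

    assert join("http://example.org/a/b", "c") == "http://example.org/a/c"
    assert join("http://example.org/a/b", "/c") == "http://example.org/c"
    assert join("http://example.org/a/b", "../c") == "http://example.org/c"
    assert join("http://example.org/a/b", "#f") == "http://example.org/a/b#f"
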
@@ -190,7 +200,7 @@ def _fixslash(s):
""" Fix windowslike filename to unixlike - (#ifdef WINDOWS)"""
s = s.replace("\\", "/")
if s[0] != "/" and s[1] == ":":
- s = s[2:] # @@@ Hack when drive letter present
+ s = s[2:] # @@@ Hack when drive letter present
return s
@@ -211,7 +221,7 @@ ANONYMOUS = 3
XMLLITERAL = 25
Logic_NS = "http://www.w3.org/2000/10/swap/log#"
-NODE_MERGE_URI = Logic_NS + "is" # Pseudo-property indicating node merging
+NODE_MERGE_URI = Logic_NS + "is" # Pseudo-property indicating node merging
forSomeSym = Logic_NS + "forSome"
forAllSym = Logic_NS + "forAll"
@@ -222,7 +232,7 @@ DAML_sameAs_URI = OWL_NS + "sameAs"
parsesTo_URI = Logic_NS + "parsesTo"
RDF_spec = "http://www.w3.org/TR/REC-rdf-syntax/"
-List_NS = RDF_NS_URI # From 20030808
+List_NS = RDF_NS_URI # From 20030808
_Old_Logic_NS = "http://www.w3.org/2000/10/swap/log.n3#"
N3_first = (SYMBOL, List_NS + "first")
@@ -238,12 +248,13 @@ runNamespaceValue = None
def runNamespace():
"Return a URI suitable as a namespace for run-local objects"
- # @@@ include hostname (privacy?) (hash it?)
+ # @@@ include hostname (privacy?) (hash it?)
global runNamespaceValue
if runNamespaceValue is None:
- runNamespaceValue = join(base(), _unique_id()) + '#'
+ runNamespaceValue = join(base(), _unique_id()) + "#"
return runNamespaceValue
+
nextu = 0
@@ -262,20 +273,21 @@ chatty_flag = 50
def BecauseOfData(*args, **kargs):
- # print args, kargs
+ # print args, kargs
pass
def becauseSubexpression(*args, **kargs):
- # print args, kargs
+ # print args, kargs
pass
+
N3_forSome_URI = forSomeSym
N3_forAll_URI = forAllSym
# Magic resources we know about
-ADDED_HASH = "#" # Stop where we use this in case we want to remove it!
+ADDED_HASH = "#" # Stop where we use this in case we want to remove it!
# This is the hash on namespace URIs
RDF_type = (SYMBOL, RDF_type_URI)
@@ -289,20 +301,20 @@ DOUBLE_DATATYPE = _XSD_PFX + "double"
FLOAT_DATATYPE = _XSD_PFX + "float"
INTEGER_DATATYPE = _XSD_PFX + "integer"
-option_noregen = 0 # If set, do not regenerate genids on output
+option_noregen = 0 # If set, do not regenerate genids on output
# @@ I18n - the notname chars need extending for well known unicode non-text
# characters. The XML spec switched to assuming unknown things were name
# characters.
# _namechars = string.lowercase + string.uppercase + string.digits + '_-'
-_notQNameChars = \
- "\t\r\n !\"#$&'()*,+/;<=>?@[\\]^`{|}~" # else valid qname :-/
+_notQNameChars = "\t\r\n !\"#$&'()*,+/;<=>?@[\\]^`{|}~" # else valid qname :-/
_notKeywordsChars = _notQNameChars + "."
-_notNameChars = _notQNameChars + ":" # Assume anything else valid name :-/
-_rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
+_notNameChars = _notQNameChars + ":" # Assume anything else valid name :-/
+_rdfns = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+
+hexChars = "ABCDEFabcdef0123456789"
+escapeChars = "(_~.-!$&'()*+,;=/?#@%)" # valid for \ escapes in localnames
-hexChars = 'ABCDEFabcdef0123456789'
-escapeChars = "(_~.-!$&'()*+,;=/?#@%)" # valid for \ escapes in localnames
def unicodeExpand(m):
try:
@@ -310,81 +322,90 @@ def unicodeExpand(m):
except:
raise Exception("Invalid unicode code point: " + m.group(1))
+
if narrow_build:
+
def unicodeExpand(m):
try:
return chr(int(m.group(1), 16))
except ValueError:
warnings.warn(
- 'Encountered a unicode char > 0xFFFF in a narrow python build. '
- 'Trying to degrade gracefully, but this can cause problems '
- 'later when working with the string:\n%s' % m.group(0))
- return codecs.decode(m.group(0), 'unicode_escape')
+ "Encountered a unicode char > 0xFFFF in a narrow python build. "
+ "Trying to degrade gracefully, but this can cause problems "
+ "later when working with the string:\n%s" % m.group(0)
+ )
+ return codecs.decode(m.group(0), "unicode_escape")
-unicodeEscape4 = re.compile(
- r'\\u([0-9a-fA-F]{4})')
-unicodeEscape8 = re.compile(
- r'\\U([0-9a-fA-F]{8})')
+unicodeEscape4 = re.compile(r"\\u([0-9a-fA-F]{4})")
+unicodeEscape8 = re.compile(r"\\U([0-9a-fA-F]{8})")
-N3CommentCharacter = "#" # For unix script # ! compatibility
+N3CommentCharacter = "#"  # For unix script # ! compatibility
########################################## Parse string to sink
#
# Regular expressions:
-eol = re.compile(
- r'[ \t]*(#[^\n]*)?\r?\n') # end of line, poss. w/comment
-eof = re.compile(
- r'[ \t]*(#[^\n]*)?$') # end of file, poss. w/comment
-ws = re.compile(r'[ \t]*') # Whitespace not including NL
-signed_integer = re.compile(r'[-+]?[0-9]+') # integer
-integer_syntax = re.compile(r'[-+]?[0-9]+')
-decimal_syntax = re.compile(r'[-+]?[0-9]*\.[0-9]+')
-exponent_syntax = re.compile(r'[-+]?(?:[0-9]+\.[0-9]*|\.[0-9]+|[0-9]+)(?:e|E)[-+]?[0-9]+')
-digitstring = re.compile(r'[0-9]+') # Unsigned integer
+eol = re.compile(r"[ \t]*(#[^\n]*)?\r?\n") # end of line, poss. w/comment
+eof = re.compile(r"[ \t]*(#[^\n]*)?$") # end of file, poss. w/comment
+ws = re.compile(r"[ \t]*") # Whitespace not including NL
+signed_integer = re.compile(r"[-+]?[0-9]+") # integer
+integer_syntax = re.compile(r"[-+]?[0-9]+")
+decimal_syntax = re.compile(r"[-+]?[0-9]*\.[0-9]+")
+exponent_syntax = re.compile(
+ r"[-+]?(?:[0-9]+\.[0-9]*|\.[0-9]+|[0-9]+)(?:e|E)[-+]?[0-9]+"
+)
+digitstring = re.compile(r"[0-9]+") # Unsigned integer
interesting = re.compile(r"""[\\\r\n\"\']""")
-langcode = re.compile(r'[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*')
+langcode = re.compile(r"[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*")
class SinkParser:
- def __init__(self, store, openFormula=None, thisDoc="", baseURI=None,
- genPrefix="", why=None, turtle=False):
+ def __init__(
+ self,
+ store,
+ openFormula=None,
+ thisDoc="",
+ baseURI=None,
+ genPrefix="",
+ why=None,
+ turtle=False,
+ ):
""" note: namespace names should *not* end in # ;
the # will get added during qname processing """
self._bindings = {}
if thisDoc != "":
- assert ':' in thisDoc, "Document URI not absolute: <%s>" % thisDoc
- self._bindings[""] = thisDoc + "#" # default
+ assert ":" in thisDoc, "Document URI not absolute: <%s>" % thisDoc
+ self._bindings[""] = thisDoc + "#" # default
self._store = store
if genPrefix:
store.setGenPrefix(genPrefix) # pass it on
self._thisDoc = thisDoc
- self.lines = 0 # for error handling
- self.startOfLine = 0 # For calculating character number
+ self.lines = 0 # for error handling
+ self.startOfLine = 0 # For calculating character number
self._genPrefix = genPrefix
- self.keywords = ['a', 'this', 'bind', 'has', 'is', 'of',
- 'true', 'false']
-        self.keywordsSet = 0 # Then only can others be considered qnames
+ self.keywords = ["a", "this", "bind", "has", "is", "of", "true", "false"]
+        self.keywordsSet = 0  # Then only can others be considered qnames
self._anonymousNodes = {}
- # Dict of anon nodes already declared ln: Term
+ # Dict of anon nodes already declared ln: Term
self._variables = {}
self._parentVariables = {}
- self._reason = why # Why the parser was asked to parse this
+ self._reason = why # Why the parser was asked to parse this
- self.turtle = turtle # raise exception when encountering N3 extensions
+ self.turtle = turtle # raise exception when encountering N3 extensions
# Turtle allows single or double quotes around strings, whereas N3
# only allows double quotes.
self.string_delimiters = ('"', "'") if turtle else ('"',)
- self._reason2 = None # Why these triples
- # was: diag.tracking
+ self._reason2 = None # Why these triples
+ # was: diag.tracking
if tracking:
self._reason2 = BecauseOfData(
- store.newSymbol(thisDoc), because=self._reason)
+ store.newSymbol(thisDoc), because=self._reason
+ )
if baseURI:
self._baseURI = baseURI
@@ -394,7 +415,7 @@ class SinkParser:
else:
self._baseURI = None
- assert not self._baseURI or ':' in self._baseURI
+ assert not self._baseURI or ":" in self._baseURI
if not self._genPrefix:
if self._thisDoc:
@@ -424,21 +445,20 @@ class SinkParser:
_L1C1. It used to be used only for tracking, but for tests in general
it makes the canonical ordering of bnodes repeatable."""
- return "%s_L%iC%i" % (self._genPrefix, self.lines,
- i - self.startOfLine + 1)
+ return "%s_L%iC%i" % (self._genPrefix, self.lines, i - self.startOfLine + 1)
def formula(self):
return self._formula
def loadStream(self, stream):
- return self.loadBuf(stream.read()) # Not ideal
+ return self.loadBuf(stream.read()) # Not ideal
def loadBuf(self, buf):
"""Parses a buffer and returns its top level formula"""
self.startDoc()
self.feed(buf)
- return self.endDoc() # self._formula
+ return self.endDoc() # self._formula
def feed(self, octets):
"""Feed an octet stream tothe parser
@@ -450,9 +470,9 @@ class SinkParser:
parser, it should be straightforward to recover."""
if not isinstance(octets, str):
- s = octets.decode('utf-8')
- # NB already decoded, so \ufeff
- if len(s) > 0 and s[0] == codecs.BOM_UTF8.decode('utf-8'):
+ s = octets.decode("utf-8")
+ # NB already decoded, so \ufeff
+ if len(s) > 0 and s[0] == codecs.BOM_UTF8.decode("utf-8"):
s = s[1:]
else:
s = octets
@@ -465,15 +485,14 @@ class SinkParser:
i = self.directiveOrStatement(s, j)
if i < 0:
- #print("# next char: %s" % s[j])
- self.BadSyntax(s, j,
- "expected directive or statement")
+ # print("# next char: %s" % s[j])
+ self.BadSyntax(s, j, "expected directive or statement")
def directiveOrStatement(self, argstr, h):
i = self.skipSpace(argstr, h)
if i < 0:
- return i # EOF
+ return i # EOF
if self.turtle:
j = self.sparqlDirective(argstr, i)
@@ -490,8 +509,8 @@ class SinkParser:
return j
- # @@I18N
- # _namechars = string.lowercase + string.uppercase + string.digits + '_-'
+ # @@I18N
+ # _namechars = string.lowercase + string.uppercase + string.digits + '_-'
def tok(self, tok, argstr, i, colon=False):
"""Check for keyword. Space must have been stripped on entry and
@@ -502,15 +521,17 @@ class SinkParser:
"""
assert tok[0] not in _notNameChars # not for punctuation
- if argstr[i:i + 1] == "@":
+ if argstr[i : i + 1] == "@":
i = i + 1
else:
if tok not in self.keywords:
return -1 # No, this has neither keywords declaration nor "@"
- if (argstr[i:i + len(tok)] == tok
- and ( argstr[i + len(tok)] in _notKeywordsChars)
- or (colon and argstr[i+len(tok)] == ':')):
+ if (
+ argstr[i : i + len(tok)] == tok
+ and (argstr[i + len(tok)] in _notKeywordsChars)
+ or (colon and argstr[i + len(tok)] == ":")
+ ):
i = i + len(tok)
return i
else:
@@ -524,109 +545,114 @@ class SinkParser:
assert tok[0] not in _notNameChars # not for punctuation
- if (argstr[i:i + len(tok)].lower() == tok.lower()
- and (argstr[i + len(tok)] in _notQNameChars)):
+ if argstr[i : i + len(tok)].lower() == tok.lower() and (
+ argstr[i + len(tok)] in _notQNameChars
+ ):
i = i + len(tok)
return i
else:
return -1
-
def directive(self, argstr, i):
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
res = []
- j = self.tok('bind', argstr, i) # implied "#". Obsolete.
+ j = self.tok("bind", argstr, i) # implied "#". Obsolete.
if j > 0:
- self.BadSyntax(argstr, i,
- "keyword bind is obsolete: use @prefix")
+ self.BadSyntax(argstr, i, "keyword bind is obsolete: use @prefix")
- j = self.tok('keywords', argstr, i)
+ j = self.tok("keywords", argstr, i)
if j > 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'keywords' when in Turtle mode.")
i = self.commaSeparatedList(argstr, j, res, self.bareWord)
if i < 0:
- self.BadSyntax(argstr, i,
- "'@keywords' needs comma separated list of words")
+ self.BadSyntax(
+ argstr, i, "'@keywords' needs comma separated list of words"
+ )
self.setKeywords(res[:])
return i
- j = self.tok('forAll', argstr, i)
+ j = self.tok("forAll", argstr, i)
if j > 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'forAll' when in Turtle mode.")
i = self.commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
- self.BadSyntax(argstr, i,
- "Bad variable list after @forAll")
+ self.BadSyntax(argstr, i, "Bad variable list after @forAll")
for x in res:
- # self._context.declareUniversal(x)
+ # self._context.declareUniversal(x)
if x not in self._variables or x in self._parentVariables:
self._variables[x] = self._context.newUniversal(x)
return i
- j = self.tok('forSome', argstr, i)
+ j = self.tok("forSome", argstr, i)
if j > 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'forSome' when in Turtle mode.")
- i = self. commaSeparatedList(argstr, j, res, self.uri_ref2)
+ i = self.commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
- self.BadSyntax(argstr, i,
- "Bad variable list after @forSome")
+ self.BadSyntax(argstr, i, "Bad variable list after @forSome")
for x in res:
self._context.declareExistential(x)
return i
- j = self.tok('prefix', argstr, i, colon=True) # no implied "#"
+ j = self.tok("prefix", argstr, i, colon=True) # no implied "#"
if j >= 0:
t = []
i = self.qname(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected qname after @prefix")
+ self.BadSyntax(argstr, j, "expected qname after @prefix")
j = self.uri_ref2(argstr, i, t)
if j < 0:
- self.BadSyntax(argstr, i,
- "expected <uriref> after @prefix _qname_")
+ self.BadSyntax(argstr, i, "expected <uriref> after @prefix _qname_")
ns = self.uriOf(t[1])
if self._baseURI:
ns = join(self._baseURI, ns)
elif ":" not in ns:
- self.BadSyntax(argstr, j,
- "With no base URI, cannot use " +
- "relative URI in @prefix <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no base URI, cannot use "
+ + "relative URI in @prefix <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._bindings[t[0][0]] = ns
self.bind(t[0][0], hexify(ns))
return j
- j = self.tok('base', argstr, i) # Added 2007/7/7
+ j = self.tok("base", argstr, i) # Added 2007/7/7
if j >= 0:
t = []
i = self.uri_ref2(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected <uri> after @base ")
+ self.BadSyntax(argstr, j, "expected <uri> after @base ")
ns = self.uriOf(t[0])
if self._baseURI:
ns = join(self._baseURI, ns)
else:
- self.BadSyntax(argstr, j,
- "With no previous base URI, cannot use " +
- "relative URI in @base <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no previous base URI, cannot use "
+ + "relative URI in @base <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._baseURI = ns
return i
- return -1 # Not a directive, could be something else.
+ return -1 # Not a directive, could be something else.
def sparqlDirective(self, argstr, i):
@@ -639,55 +665,60 @@ class SinkParser:
if j < 0:
return j # eof
- j = self.sparqlTok('PREFIX', argstr, i)
+ j = self.sparqlTok("PREFIX", argstr, i)
if j >= 0:
t = []
i = self.qname(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected qname after @prefix")
+ self.BadSyntax(argstr, j, "expected qname after @prefix")
j = self.uri_ref2(argstr, i, t)
if j < 0:
- self.BadSyntax(argstr, i,
- "expected <uriref> after @prefix _qname_")
+ self.BadSyntax(argstr, i, "expected <uriref> after @prefix _qname_")
ns = self.uriOf(t[1])
if self._baseURI:
ns = join(self._baseURI, ns)
elif ":" not in ns:
- self.BadSyntax(argstr, j,
- "With no base URI, cannot use " +
- "relative URI in @prefix <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no base URI, cannot use "
+ + "relative URI in @prefix <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._bindings[t[0][0]] = ns
self.bind(t[0][0], hexify(ns))
return j
- j = self.sparqlTok('BASE', argstr, i)
+ j = self.sparqlTok("BASE", argstr, i)
if j >= 0:
t = []
i = self.uri_ref2(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected <uri> after @base ")
+ self.BadSyntax(argstr, j, "expected <uri> after @base ")
ns = self.uriOf(t[0])
if self._baseURI:
ns = join(self._baseURI, ns)
else:
- self.BadSyntax(argstr, j,
- "With no previous base URI, cannot use " +
- "relative URI in @base <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no previous base URI, cannot use "
+ + "relative URI in @base <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._baseURI = ns
return i
- return -1 # Not a directive, could be something else.
-
+ return -1 # Not a directive, could be something else.
def bind(self, qn, uri):
- assert isinstance(
- uri, bytes), "Any unicode must be %x-encoded already"
+ assert isinstance(uri, bytes), "Any unicode must be %x-encoded already"
if qn == "":
self._store.setDefaultNamespace(uri)
else:
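
The @prefix/@base handling above is what resolves relative namespace URIs during parsing; a sketch with an assumed Turtle snippet:

    from rdflib import Graph

    g = Graph()
    g.parse(
        data="@base <http://example.org/> .\n"
             "@prefix ex: <vocab#> .\n"      # joined against @base by join()
             "<doc> ex:title 'single quotes are allowed in Turtle mode' .",
        format="turtle",
    )
    print(next(iter(g)))  # (...org/doc, ...org/vocab#title, Literal)
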
@@ -702,31 +733,29 @@ class SinkParser:
self.keywordsSet = 1
def startDoc(self):
- # was: self._store.startDoc()
+ # was: self._store.startDoc()
self._store.startDoc(self._formula)
def endDoc(self):
"""Signal end of document and stop parsing. returns formula"""
- self._store.endDoc(self._formula) # don't canonicalize yet
+ self._store.endDoc(self._formula) # don't canonicalize yet
return self._formula
def makeStatement(self, quadruple):
- # $$$$$$$$$$$$$$$$$$$$$
- # print "# Parser output: ", `quadruple`
+ # $$$$$$$$$$$$$$$$$$$$$
+ # print "# Parser output: ", `quadruple`
self._store.makeStatement(quadruple, why=self._reason2)
def statement(self, argstr, i):
r = []
- i = self.object(
- argstr, i, r) # Allow literal for subject - extends RDF
+ i = self.object(argstr, i, r) # Allow literal for subject - extends RDF
if i < 0:
return i
j = self.property_list(argstr, i, r[0])
if j < 0:
- self.BadSyntax(
- argstr, i, "expected propertylist")
+ self.BadSyntax(argstr, i, "expected propertylist")
return j
def subject(self, argstr, i, res):
@@ -748,77 +777,73 @@ class SinkParser:
r = []
- j = self.tok('has', argstr, i)
+ j = self.tok("has", argstr, i)
if j >= 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'has' keyword in Turtle mode")
i = self.prop(argstr, j, r)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected property after 'has'")
- res.append(('->', r[0]))
+ self.BadSyntax(argstr, j, "expected property after 'has'")
+ res.append(("->", r[0]))
return i
- j = self.tok('is', argstr, i)
+ j = self.tok("is", argstr, i)
if j >= 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'is' keyword in Turtle mode")
i = self.prop(argstr, j, r)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected <property> after 'is'")
+ self.BadSyntax(argstr, j, "expected <property> after 'is'")
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "End of file found, expected property after 'is'")
+ self.BadSyntax(
+ argstr, i, "End of file found, expected property after 'is'"
+ )
i = j
- j = self.tok('of', argstr, i)
+ j = self.tok("of", argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "expected 'of' after 'is' <prop>")
- res.append(('<-', r[0]))
+ self.BadSyntax(argstr, i, "expected 'of' after 'is' <prop>")
+ res.append(("<-", r[0]))
return j
- j = self.tok('a', argstr, i)
+ j = self.tok("a", argstr, i)
if j >= 0:
- res.append(('->', RDF_type))
+ res.append(("->", RDF_type))
return j
- if argstr[i:i + 2] == "<=":
+ if argstr[i : i + 2] == "<=":
if self.turtle:
- self.BadSyntax(argstr, i,
- "Found '<=' in Turtle mode. ")
+ self.BadSyntax(argstr, i, "Found '<=' in Turtle mode. ")
- res.append(('<-', self._store.newSymbol(Logic_NS + "implies")))
+ res.append(("<-", self._store.newSymbol(Logic_NS + "implies")))
return i + 2
- if argstr[i:i + 1] == "=":
+ if argstr[i : i + 1] == "=":
if self.turtle:
self.BadSyntax(argstr, i, "Found '=' in Turtle mode")
- if argstr[i + 1:i + 2] == ">":
- res.append(('->', self._store.newSymbol(Logic_NS + "implies")))
+ if argstr[i + 1 : i + 2] == ">":
+ res.append(("->", self._store.newSymbol(Logic_NS + "implies")))
return i + 2
- res.append(('->', DAML_sameAs))
+ res.append(("->", DAML_sameAs))
return i + 1
- if argstr[i:i + 2] == ":=":
+ if argstr[i : i + 2] == ":=":
if self.turtle:
self.BadSyntax(argstr, i, "Found ':=' in Turtle mode")
- # patch file relates two formulae, uses this @@ really?
- res.append(('->', Logic_NS + "becomes"))
+ # patch file relates two formulae, uses this @@ really?
+ res.append(("->", Logic_NS + "becomes"))
return i + 2
j = self.prop(argstr, i, r)
if j >= 0:
- res.append(('->', r[0]))
+ res.append(("->", r[0]))
return j
- if argstr[i:i + 2] == ">-" or argstr[i:i + 2] == "<-":
- self.BadSyntax(argstr, j,
- ">- ... -> syntax is obsolete.")
+ if argstr[i : i + 2] == ">-" or argstr[i : i + 2] == "<-":
+ self.BadSyntax(argstr, j, ">- ... -> syntax is obsolete.")
return -1
@@ -836,16 +861,15 @@ class SinkParser:
"""
j = self.nodeOrLiteral(argstr, i, res)
if j < 0:
- return j # nope
+ return j # nope
- while argstr[j:j + 1] in "!^": # no spaces, must follow exactly (?)
- ch = argstr[j:j + 1]
+ while argstr[j : j + 1] in "!^": # no spaces, must follow exactly (?)
+ ch = argstr[j : j + 1]
subj = res.pop()
obj = self.blankNode(uri=self.here(j))
j = self.node(argstr, j + 1, res)
if j < 0:
- self.BadSyntax(argstr, j,
- "EOF found in middle of path syntax")
+ self.BadSyntax(argstr, j, "EOF found in middle of path syntax")
pred = res.pop()
if ch == "^": # Reverse traverse
self.makeStatement((self._context, pred, obj, subj))
@@ -874,18 +898,19 @@ class SinkParser:
if j < 0:
return j # eof
i = j
- ch = argstr[i:i + 1] # Quick 1-character checks first:
+ ch = argstr[i : i + 1] # Quick 1-character checks first:
if ch == "[":
bnodeID = self.here(i)
j = self.skipSpace(argstr, i + 1)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF after '['")
+ self.BadSyntax(argstr, i, "EOF after '['")
# Hack for "is" binding name to anon node
- if argstr[j:j + 1] == "=":
+ if argstr[j : j + 1] == "=":
if self.turtle:
- self.BadSyntax(argstr, j, "Found '[=' or '[ =' when in turtle mode.")
+ self.BadSyntax(
+ argstr, j, "Found '[=' or '[ =' when in turtle mode."
+ )
i = j + 1
objs = []
j = self.objectList(argstr, i, objs)
@@ -893,33 +918,31 @@ class SinkParser:
subj = objs[0]
if len(objs) > 1:
for obj in objs:
- self.makeStatement((self._context,
- DAML_sameAs, subj, obj))
+ self.makeStatement((self._context, DAML_sameAs, subj, obj))
j = self.skipSpace(argstr, j)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF when objectList expected after [ = ")
- if argstr[j:j + 1] == ";":
+ self.BadSyntax(
+ argstr, i, "EOF when objectList expected after [ = "
+ )
+ if argstr[j : j + 1] == ";":
j = j + 1
else:
- self.BadSyntax(argstr, i,
- "objectList expected after [= ")
+ self.BadSyntax(argstr, i, "objectList expected after [= ")
if subj is None:
subj = self.blankNode(uri=bnodeID)
i = self.property_list(argstr, j, subj)
if i < 0:
- self.BadSyntax(argstr, j,
- "property_list expected")
+ self.BadSyntax(argstr, j, "property_list expected")
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF when ']' expected after [ <propertyList>")
- if argstr[j:j + 1] != "]":
- self.BadSyntax(argstr, j,
- "']' expected")
+ self.BadSyntax(
+ argstr, i, "EOF when ']' expected after [ <propertyList>"
+ )
+ if argstr[j : j + 1] != "]":
+ self.BadSyntax(argstr, j, "']' expected")
res.append(subj)
return j + 1
@@ -927,8 +950,8 @@ class SinkParser:
# if self.turtle:
# self.BadSyntax(argstr, i,
# "found '{' while in Turtle mode, Formulas not supported!")
- ch2 = argstr[i + 1:i + 2]
- if ch2 == '$':
+ ch2 = argstr[i + 1 : i + 2]
+ if ch2 == "$":
# a set
i += 1
j = i + 1
@@ -937,27 +960,23 @@ class SinkParser:
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(argstr, i,
- "needed '$}', found end.")
- if argstr[i:i + 2] == '$}':
+ self.BadSyntax(argstr, i, "needed '$}', found end.")
+ if argstr[i : i + 2] == "$}":
j = i + 2
break
if not first_run:
- if argstr[i:i + 1] == ',':
+ if argstr[i : i + 1] == ",":
i += 1
else:
- self.BadSyntax(
- argstr, i, "expected: ','")
+ self.BadSyntax(argstr, i, "expected: ','")
else:
first_run = False
item = []
- j = self.item(
- argstr, i, item) # @@@@@ should be path, was object
+ j = self.item(argstr, i, item) # @@@@@ should be path, was object
if j < 0:
- self.BadSyntax(argstr, i,
- "expected item in set or '$}'")
+ self.BadSyntax(argstr, i, "expected item in set or '$}'")
List.append(self._store.intern(item[0]))
res.append(self._store.newSet(List, self._context))
return j
@@ -980,17 +999,15 @@ class SinkParser:
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(
- argstr, i, "needed '}', found end.")
+ self.BadSyntax(argstr, i, "needed '}', found end.")
- if argstr[i:i + 1] == "}":
+ if argstr[i : i + 1] == "}":
j = i + 1
break
j = self.directiveOrStatement(argstr, i)
if j < 0:
- self.BadSyntax(
- argstr, i, "expected statement or '}'")
+ self.BadSyntax(argstr, i, "expected statement or '}'")
self._anonymousNodes = parentAnonymousNodes
self._variables = self._parentVariables
@@ -998,13 +1015,13 @@ class SinkParser:
self._context = self._parentContext
self._reason2 = reason2
self._parentContext = oldParentContext
- res.append(subj.close()) # No use until closed
+ res.append(subj.close()) # No use until closed
return j
if ch == "(":
thing_type = self._store.newList
- ch2 = argstr[i + 1:i + 2]
- if ch2 == '$':
+ ch2 = argstr[i + 1 : i + 2]
+ if ch2 == "$":
thing_type = self._store.newSet
i += 1
j = i + 1
@@ -1013,34 +1030,34 @@ class SinkParser:
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(
- argstr, i, "needed ')', found end.")
- if argstr[i:i + 1] == ')':
+ self.BadSyntax(argstr, i, "needed ')', found end.")
+ if argstr[i : i + 1] == ")":
j = i + 1
break
item = []
- j = self.item(
- argstr, i, item) # @@@@@ should be path, was object
+ j = self.item(argstr, i, item) # @@@@@ should be path, was object
if j < 0:
- self.BadSyntax(argstr, i,
- "expected item in list or ')'")
+ self.BadSyntax(argstr, i, "expected item in list or ')'")
List.append(self._store.intern(item[0]))
res.append(thing_type(List, self._context))
return j
- j = self.tok('this', argstr, i) # This context
+ j = self.tok("this", argstr, i) # This context
if j >= 0:
- self.BadSyntax(argstr, i,
- "Keyword 'this' was ancient N3. Now use " +
- "@forSome and @forAll keywords.")
-
- # booleans
- j = self.tok('true', argstr, i)
+ self.BadSyntax(
+ argstr,
+ i,
+ "Keyword 'this' was ancient N3. Now use "
+ + "@forSome and @forAll keywords.",
+ )
+
+ # booleans
+ j = self.tok("true", argstr, i)
if j >= 0:
res.append(True)
return j
- j = self.tok('false', argstr, i)
+ j = self.tok("false", argstr, i)
if j >= 0:
res.append(False)
return j
@@ -1057,23 +1074,24 @@ class SinkParser:
Leaves the terminating punctuation in the buffer
"""
while 1:
- while 1: # skip repeat ;
+ while 1: # skip repeat ;
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF found when expected verb in property list")
- if argstr[j]!=';': break
- i = j+1
+ self.BadSyntax(
+ argstr, i, "EOF found when expected verb in property list"
+ )
+ if argstr[j] != ";":
+ break
+ i = j + 1
- if argstr[j:j + 2] == ":-":
+ if argstr[j : j + 2] == ":-":
if self.turtle:
self.BadSyntax(argstr, j, "Found in ':-' in Turtle mode")
i = j + 2
res = []
j = self.node(argstr, i, res, subj)
if j < 0:
- self.BadSyntax(argstr, i,
- "bad {} or () or [] node after :- ")
+ self.BadSyntax(argstr, i, "bad {} or () or [] node after :- ")
i = j
continue
i = j
@@ -1085,20 +1103,18 @@ class SinkParser:
objs = []
i = self.objectList(argstr, j, objs)
if i < 0:
- self.BadSyntax(argstr, j,
- "objectList expected")
+ self.BadSyntax(argstr, j, "objectList expected")
for obj in objs:
dira, sym = v[0]
- if dira == '->':
+ if dira == "->":
self.makeStatement((self._context, sym, subj, obj))
else:
self.makeStatement((self._context, sym, obj, subj))
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, j,
- "EOF found in list of objects")
- if argstr[i:i + 1] != ";":
+ self.BadSyntax(argstr, j, "EOF found in list of objects")
+ if argstr[i : i + 1] != ";":
return i
i = i + 1 # skip semicolon and continue
@@ -1108,10 +1124,9 @@ class SinkParser:
"""
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(argstr, i,
- "EOF found expecting comma sep list")
+ self.BadSyntax(argstr, i, "EOF found expecting comma sep list")
if argstr[i] == ".":
- return j # empty list is OK
+ return j # empty list is OK
i = what(argstr, i, res)
if i < 0:
return -1
@@ -1120,15 +1135,14 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
- ch = argstr[j:j + 1]
+ ch = argstr[j : j + 1]
if ch != ",":
if ch != ".":
return -1
- return j # Found but not swallowed "."
+ return j # Found but not swallowed "."
i = what(argstr, j + 1, res)
if i < 0:
- self.BadSyntax(argstr, i,
- "bad list content")
+ self.BadSyntax(argstr, i, "bad list content")
def objectList(self, argstr, i, res):
i = self.object(argstr, i, res)
@@ -1137,10 +1151,9 @@ class SinkParser:
while 1:
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, j,
- "EOF found after object")
- if argstr[j:j + 1] != ",":
- return j # Found something else!
+ self.BadSyntax(argstr, j, "EOF found after object")
+ if argstr[j : j + 1] != ",":
+ return j # Found something else!
i = self.object(argstr, j + 1, res)
if i < 0:
return i
@@ -1149,14 +1162,13 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
- if argstr[j:j + 1] == ".":
- return j + 1 # skip
- if argstr[j:j + 1] == "}":
- return j # don't skip it
- if argstr[j:j + 1] == "]":
+ if argstr[j : j + 1] == ".":
+ return j + 1 # skip
+ if argstr[j : j + 1] == "}":
+ return j # don't skip it
+ if argstr[j : j + 1] == "]":
return j
- self.BadSyntax(argstr, j,
- "expected '.' or '}' or ']' at end of statement")
+ self.BadSyntax(argstr, j, "expected '.' or '}' or ']' at end of statement")
def uri_ref2(self, argstr, i, res):
"""Generate uri from n3 representation.
@@ -1182,8 +1194,7 @@ class SinkParser:
if not self.turtle and pfx == "":
ns = join(self._baseURI or "", "#")
else:
- self.BadSyntax(argstr, i,
- "Prefix \"%s:\" not bound" % (pfx))
+ self.BadSyntax(argstr, i, 'Prefix "%s:" not bound' % (pfx))
symb = self._store.newSymbol(ns + ln)
if symb in self._variables:
res.append(self._variables[symb])
@@ -1217,11 +1228,11 @@ class SinkParser:
if self._baseURI:
uref = join(self._baseURI, uref) # was: uripath.join
else:
- assert ":" in uref, \
- "With no base URI, cannot deal with relative URIs"
- if argstr[i - 1:i] == "#" and not uref[-1:] == "#":
- uref = uref + \
- "#" # She meant it! Weirdness in urlparse?
+ assert (
+ ":" in uref
+ ), "With no base URI, cannot deal with relative URIs"
+ if argstr[i - 1 : i] == "#" and not uref[-1:] == "#":
+ uref = uref + "#" # She meant it! Weirdness in urlparse?
symb = self._store.newSymbol(uref)
if symb in self._variables:
res.append(self._variables[symb])
@@ -1229,17 +1240,15 @@ class SinkParser:
res.append(symb)
return i + 1
i = i + 1
- self.BadSyntax(argstr, j,
- "unterminated URI reference")
+ self.BadSyntax(argstr, j, "unterminated URI reference")
elif self.keywordsSet:
v = []
j = self.bareWord(argstr, i, v)
if j < 0:
-                return -1 # Forget variables as a class, only in context.
+                return -1  # Forget variables as a class, only in context.
if v[0] in self.keywords:
- self.BadSyntax(argstr, i,
- 'Keyword "%s" not allowed here.' % v[0])
+ self.BadSyntax(argstr, i, 'Keyword "%s" not allowed here.' % v[0])
res.append(self._store.newSymbol(self._bindings[""] + v[0]))
return j
else:
@@ -1253,7 +1262,7 @@ class SinkParser:
if m is None:
break
self.lines = self.lines + 1
- i = m.end() # Point to first character unmatched
+ i = m.end() # Point to first character unmatched
self.startOfLine = i
m = ws.match(argstr, i)
if m is not None:
@@ -1271,30 +1280,31 @@ class SinkParser:
if j < 0:
return -1
- if argstr[j:j + 1] != "?":
+ if argstr[j : j + 1] != "?":
return -1
j = j + 1
i = j
if argstr[j] in "0123456789-":
- self.BadSyntax(argstr, j,
-                           "Variable name can't start with '%s'" % argstr[j])
+            self.BadSyntax(argstr, j, "Variable name can't start with '%s'" % argstr[j])
while i < len(argstr) and argstr[i] not in _notKeywordsChars:
i = i + 1
if self._parentContext is None:
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._variables:
self._variables[varURI] = self._context.newUniversal(
- varURI, why=self._reason2)
+ varURI, why=self._reason2
+ )
res.append(self._variables[varURI])
return i
- # @@ was:
- # self.BadSyntax(argstr, j,
- # "Can't use ?xxx syntax for variable in outermost level: %s"
- # % argstr[j-1:i])
+ # @@ was:
+ # self.BadSyntax(argstr, j,
+ # "Can't use ?xxx syntax for variable in outermost level: %s"
+ # % argstr[j-1:i])
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._parentVariables:
self._parentVariables[varURI] = self._parentContext.newUniversal(
- varURI, why=self._reason2)
+ varURI, why=self._reason2
+ )
res.append(self._parentVariables[varURI])
return i
@@ -1340,16 +1350,17 @@ class SinkParser:
if argstr[i - 1] == ".": # qname cannot end with "."
ln = ln[:-1]
- if not ln: return -1
+ if not ln:
+ return -1
i -= 1
else: # First character is non-alpha
- ln = '' # Was: None - TBL (why? useful?)
+ ln = "" # Was: None - TBL (why? useful?)
- if i < len(argstr) and argstr[i] == ':':
+ if i < len(argstr) and argstr[i] == ":":
pfx = ln
# bnodes names have different rules
- if pfx == '_':
+ if pfx == "_":
allowedChars = _notNameChars
else:
allowedChars = _notQNameChars
@@ -1357,10 +1368,10 @@ class SinkParser:
i = i + 1
lastslash = False
# start = i # TODO first char .
- ln = ''
+ ln = ""
while i < len(argstr):
c = argstr[i]
- if not lastslash and c == '\\':
+ if not lastslash and c == "\\":
lastslash = True
i += 1
@@ -1368,12 +1379,25 @@ class SinkParser:
if lastslash:
if c not in escapeChars:
- raise BadSyntax(self._thisDoc, self.line, argstr, i,
- "illegal escape "+c)
- elif c=='%':
- if argstr[i+1] not in hexChars or argstr[i+2] not in hexChars:
- raise BadSyntax(self._thisDoc, self.line, argstr, i,
- "illegal hex escape "+c)
+ raise BadSyntax(
+ self._thisDoc,
+ self.line,
+ argstr,
+ i,
+ "illegal escape " + c,
+ )
+ elif c == "%":
+ if (
+ argstr[i + 1] not in hexChars
+ or argstr[i + 2] not in hexChars
+ ):
+ raise BadSyntax(
+ self._thisDoc,
+ self.line,
+ argstr,
+ i,
+ "illegal hex escape " + c,
+ )
ln = ln + c
i = i + 1
@@ -1383,22 +1407,22 @@ class SinkParser:
if lastslash:
raise BadSyntax(
- self._thisDoc, self.line, argstr, i,
- "qname cannot end with \\")
-
+ self._thisDoc, self.line, argstr, i, "qname cannot end with \\"
+ )
- if argstr[i-1]=='.':
+ if argstr[i - 1] == ".":
# localname cannot end in .
ln = ln[:-1]
- if not ln: return -1
+ if not ln:
+ return -1
i -= 1
res.append((pfx, ln))
return i
- else: # delimiter was not ":"
+ else: # delimiter was not ":"
if ln and self.keywordsSet and ln not in self.keywords:
- res.append(('', ln))
+ res.append(("", ln))
return i
return -1
@@ -1414,7 +1438,7 @@ class SinkParser:
i = j
if argstr[i] in self.string_delimiters:
- if argstr[i:i + 3] == argstr[i] * 3:
+ if argstr[i : i + 3] == argstr[i] * 3:
delim = argstr[i] * 3
else:
delim = argstr[i]
@@ -1462,7 +1486,7 @@ class SinkParser:
# return -1 ## or fall through?
if argstr[i] in self.string_delimiters:
- if argstr[i:i + 3] == argstr[i] * 3:
+ if argstr[i : i + 3] == argstr[i] * 3:
delim = argstr[i] * 3
else:
delim = argstr[i]
@@ -1471,17 +1495,20 @@ class SinkParser:
dt = None
j, s = self.strconst(argstr, i, delim)
lang = None
- if argstr[j:j + 1] == "@": # Language?
+ if argstr[j : j + 1] == "@": # Language?
m = langcode.match(argstr, j + 1)
if m is None:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "Bad language code syntax on string " +
- "literal, after @")
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "Bad language code syntax on string " + "literal, after @",
+ )
i = m.end()
- lang = argstr[j + 1:i]
+ lang = argstr[j + 1 : i]
j = i
- if argstr[j:j + 2] == "^^":
+ if argstr[j : j + 2] == "^^":
res2 = []
j = self.uri_ref2(argstr, j + 2, res2) # Read datatype URI
dt = res2[0]
@@ -1493,7 +1520,7 @@ class SinkParser:
def uriOf(self, sym):
if isinstance(sym, tuple):
return sym[1] # old system for --pipe
- # return sym.uriref() # cwm api
+ # return sym.uriref() # cwm api
return sym
def strconst(self, argstr, i, delim):
@@ -1504,35 +1531,39 @@ class SinkParser:
delim2, delim3, delim4, delim5 = delim1 * 2, delim1 * 3, delim1 * 4, delim1 * 5
j = i
- ustr = u"" # Empty unicode string
+ ustr = u"" # Empty unicode string
startline = self.lines # Remember where for error messages
while j < len(argstr):
if argstr[j] == delim1:
if delim == delim1: # done when delim is " or '
i = j + 1
return i, ustr
- if delim == delim3: # done when delim is """ or ''' and, respectively ...
- if argstr[j:j + 5] == delim5: # ... we have "" or '' before
+ if (
+ delim == delim3
+ ): # done when delim is """ or ''' and, respectively ...
+ if argstr[j : j + 5] == delim5: # ... we have "" or '' before
i = j + 5
ustr = ustr + delim2
return i, ustr
- if argstr[j:j + 4] == delim4: # ... we have " or ' before
+ if argstr[j : j + 4] == delim4: # ... we have " or ' before
i = j + 4
ustr = ustr + delim1
return i, ustr
- if argstr[j:j + 3] == delim3: # current " or ' is part of delim
+ if argstr[j : j + 3] == delim3: # current " or ' is part of delim
i = j + 3
return i, ustr
- # we are inside of the string and current char is " or '
+ # we are inside of the string and current char is " or '
j = j + 1
ustr = ustr + delim1
continue
- m = interesting.search(argstr, j) # was argstr[j:].
- # Note for pos param to work, MUST be compiled ... re bug?
+ m = interesting.search(argstr, j) # was argstr[j:].
+ # Note for pos param to work, MUST be compiled ... re bug?
assert m, "Quote expected in string at ^ in %s^%s" % (
- argstr[j - 20:j], argstr[j:j + 20]) # at least need a quote
+ argstr[j - 20 : j],
+ argstr[j : j + 20],
+ ) # at least need a quote
i = m.start()
try:
@@ -1543,12 +1574,15 @@ class SinkParser:
err = err + (" %02x" % ord(c))
streason = sys.exc_info()[1].__str__()
raise BadSyntax(
- self._thisDoc, startline, argstr, j,
- "Unicode error appending characters" +
- " %s to string, because\n\t%s"
- % (err, streason))
+ self._thisDoc,
+ startline,
+ argstr,
+ j,
+ "Unicode error appending characters"
+ + " %s to string, because\n\t%s" % (err, streason),
+ )
- # print "@@@ i = ",i, " j=",j, "m.end=", m.end()
+ # print "@@@ i = ",i, " j=",j, "m.end=", m.end()
ch = argstr[i]
if ch == delim1:
@@ -1561,8 +1595,12 @@ class SinkParser:
elif ch in "\r\n":
if delim == delim1:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "newline found in string literal")
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "newline found in string literal",
+ )
self.lines = self.lines + 1
ustr = ustr + ch
j = i + 1
@@ -1570,14 +1608,18 @@ class SinkParser:
elif ch == "\\":
j = i + 1
- ch = argstr[j:j + 1] # Will be empty if string ends
+ ch = argstr[j : j + 1] # Will be empty if string ends
if not ch:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "unterminated string literal (2)")
- k = 'abfrtvn\\"\''.find(ch)
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "unterminated string literal (2)",
+ )
+ k = "abfrtvn\\\"'".find(ch)
if k >= 0:
- uch = '\a\b\f\r\t\v\n\\"\''[k]
+ uch = "\a\b\f\r\t\v\n\\\"'"[k]
ustr = ustr + uch
j = j + 1
elif ch == "u":
@@ -1587,41 +1629,43 @@ class SinkParser:
j, ch = self.UEscape(argstr, j + 1, startline)
ustr = ustr + ch
else:
- self.BadSyntax(argstr, i,
- "bad escape")
+ self.BadSyntax(argstr, i, "bad escape")
- self.BadSyntax(argstr, i,
- "unterminated string literal")
+ self.BadSyntax(argstr, i, "unterminated string literal")
def _unicodeEscape(self, argstr, i, startline, reg, n, prefix):
- if len(argstr)<i+n:
+ if len(argstr) < i + n:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "unterminated string literal(3)")
+ self._thisDoc, startline, argstr, i, "unterminated string literal (3)"
+ )
try:
- return i+n, reg.sub(unicodeExpand, '\\'+prefix+argstr[i:i+n])
+ return i + n, reg.sub(unicodeExpand, "\\" + prefix + argstr[i : i + n])
except:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "bad string literal hex escape: "+argstr[i:i+n])
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "bad string literal hex escape: " + argstr[i : i + n],
+ )
def uEscape(self, argstr, i, startline):
- return self._unicodeEscape(argstr, i, startline, unicodeEscape4, 4, 'u')
+ return self._unicodeEscape(argstr, i, startline, unicodeEscape4, 4, "u")
def UEscape(self, argstr, i, startline):
- return self._unicodeEscape(argstr, i, startline, unicodeEscape8, 8, 'U')
+ return self._unicodeEscape(argstr, i, startline, unicodeEscape8, 8, "U")
def BadSyntax(self, argstr, i, msg):
raise BadSyntax(self._thisDoc, self.lines, argstr, i, msg)
+
# If we are going to do operators then they should generate
# [ is operator:plus of ( \1 \2 ) ]
class BadSyntax(SyntaxError):
def __init__(self, uri, lines, argstr, i, why):
- self._str = argstr.encode(
- 'utf-8') # Better go back to strings for errors
+ self._str = argstr.encode("utf-8") # Better go back to strings for errors
self._i = i
self._why = why
self.lines = lines
@@ -1641,16 +1685,21 @@ class BadSyntax(SyntaxError):
else:
post = ""
- return 'at line %i of <%s>:\nBad syntax (%s) at ^ in:\n"%s%s^%s%s"' \
- % (self.lines + 1, self._uri, self._why, pre,
- argstr[st:i], argstr[i:i + 60], post)
+ return 'at line %i of <%s>:\nBad syntax (%s) at ^ in:\n"%s%s^%s%s"' % (
+ self.lines + 1,
+ self._uri,
+ self._why,
+ pre,
+ argstr[st:i],
+ argstr[i : i + 60],
+ post,
+ )
@property
def message(self):
return str(self)
-
###############################################################################
class Formula(object):
number = 0
@@ -1663,25 +1712,24 @@ class Formula(object):
self.existentials = {}
self.universals = {}
- self.quotedgraph = QuotedGraph(
- store=parent.store, identifier=self.id())
+ self.quotedgraph = QuotedGraph(store=parent.store, identifier=self.id())
def __str__(self):
- return '_:Formula%s' % self.number
+ return "_:Formula%s" % self.number
def id(self):
- return BNode('_:Formula%s' % self.number)
+ return BNode("_:Formula%s" % self.number)
def newBlankNode(self, uri=None, why=None):
if uri is None:
self.counter += 1
- bn = BNode('f%sb%s' % (self.uuid, self.counter))
+ bn = BNode("f%sb%s" % (self.uuid, self.counter))
else:
- bn = BNode(uri.split('#').pop().replace('_', 'b'))
+ bn = BNode(uri.split("#").pop().replace("_", "b"))
return bn
def newUniversal(self, uri, why=None):
- return Variable(uri.split('#').pop())
+ return Variable(uri.split("#").pop())
def declareExistential(self, x):
self.existentials[x] = self.newBlankNode()
@@ -1691,7 +1739,7 @@ class Formula(object):
return self.quotedgraph
-r_hibyte = re.compile(r'([\x80-\xff])')
+r_hibyte = re.compile(r"([\x80-\xff])")
class RDFSink(object):
@@ -1716,9 +1764,9 @@ class RDFSink(object):
return arg.newBlankNode(uri)
elif isinstance(arg, Graph) or arg is None:
self.counter += 1
- bn = BNode('n' + str(self.counter))
+ bn = BNode("n" + str(self.counter))
else:
- bn = BNode(str(arg[0]).split('#').pop().replace('_', 'b'))
+ bn = BNode(str(arg[0]).split("#").pop().replace("_", "b"))
return bn
def newLiteral(self, s, dt, lang):
@@ -1728,18 +1776,12 @@ class RDFSink(object):
return Literal(s, lang=lang)
def newList(self, n, f):
- nil = self.newSymbol(
- 'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil'
- )
+ nil = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#nil")
if not n:
return nil
- first = self.newSymbol(
- 'http://www.w3.org/1999/02/22-rdf-syntax-ns#first'
- )
- rest = self.newSymbol(
- 'http://www.w3.org/1999/02/22-rdf-syntax-ns#rest'
- )
+ first = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#first")
+ rest = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#rest")
af = a = self.newBlankNode(f)
for ne in n[:-1]:
@@ -1755,12 +1797,12 @@ class RDFSink(object):
return set(args)
def setDefaultNamespace(self, *args):
- return ':'.join(repr(n) for n in args)
+ return ":".join(repr(n) for n in args)
def makeStatement(self, quadruple, why=None):
f, p, s, o = quadruple
- if hasattr(p, 'formula'):
+ if hasattr(p, "formula"):
raise Exception("Formula used as predicate")
s = self.normalise(f, s)
@@ -1768,14 +1810,14 @@ class RDFSink(object):
o = self.normalise(f, o)
if f == self.rootFormula:
- # print s, p, o, '.'
+ # print s, p, o, '.'
self.graph.add((s, p, o))
elif isinstance(f, Formula):
f.quotedgraph.add((s, p, o))
else:
- f.add((s,p,o))
+ f.add((s, p, o))
- # return str(quadruple)
+ # return str(quadruple)
def normalise(self, f, n):
if isinstance(n, tuple):
@@ -1791,8 +1833,8 @@ class RDFSink(object):
if isinstance(n, Decimal):
value = str(n)
- if value == '-0':
- value = '0'
+ if value == "-0":
+ value = "0"
s = Literal(value, datatype=DECIMAL_DATATYPE)
return s
@@ -1804,11 +1846,11 @@ class RDFSink(object):
if n in f.existentials:
return f.existentials[n]
- # if isinstance(n, Var):
- # if f.universals.has_key(n):
- # return f.universals[n]
- # f.universals[n] = f.newBlankNode()
- # return f.universals[n]
+ # if isinstance(n, Var):
+ # if f.universals.has_key(n):
+ # return f.universals[n]
+ # f.universals[n] = f.newBlankNode()
+ # return f.universals[n]
return n
@@ -1841,7 +1883,7 @@ def hexify(ustr):
"""
# s1=ustr.encode('utf-8')
s = ""
- for ch in ustr: # .encode('utf-8'):
+ for ch in ustr: # .encode('utf-8'):
if ord(ch) > 126 or ord(ch) < 33:
ch = "%%%02X" % ord(ch)
else:
@@ -1865,13 +1907,13 @@ class TurtleParser(Parser):
if encoding not in [None, "utf-8"]:
raise Exception(
- ("N3/Turtle files are always utf-8 encoded, ",
- "I was passed: %s") % encoding)
+ ("N3/Turtle files are always utf-8 encoded, ", "I was passed: %s")
+ % encoding
+ )
sink = RDFSink(graph)
- baseURI = graph.absolutize(
- source.getPublicId() or source.getSystemId() or "")
+ baseURI = graph.absolutize(source.getPublicId() or source.getSystemId() or "")
p = SinkParser(sink, baseURI=baseURI, turtle=turtle)
p.loadStream(source.getByteStream())
@@ -1893,38 +1935,40 @@ class N3Parser(TurtleParser):
pass
def parse(self, source, graph, encoding="utf-8"):
- # we're currently being handed a Graph, not a ConjunctiveGraph
+ # we're currently being handed a Graph, not a ConjunctiveGraph
assert graph.store.context_aware # is this implied by formula_aware
assert graph.store.formula_aware
conj_graph = ConjunctiveGraph(store=graph.store)
conj_graph.default_context = graph # TODO: CG __init__ should have a
- # default_context arg
- # TODO: update N3Processor so that it can use conj_graph as the sink
+ # default_context arg
+ # TODO: update N3Processor so that it can use conj_graph as the sink
conj_graph.namespace_manager = graph.namespace_manager
TurtleParser.parse(self, source, conj_graph, encoding, turtle=False)
-def _test(): # pragma: no cover
+def _test(): # pragma: no cover
import doctest
+
doctest.testmod()
# if __name__ == '__main__':
# _test()
-def main(): # pragma: no cover
+
+def main(): # pragma: no cover
g = ConjunctiveGraph()
sink = RDFSink(g)
- base_uri = 'file://' + os.path.join(os.getcwd(), sys.argv[1])
+ base_uri = "file://" + os.path.join(os.getcwd(), sys.argv[1])
p = SinkParser(sink, baseURI=base_uri)
- p._bindings[''] = p._baseURI + '#'
+ p._bindings[""] = p._baseURI + "#"
p.startDoc()
- f = open(sys.argv[1], 'rb')
+ f = open(sys.argv[1], "rb")
rdbytes = f.read()
f.close()
@@ -1934,7 +1978,8 @@ def main(): # pragma: no cover
print(t)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
# ends
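The strconst hunks above are where the reformatting pays off most: the lookahead for closing long (triple-quoted) strings now reads top to bottom. A standalone sketch of that logic, with a hypothetical helper name (not rdflib API):

    # On seeing the quote character inside a long string, prefer the longest
    # run, so quotes sitting immediately before the closing """ stay part of
    # the string value (mirrors the delim3/delim4/delim5 branches above).
    def close_long_string(argstr, j, delim1):
        delim3, delim4, delim5 = delim1 * 3, delim1 * 4, delim1 * 5
        if argstr[j : j + 5] == delim5:  # two quotes belong to the value
            return j + 5, delim1 * 2
        if argstr[j : j + 4] == delim4:  # one quote belongs to the value
            return j + 4, delim1
        if argstr[j : j + 3] == delim3:  # plain close
            return j + 3, ""
        return None  # lone quote inside the string body

    assert close_long_string('""""" tail', 0, '"') == (5, '""')
    assert close_long_string('"""" tail', 0, '"') == (4, '"')
    assert close_long_string('""" tail', 0, '"') == (3, "")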
diff --git a/rdflib/plugins/parsers/nquads.py b/rdflib/plugins/parsers/nquads.py
index 8bc415bd..0c29fc4c 100644
--- a/rdflib/plugins/parsers/nquads.py
+++ b/rdflib/plugins/parsers/nquads.py
@@ -36,26 +36,26 @@ from rdflib.plugins.parsers.ntriples import ParseError
from rdflib.plugins.parsers.ntriples import r_tail
from rdflib.plugins.parsers.ntriples import r_wspace
-__all__ = ['NQuadsParser']
+__all__ = ["NQuadsParser"]
class NQuadsParser(NTriplesParser):
-
def parse(self, inputsource, sink, **kwargs):
"""Parse f as an N-Triples file."""
- assert sink.store.context_aware, ("NQuadsParser must be given"
- " a context aware store.")
+ assert sink.store.context_aware, (
+ "NQuadsParser must be given" " a context aware store."
+ )
self.sink = ConjunctiveGraph(store=sink.store, identifier=sink.identifier)
source = inputsource.getByteStream()
- if not hasattr(source, 'read'):
+ if not hasattr(source, "read"):
raise ParseError("Item to parse must be a file-like object.")
- source = getreader('utf-8')(source)
+ source = getreader("utf-8")(source)
self.file = source
- self.buffer = ''
+ self.buffer = ""
while True:
self.line = __line = self.readline()
if self.line is None:
@@ -69,7 +69,7 @@ class NQuadsParser(NTriplesParser):
def parseline(self):
self.eat(r_wspace)
- if (not self.line) or self.line.startswith(('#')):
+ if (not self.line) or self.line.startswith("#"):
return # The line is empty or a comment
subject = self.subject()
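For orientation, the parser reformatted here is reached through rdflib's public API; a minimal usage sketch with the standard plugin name "nquads":

    # Parse one quad into a context-aware graph and walk the contexts.
    from rdflib import ConjunctiveGraph

    data = '<http://ex.org/s> <http://ex.org/p> "o" <http://ex.org/g> .\n'
    g = ConjunctiveGraph()
    g.parse(data=data, format="nquads")
    for s, p, o, ctx in g.quads((None, None, None, None)):
        print(s, p, o, ctx.identifier)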
diff --git a/rdflib/plugins/parsers/nt.py b/rdflib/plugins/parsers/nt.py
index 783488af..d7d3b336 100644
--- a/rdflib/plugins/parsers/nt.py
+++ b/rdflib/plugins/parsers/nt.py
@@ -1,7 +1,7 @@
from rdflib.parser import Parser
from rdflib.plugins.parsers.ntriples import NTriplesParser
-__all__ = ['NTSink', 'NTParser']
+__all__ = ["NTSink", "NTParser"]
class NTSink(object):
diff --git a/rdflib/plugins/parsers/ntriples.py b/rdflib/plugins/parsers/ntriples.py
index 0724a86e..9398c8de 100644
--- a/rdflib/plugins/parsers/ntriples.py
+++ b/rdflib/plugins/parsers/ntriples.py
@@ -22,18 +22,18 @@ from rdflib.compat import decodeUnicodeEscape
from io import BytesIO
-__all__ = ['unquote', 'uriquote', 'Sink', 'NTriplesParser']
+__all__ = ["unquote", "uriquote", "Sink", "NTriplesParser"]
uriref = r'<([^:]+:[^\s"<>]*)>'
literal = r'"([^"\\]*(?:\\.[^"\\]*)*)"'
-litinfo = r'(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)|\^\^' + uriref + r')?'
+litinfo = r"(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)|\^\^" + uriref + r")?"
-r_line = re.compile(r'([^\r\n]*)(?:\r\n|\r|\n)')
-r_wspace = re.compile(r'[ \t]*')
-r_wspaces = re.compile(r'[ \t]+')
-r_tail = re.compile(r'[ \t]*\.[ \t]*(#.*)?')
+r_line = re.compile(r"([^\r\n]*)(?:\r\n|\r|\n)")
+r_wspace = re.compile(r"[ \t]*")
+r_wspaces = re.compile(r"[ \t]+")
+r_tail = re.compile(r"[ \t]*\.[ \t]*(#.*)?")
r_uriref = re.compile(uriref)
-r_nodeid = re.compile(r'_:([A-Za-z0-9_:]([-A-Za-z0-9_:\.]*[-A-Za-z0-9_:])?)')
+r_nodeid = re.compile(r"_:([A-Za-z0-9_:]([-A-Za-z0-9_:\.]*[-A-Za-z0-9_:])?)")
r_literal = re.compile(literal + litinfo)
bufsiz = 2048
@@ -57,11 +57,10 @@ class Sink(object):
print(s, p, o)
-quot = {'t': u'\t', 'n': u'\n', 'r': u'\r', '"': u'"', '\\':
- u'\\'}
-r_safe = re.compile(r'([\x20\x21\x23-\x5B\x5D-\x7E]+)')
+quot = {"t": u"\t", "n": u"\n", "r": u"\r", '"': u'"', "\\": u"\\"}
+r_safe = re.compile(r"([\x20\x21\x23-\x5B\x5D-\x7E]+)")
r_quot = re.compile(r'\\(t|n|r|"|\\)')
-r_uniquot = re.compile(r'\\u([0-9A-F]{4})|\\U([0-9A-F]{8})')
+r_uniquot = re.compile(r"\\u([0-9A-F]{4})|\\U([0-9A-F]{8})")
def unquote(s):
@@ -71,7 +70,7 @@ def unquote(s):
if isinstance(s, str): # nquads
s = decodeUnicodeEscape(s)
else:
- s = s.decode('unicode-escape')
+ s = s.decode("unicode-escape")
return s
else:
@@ -79,7 +78,7 @@ def unquote(s):
while s:
m = r_safe.match(s)
if m:
- s = s[m.end():]
+ s = s[m.end() :]
result.append(m.group(1))
continue
@@ -91,28 +90,27 @@ def unquote(s):
m = r_uniquot.match(s)
if m:
- s = s[m.end():]
+ s = s[m.end() :]
u, U = m.groups()
codepoint = int(u or U, 16)
if codepoint > 0x10FFFF:
raise ParseError("Disallowed codepoint: %08X" % codepoint)
result.append(chr(codepoint))
- elif s.startswith('\\'):
+ elif s.startswith("\\"):
raise ParseError("Illegal escape at: %s..." % s[:10])
else:
raise ParseError("Illegal literal character: %r" % s[0])
- return u''.join(result)
+ return u"".join(result)
-r_hibyte = re.compile(r'([\x80-\xFF])')
+r_hibyte = re.compile(r"([\x80-\xFF])")
def uriquote(uri):
if not validate:
return uri
else:
- return r_hibyte.sub(
- lambda m: '%%%02X' % ord(m.group(1)), uri)
+ return r_hibyte.sub(lambda m: "%%%02X" % ord(m.group(1)), uri)
class NTriplesParser(object):
@@ -134,14 +132,14 @@ class NTriplesParser(object):
def parse(self, f):
"""Parse f as an N-Triples file."""
- if not hasattr(f, 'read'):
+ if not hasattr(f, "read"):
raise ParseError("Item to parse must be a file-like object.")
# since N-Triples 1.1 files can and should be utf-8 encoded
- f = codecs.getreader('utf-8')(f)
+ f = codecs.getreader("utf-8")(f)
self.file = f
- self.buffer = ''
+ self.buffer = ""
while True:
self.line = self.readline()
if self.line is None:
@@ -174,7 +172,7 @@ class NTriplesParser(object):
while True:
m = r_line.match(self.buffer)
if m: # the more likely prospect
- self.buffer = self.buffer[m.end():]
+ self.buffer = self.buffer[m.end() :]
return m.group(1)
else:
buffer = self.file.read(bufsiz)
@@ -187,7 +185,7 @@ class NTriplesParser(object):
def parseline(self):
self.eat(r_wspace)
- if (not self.line) or self.line.startswith('#'):
+ if (not self.line) or self.line.startswith("#"):
return # The line is empty or a comment
subject = self.subject()
@@ -212,7 +210,7 @@ class NTriplesParser(object):
# print(dir(pattern))
# print repr(self.line), type(self.line)
raise ParseError("Failed to eat %s at %s" % (pattern.pattern, self.line))
- self.line = self.line[m.end():]
+ self.line = self.line[m.end() :]
return m
def subject(self):
@@ -235,7 +233,7 @@ class NTriplesParser(object):
return objt
def uriref(self):
- if self.peek('<'):
+ if self.peek("<"):
uri = self.eat(r_uriref).group(1)
uri = unquote(uri)
uri = uriquote(uri)
@@ -243,7 +241,7 @@ class NTriplesParser(object):
return False
def nodeid(self):
- if self.peek('_'):
+ if self.peek("_"):
# Fix for https://github.com/RDFLib/rdflib/issues/204
bnode_id = self.eat(r_nodeid).group(1)
new_id = self._bnode_ids.get(bnode_id, None)
@@ -277,6 +275,7 @@ class NTriplesParser(object):
return Literal(lit, lang, dtype)
return False
+
# # Obsolete, unused
# def parseURI(uri):
# import urllib
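unquote/uriquote are easy to sanity-check in isolation; the import path below is the one at this commit (it moved in later releases):

    # unquote decodes N-Triples escapes; uriquote %-encodes high bytes in
    # URIs, but only when the module-level validate flag is turned on.
    from rdflib.plugins.parsers.ntriples import unquote

    assert unquote(r"caf\u00E9\tok") == "caf\u00e9\tok"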
diff --git a/rdflib/plugins/parsers/rdfxml.py b/rdflib/plugins/parsers/rdfxml.py
index 17554ba1..976edf2c 100644
--- a/rdflib/plugins/parsers/rdfxml.py
+++ b/rdflib/plugins/parsers/rdfxml.py
@@ -15,22 +15,30 @@ from rdflib.term import Literal
from rdflib.exceptions import ParserError, Error
from rdflib.parser import Parser
-__all__ = ['create_parser', 'BagID', 'ElementHandler',
- 'RDFXMLHandler', 'RDFXMLParser']
+__all__ = ["create_parser", "BagID", "ElementHandler", "RDFXMLHandler", "RDFXMLParser"]
RDFNS = RDF
# http://www.w3.org/TR/rdf-syntax-grammar/#eventterm-attribute-URI
# A mapping from unqualified terms to their qualified version.
-UNQUALIFIED = {"about": RDF.about,
- "ID": RDF.ID,
- "type": RDF.type,
- "resource": RDF.resource,
- "parseType": RDF.parseType}
+UNQUALIFIED = {
+ "about": RDF.about,
+ "ID": RDF.ID,
+ "type": RDF.type,
+ "resource": RDF.resource,
+ "parseType": RDF.parseType,
+}
# http://www.w3.org/TR/rdf-syntax-grammar/#coreSyntaxTerms
-CORE_SYNTAX_TERMS = [RDF.RDF, RDF.ID, RDF.about, RDF.parseType,
- RDF.resource, RDF.nodeID, RDF.datatype]
+CORE_SYNTAX_TERMS = [
+ RDF.RDF,
+ RDF.ID,
+ RDF.about,
+ RDF.parseType,
+ RDF.resource,
+ RDF.nodeID,
+ RDF.datatype,
+]
# http://www.w3.org/TR/rdf-syntax-grammar/#syntaxTerms
SYNTAX_TERMS = CORE_SYNTAX_TERMS + [RDF.Description, RDF.li]
@@ -39,15 +47,16 @@ SYNTAX_TERMS = CORE_SYNTAX_TERMS + [RDF.Description, RDF.li]
OLD_TERMS = [
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEach"),
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEachPrefix"),
- URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#bagID")]
+ URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#bagID"),
+]
-NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.li, ] + OLD_TERMS
+NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.li,] + OLD_TERMS
NODE_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.nodeID, RDF.about]
-PROPERTY_ELEMENT_EXCEPTIONS = \
- CORE_SYNTAX_TERMS + [RDF.Description, ] + OLD_TERMS
-PROPERTY_ATTRIBUTE_EXCEPTIONS = \
+PROPERTY_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.Description,] + OLD_TERMS
+PROPERTY_ATTRIBUTE_EXCEPTIONS = (
CORE_SYNTAX_TERMS + [RDF.Description, RDF.li] + OLD_TERMS
+)
PROPERTY_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.resource, RDF.nodeID]
XMLNS = "http://www.w3.org/XML/1998/namespace"
@@ -56,7 +65,7 @@ LANG = (XMLNS, "lang")
class BagID(URIRef):
- __slots__ = ['li']
+ __slots__ = ["li"]
def __init__(self, val):
super(URIRef, self).__init__(val)
@@ -64,13 +73,26 @@ class BagID(URIRef):
def next_li(self):
self.li += 1
- return RDFNS['_%s' % self.li]
+ return RDFNS["_%s" % self.li]
class ElementHandler(object):
- __slots__ = ['start', 'char', 'end', 'li', 'id',
- 'base', 'subject', 'predicate', 'object',
- 'list', 'language', 'datatype', 'declared', 'data']
+ __slots__ = [
+ "start",
+ "char",
+ "end",
+ "li",
+ "id",
+ "base",
+ "subject",
+ "predicate",
+ "object",
+ "list",
+ "language",
+ "datatype",
+ "declared",
+ "data",
+ ]
def __init__(self):
self.start = None
@@ -89,11 +111,10 @@ class ElementHandler(object):
def next_li(self):
self.li += 1
- return RDFNS['_%s' % self.li]
+ return RDFNS["_%s" % self.li]
class RDFXMLHandler(handler.ContentHandler):
-
def __init__(self, store):
self.store = store
self.preserve_bnode_ids = False
@@ -103,7 +124,10 @@ class RDFXMLHandler(handler.ContentHandler):
document_element = ElementHandler()
document_element.start = self.document_element_start
document_element.end = lambda name, qname: None
- self.stack = [None, document_element, ]
+ self.stack = [
+ None,
+ document_element,
+ ]
self.ids = {} # remember IDs we have already seen
self.bnode = {}
self._ns_contexts = [{}] # contains uri -> prefix dicts
@@ -137,16 +161,14 @@ class RDFXMLHandler(handler.ContentHandler):
if parent and parent.base:
base = urljoin(parent.base, base)
else:
- systemId = self.locator.getPublicId() \
- or self.locator.getSystemId()
+ systemId = self.locator.getPublicId() or self.locator.getSystemId()
if systemId:
base = urljoin(systemId, base)
else:
if parent:
base = parent.base
if base is None:
- systemId = self.locator.getPublicId() \
- or self.locator.getSystemId()
+ systemId = self.locator.getPublicId() or self.locator.getSystemId()
if systemId:
base, frag = urldefrag(systemId)
current.base = base
@@ -181,25 +203,30 @@ class RDFXMLHandler(handler.ContentHandler):
def error(self, message):
locator = self.locator
- info = "%s:%s:%s: " % (locator.getSystemId(),
- locator.getLineNumber(),
- locator.getColumnNumber())
+ info = "%s:%s:%s: " % (
+ locator.getSystemId(),
+ locator.getLineNumber(),
+ locator.getColumnNumber(),
+ )
raise ParserError(info + message)
def get_current(self):
return self.stack[-2]
+
# Create a read only property called current so that self.current
# give the current element handler.
current = property(get_current)
def get_next(self):
return self.stack[-1]
+
# Create a read only property that gives the element handler to be
# used for the next element.
next = property(get_next)
def get_parent(self):
return self.stack[-3]
+
# Create a read only property that gives the current parent
# element handler
parent = property(get_parent)
@@ -233,7 +260,7 @@ class RDFXMLHandler(handler.ContentHandler):
def document_element_start(self, name, qname, attrs):
if name[0] and URIRef("".join(name)) == RDF.RDF:
# Cheap hack so 2to3 doesn't turn it into __next__
- next = getattr(self, 'next')
+ next = getattr(self, "next")
next.start = self.node_element_start
next.end = self.node_element_end
else:
@@ -248,7 +275,7 @@ class RDFXMLHandler(handler.ContentHandler):
absolutize = self.absolutize
# Cheap hack so 2to3 doesn't turn it into __next__
- next = getattr(self, 'next')
+ next = getattr(self, "next")
next.start = self.property_element_start
next.end = self.property_element_end
@@ -257,27 +284,21 @@ class RDFXMLHandler(handler.ContentHandler):
if RDF.ID in atts:
if RDF.about in atts or RDF.nodeID in atts:
- self.error(
- "Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
- )
+ self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
id = atts[RDF.ID]
if not is_ncname(id):
self.error("rdf:ID value is not a valid NCName: %s" % id)
subject = absolutize("#%s" % id)
if subject in self.ids:
- self.error(
- "two elements cannot use the same ID: '%s'" % subject)
+ self.error("two elements cannot use the same ID: '%s'" % subject)
self.ids[subject] = 1 # IDs can only appear once within a document
elif RDF.nodeID in atts:
if RDF.ID in atts or RDF.about in atts:
- self.error(
- "Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
- )
+ self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
nodeID = atts[RDF.nodeID]
if not is_ncname(nodeID):
- self.error(
- "rdf:nodeID value is not a valid NCName: %s" % nodeID)
+ self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
subject = self.bnode[nodeID]
@@ -288,9 +309,7 @@ class RDFXMLHandler(handler.ContentHandler):
subject = BNode(nodeID)
elif RDF.about in atts:
if RDF.ID in atts or RDF.nodeID in atts:
- self.error(
- "Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
- )
+ self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
subject = absolutize(atts[RDF.about])
else:
subject = BNode()
@@ -330,7 +349,9 @@ class RDFXMLHandler(handler.ContentHandler):
if self.parent.object and self.current != self.stack[2]:
- self.error("Repeat node-elements inside property elements: %s"%"".join(name))
+ self.error(
+ "Repeat node-elements inside property elements: %s" % "".join(name)
+ )
self.parent.object = self.current.subject
@@ -340,7 +361,7 @@ class RDFXMLHandler(handler.ContentHandler):
absolutize = self.absolutize
# Cheap hack so 2to3 doesn't turn it into __next__
- next = getattr(self, 'next')
+ next = getattr(self, "next")
object = None
current.data = None
current.list = None
@@ -366,17 +387,14 @@ class RDFXMLHandler(handler.ContentHandler):
nodeID = atts.get(RDF.nodeID, None)
parse_type = atts.get(RDF.parseType, None)
if resource is not None and nodeID is not None:
- self.error(
- "Property element cannot have both rdf:nodeID and rdf:resource"
- )
+ self.error("Property element cannot have both rdf:nodeID and rdf:resource")
if resource is not None:
object = absolutize(resource)
next.start = self.node_element_start
next.end = self.node_element_end
elif nodeID is not None:
if not is_ncname(nodeID):
- self.error(
- "rdf:nodeID value is not a valid NCName: %s" % nodeID)
+ self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
object = self.bnode[nodeID]
@@ -401,16 +419,16 @@ class RDFXMLHandler(handler.ContentHandler):
elif parse_type == "Collection":
current.char = None
object = current.list = RDF.nil # BNode()
- # self.parent.subject
+ # self.parent.subject
next.start = self.node_element_start
next.end = self.list_node_element_end
else: # if parse_type=="Literal":
- # All other values are treated as Literal
- # See: http://www.w3.org/TR/rdf-syntax-grammar/
- # parseTypeOtherPropertyElt
+ # All other values are treated as Literal
+ # See: http://www.w3.org/TR/rdf-syntax-grammar/
+ # parseTypeOtherPropertyElt
object = Literal("", datatype=RDF.XMLLiteral)
current.char = self.literal_element_char
- current.declared = {XMLNS: 'xml'}
+ current.declared = {XMLNS: "xml"}
next.start = self.literal_element_start
next.char = self.literal_element_char
next.end = self.literal_element_end
@@ -466,18 +484,17 @@ class RDFXMLHandler(handler.ContentHandler):
literalLang = current.language
if current.datatype is not None:
literalLang = None
- current.object = Literal(
- current.data, literalLang, current.datatype)
+ current.object = Literal(current.data, literalLang, current.datatype)
current.data = None
if self.next.end == self.list_node_element_end:
if current.object != RDF.nil:
self.store.add((current.list, RDF.rest, RDF.nil))
if current.object is not None:
- self.store.add(
- (self.parent.subject, current.predicate, current.object))
+ self.store.add((self.parent.subject, current.predicate, current.object))
if current.id is not None:
- self.add_reified(current.id, (self.parent.subject,
- current.predicate, current.object))
+ self.add_reified(
+ current.id, (self.parent.subject, current.predicate, current.object)
+ )
current.subject = None
def list_node_element_end(self, name, qname):
@@ -513,9 +530,9 @@ class RDFXMLHandler(handler.ContentHandler):
if not name[0] in current.declared:
current.declared[name[0]] = prefix
if prefix:
- current.object += (' xmlns:%s="%s"' % (prefix, name[0]))
+ current.object += ' xmlns:%s="%s"' % (prefix, name[0])
else:
- current.object += (' xmlns="%s"' % name[0])
+ current.object += ' xmlns="%s"' % name[0]
else:
current.object = "<%s" % name[1]
@@ -526,7 +543,7 @@ class RDFXMLHandler(handler.ContentHandler):
name = current.declared[name[0]] + ":" + name[1]
else:
name = name[1]
- current.object += (' %s=%s' % (name, quoteattr(value)))
+ current.object += " %s=%s" % (name, quoteattr(value))
current.object += ">"
def literal_element_char(self, data):
@@ -549,8 +566,7 @@ def create_parser(target, store):
try:
# Workaround for bug in expatreader.py. Needed when
# expatreader is trying to guess a prefix.
- parser.start_namespace_decl(
- "xml", "http://www.w3.org/XML/1998/namespace")
+ parser.start_namespace_decl("xml", "http://www.w3.org/XML/1998/namespace")
except AttributeError:
pass # Not present in Jython (at least)
parser.setFeature(handler.feature_namespaces, 1)
@@ -563,7 +579,6 @@ def create_parser(target, store):
class RDFXMLParser(Parser):
-
def __init__(self):
pass
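A quick end-to-end check of the SAX-based handler above via the public API:

    # One triple through the RDF/XML parser.
    from rdflib import Graph, URIRef

    data = """<?xml version="1.0"?>
    <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
             xmlns:ex="http://example.org/">
      <rdf:Description rdf:about="http://example.org/s">
        <ex:p>o</ex:p>
      </rdf:Description>
    </rdf:RDF>"""
    g = Graph()
    g.parse(data=data, format="xml")
    assert (URIRef("http://example.org/s"), URIRef("http://example.org/p"), None) in g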
diff --git a/rdflib/plugins/parsers/trig.py b/rdflib/plugins/parsers/trig.py
index f4c3ff1b..96c94503 100644
--- a/rdflib/plugins/parsers/trig.py
+++ b/rdflib/plugins/parsers/trig.py
@@ -5,18 +5,18 @@ from rdflib.parser import Parser
from .notation3 import SinkParser, RDFSink
-def becauseSubGraph(*args, **kwargs): pass
+def becauseSubGraph(*args, **kwargs):
+ pass
class TrigSinkParser(SinkParser):
-
def directiveOrStatement(self, argstr, h):
- #import pdb; pdb.set_trace()
+ # import pdb; pdb.set_trace()
i = self.skipSpace(argstr, h)
if i < 0:
- return i # EOF
+ return i # EOF
j = self.graph(argstr, i)
if j >= 0:
@@ -46,12 +46,11 @@ class TrigSinkParser(SinkParser):
if j >= 0:
return j
- if argstr[i] == '[':
+ if argstr[i] == "[":
j = self.skipSpace(argstr, i + 1)
if j < 0:
- self.BadSyntax(argstr, i,
- "Expected ] got EOF")
- if argstr[j] == ']':
+ self.BadSyntax(argstr, i, "Expected ] got EOF")
+ if argstr[j] == "]":
res.append(self.blankNode())
return j + 1
return -1
@@ -66,8 +65,8 @@ class TrigSinkParser(SinkParser):
raise Exception if it looks like a graph, but isn't.
"""
- #import pdb; pdb.set_trace()
- j = self.sparqlTok('GRAPH', argstr, i) # optional GRAPH keyword
+ # import pdb; pdb.set_trace()
+ j = self.sparqlTok("GRAPH", argstr, i) # optional GRAPH keyword
if j >= 0:
i = j
@@ -81,10 +80,9 @@ class TrigSinkParser(SinkParser):
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF found when expected graph")
+ self.BadSyntax(argstr, i, "EOF found when expected graph")
- if argstr[j:j + 1] == "=": # optional = for legacy support
+ if argstr[j : j + 1] == "=": # optional = for legacy support
i = self.skipSpace(argstr, j + 1)
if i < 0:
@@ -92,7 +90,7 @@ class TrigSinkParser(SinkParser):
else:
i = j
- if argstr[i:i + 1] != "{":
+ if argstr[i : i + 1] != "{":
return -1 # the node wasn't part of a graph
j = i + 1
@@ -106,17 +104,15 @@ class TrigSinkParser(SinkParser):
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(
- argstr, i, "needed '}', found end.")
+ self.BadSyntax(argstr, i, "needed '}', found end.")
- if argstr[i:i + 1] == "}":
+ if argstr[i : i + 1] == "}":
j = i + 1
break
j = self.directiveOrStatement(argstr, i)
if j < 0:
- self.BadSyntax(
- argstr, i, "expected statement or '}'")
+ self.BadSyntax(argstr, i, "expected statement or '}'")
self._context = self._parentContext
self._reason2 = reason2
@@ -138,22 +134,23 @@ class TrigParser(Parser):
if encoding not in [None, "utf-8"]:
raise Exception(
- ("TriG files are always utf-8 encoded, ",
- "I was passed: %s") % encoding)
+ ("TriG files are always utf-8 encoded, ", "I was passed: %s") % encoding
+ )
# we're currently being handed a Graph, not a ConjunctiveGraph
assert graph.store.context_aware, "TriG Parser needs a context-aware store!"
conj_graph = ConjunctiveGraph(store=graph.store, identifier=graph.identifier)
conj_graph.default_context = graph # TODO: CG __init__ should have a
- # default_context arg
+ # default_context arg
# TODO: update N3Processor so that it can use conj_graph as the sink
conj_graph.namespace_manager = graph.namespace_manager
sink = RDFSink(conj_graph)
baseURI = conj_graph.absolutize(
- source.getPublicId() or source.getSystemId() or "")
+ source.getPublicId() or source.getSystemId() or ""
+ )
p = TrigSinkParser(sink, baseURI=baseURI, turtle=True)
p.loadStream(source.getByteStream())
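And the TriG path, exercised the same way (plugin name "trig"):

    # A named graph parsed through TrigSinkParser above.
    from rdflib import ConjunctiveGraph

    trig = "@prefix ex: <http://example.org/> . ex:g { ex:s ex:p ex:o . }"
    g = ConjunctiveGraph()
    g.parse(data=trig, format="trig")
    print(list(g.contexts()))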
diff --git a/rdflib/plugins/parsers/trix.py b/rdflib/plugins/parsers/trix.py
index ffd883fe..56819514 100644
--- a/rdflib/plugins/parsers/trix.py
+++ b/rdflib/plugins/parsers/trix.py
@@ -14,7 +14,7 @@ from xml.sax.saxutils import handler
from xml.sax import make_parser
from xml.sax.handler import ErrorHandler
-__all__ = ['create_parser', 'TriXHandler', 'TriXParser']
+__all__ = ["create_parser", "TriXHandler", "TriXParser"]
TRIXNS = Namespace("http://www.w3.org/2004/03/trix/trix-1/")
@@ -56,7 +56,8 @@ class TriXHandler(handler.ContentHandler):
if name[0] != str(TRIXNS):
self.error(
"Only elements in the TriX namespace are allowed. %s!=%s"
- % (name[0], TRIXNS))
+ % (name[0], TRIXNS)
+ )
if name[1] == "TriX":
if self.state == 0:
@@ -143,46 +144,55 @@ class TriXHandler(handler.ContentHandler):
if name[0] != str(TRIXNS):
self.error(
"Only elements in the TriX namespace are allowed. %s!=%s"
- % (name[0], TRIXNS))
+ % (name[0], TRIXNS)
+ )
if name[1] == "uri":
if self.state == 3:
- self.graph = Graph(store=self.store,
- identifier=URIRef(self.chars.strip()))
+ self.graph = Graph(
+ store=self.store, identifier=URIRef(self.chars.strip())
+ )
self.state = 2
elif self.state == 4:
self.triple += [URIRef(self.chars.strip())]
else:
self.error(
- "Illegal internal self.state - This should never " +
- "happen if the SAX parser ensures XML syntax correctness")
+ "Illegal internal self.state - This should never "
+ + "happen if the SAX parser ensures XML syntax correctness"
+ )
elif name[1] == "id":
if self.state == 3:
- self.graph = Graph(self.store, identifier=self.get_bnode(
- self.chars.strip()))
+ self.graph = Graph(
+ self.store, identifier=self.get_bnode(self.chars.strip())
+ )
self.state = 2
elif self.state == 4:
self.triple += [self.get_bnode(self.chars.strip())]
else:
self.error(
- "Illegal internal self.state - This should never " +
- "happen if the SAX parser ensures XML syntax correctness")
+ "Illegal internal self.state - This should never "
+ + "happen if the SAX parser ensures XML syntax correctness"
+ )
elif name[1] == "plainLiteral" or name[1] == "typedLiteral":
if self.state == 4:
- self.triple += [Literal(
- self.chars, lang=self.lang, datatype=self.datatype)]
+ self.triple += [
+ Literal(self.chars, lang=self.lang, datatype=self.datatype)
+ ]
else:
self.error(
- "This should never happen if the SAX parser " +
- "ensures XML syntax correctness")
+ "This should never happen if the SAX parser "
+ + "ensures XML syntax correctness"
+ )
elif name[1] == "triple":
if self.state == 4:
if len(self.triple) != 3:
- self.error("Triple has wrong length, got %d elements: %s" %
- (len(self.triple), self.triple))
+ self.error(
+ "Triple has wrong length, got %d elements: %s"
+ % (len(self.triple), self.triple)
+ )
self.graph.add(self.triple)
# self.store.store.add(self.triple,context=self.graph)
@@ -190,8 +200,9 @@ class TriXHandler(handler.ContentHandler):
self.state = 2
else:
self.error(
- "This should never happen if the SAX parser " +
- "ensures XML syntax correctness")
+ "This should never happen if the SAX parser "
+ + "ensures XML syntax correctness"
+ )
elif name[1] == "graph":
self.graph = None
@@ -228,7 +239,8 @@ class TriXHandler(handler.ContentHandler):
info = "%s:%s:%s: " % (
locator.getSystemId(),
locator.getLineNumber(),
- locator.getColumnNumber())
+ locator.getColumnNumber(),
+ )
raise ParserError(info + message)
@@ -237,8 +249,7 @@ def create_parser(store):
try:
# Workaround for bug in expatreader.py. Needed when
# expatreader is trying to guess a prefix.
- parser.start_namespace_decl(
- "xml", "http://www.w3.org/XML/1998/namespace")
+ parser.start_namespace_decl("xml", "http://www.w3.org/XML/1998/namespace")
except AttributeError:
pass # Not present in Jython (at least)
parser.setFeature(handler.feature_namespaces, 1)
@@ -255,8 +266,9 @@ class TriXParser(Parser):
pass
def parse(self, source, sink, **args):
- assert sink.store.context_aware, (
- "TriXParser must be given a context aware store.")
+ assert (
+ sink.store.context_aware
+ ), "TriXParser must be given a context aware store."
self._parser = create_parser(sink.store)
content_handler = self._parser.getContentHandler()
diff --git a/rdflib/plugins/serializers/n3.py b/rdflib/plugins/serializers/n3.py
index c5efc735..6c4e2ec4 100644
--- a/rdflib/plugins/serializers/n3.py
+++ b/rdflib/plugins/serializers/n3.py
@@ -3,10 +3,9 @@ Notation 3 (N3) RDF graph serializer for RDFLib.
"""
from rdflib.graph import Graph
from rdflib.namespace import Namespace, OWL
-from rdflib.plugins.serializers.turtle import (
- TurtleSerializer, SUBJECT, OBJECT)
+from rdflib.plugins.serializers.turtle import TurtleSerializer, SUBJECT, OBJECT
-__all__ = ['N3Serializer']
+__all__ = ["N3Serializer"]
SWAP_LOG = Namespace("http://www.w3.org/2000/10/swap/log#")
@@ -17,10 +16,7 @@ class N3Serializer(TurtleSerializer):
def __init__(self, store, parent=None):
super(N3Serializer, self).__init__(store)
- self.keywords.update({
- OWL.sameAs: '=',
- SWAP_LOG.implies: '=>'
- })
+ self.keywords.update({OWL.sameAs: "=", SWAP_LOG.implies: "=>"})
self.parent = parent
def reset(self):
@@ -33,8 +29,9 @@ class N3Serializer(TurtleSerializer):
self.parent.subjectDone(subject)
def isDone(self, subject):
- return (super(N3Serializer, self).isDone(subject)
- and (not self.parent or self.parent.isDone(subject)))
+ return super(N3Serializer, self).isDone(subject) and (
+ not self.parent or self.parent.isDone(subject)
+ )
def startDocument(self):
super(N3Serializer, self).startDocument()
@@ -88,8 +85,7 @@ class N3Serializer(TurtleSerializer):
properties = self.buildPredicateHash(subject)
if len(properties) == 0:
return False
- return (self.s_clause(subject)
- or super(N3Serializer, self).statement(subject))
+ return self.s_clause(subject) or super(N3Serializer, self).statement(subject)
def path(self, node, position, newline=False):
if not self.p_clause(node, position):
@@ -97,10 +93,10 @@ class N3Serializer(TurtleSerializer):
def s_clause(self, subject):
if isinstance(subject, Graph):
- self.write('\n' + self.indent())
+ self.write("\n" + self.indent())
self.p_clause(subject, SUBJECT)
self.predicateList(subject)
- self.write(' .')
+ self.write(" .")
return True
else:
return False
@@ -109,13 +105,13 @@ class N3Serializer(TurtleSerializer):
if isinstance(node, Graph):
self.subjectDone(node)
if position is OBJECT:
- self.write(' ')
- self.write('{')
+ self.write(" ")
+ self.write("{")
self.depth += 1
serializer = N3Serializer(node, parent=self)
serializer.serialize(self.stream)
self.depth -= 1
- self.write(self.indent() + '}')
+ self.write(self.indent() + "}")
return True
else:
return False
diff --git a/rdflib/plugins/serializers/nquads.py b/rdflib/plugins/serializers/nquads.py
index a193e125..70c414cd 100644
--- a/rdflib/plugins/serializers/nquads.py
+++ b/rdflib/plugins/serializers/nquads.py
@@ -5,16 +5,15 @@ from rdflib.serializer import Serializer
from rdflib.plugins.serializers.nt import _quoteLiteral
-__all__ = ['NQuadsSerializer']
+__all__ = ["NQuadsSerializer"]
class NQuadsSerializer(Serializer):
-
def __init__(self, store):
if not store.context_aware:
raise Exception(
- "NQuads serialization only makes "
- "sense for context-aware stores!")
+ "NQuads serialization only makes " "sense for context-aware stores!"
+ )
super(NQuadsSerializer, self).__init__(store)
@@ -26,19 +25,24 @@ class NQuadsSerializer(Serializer):
encoding = self.encoding
for context in self.store.contexts():
for triple in context:
- stream.write(_nq_row(
- triple, context.identifier).encode(encoding, "replace"))
+ stream.write(
+ _nq_row(triple, context.identifier).encode(encoding, "replace")
+ )
stream.write("\n".encode("latin-1"))
def _nq_row(triple, context):
if isinstance(triple[2], Literal):
- return u"%s %s %s %s .\n" % (triple[0].n3(),
- triple[1].n3(),
- _quoteLiteral(triple[2]),
- context.n3())
+ return u"%s %s %s %s .\n" % (
+ triple[0].n3(),
+ triple[1].n3(),
+ _quoteLiteral(triple[2]),
+ context.n3(),
+ )
else:
- return u"%s %s %s %s .\n" % (triple[0].n3(),
- triple[1].n3(),
- triple[2].n3(),
- context.n3())
+ return u"%s %s %s %s .\n" % (
+ triple[0].n3(),
+ triple[1].n3(),
+ triple[2].n3(),
+ context.n3(),
+ )
diff --git a/rdflib/plugins/serializers/nt.py b/rdflib/plugins/serializers/nt.py
index 95a88ae3..94632155 100644
--- a/rdflib/plugins/serializers/nt.py
+++ b/rdflib/plugins/serializers/nt.py
@@ -9,7 +9,7 @@ from rdflib.serializer import Serializer
import warnings
import codecs
-__all__ = ['NTSerializer']
+__all__ = ["NTSerializer"]
class NTSerializer(Serializer):
@@ -19,7 +19,7 @@ class NTSerializer(Serializer):
def __init__(self, store):
Serializer.__init__(self, store)
- self.encoding = 'ascii' # n-triples are ascii encoded
+ self.encoding = "ascii" # n-triples are ascii encoded
def serialize(self, stream, base=None, encoding=None, **args):
if base is not None:
@@ -48,35 +48,33 @@ def _nt_row(triple):
return u"%s %s %s .\n" % (
triple[0].n3(),
triple[1].n3(),
- _quoteLiteral(triple[2]))
+ _quoteLiteral(triple[2]),
+ )
else:
- return u"%s %s %s .\n" % (triple[0].n3(),
- triple[1].n3(),
- triple[2].n3())
+ return u"%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), triple[2].n3())
def _quoteLiteral(l):
- '''
+ """
a simpler version of term.Literal.n3()
- '''
+ """
encoded = _quote_encode(l)
if l.language:
if l.datatype:
raise Exception("Literal has datatype AND language!")
- return '%s@%s' % (encoded, l.language)
+ return "%s@%s" % (encoded, l.language)
elif l.datatype:
- return '%s^^<%s>' % (encoded, l.datatype)
+ return "%s^^<%s>" % (encoded, l.datatype)
else:
- return '%s' % encoded
+ return "%s" % encoded
def _quote_encode(l):
- return '"%s"' % l.replace('\\', '\\\\')\
- .replace('\n', '\\n')\
- .replace('"', '\\"')\
- .replace('\r', '\\r')
+ return '"%s"' % l.replace("\\", "\\\\").replace("\n", "\\n").replace(
+ '"', '\\"'
+ ).replace("\r", "\\r")
def _nt_unicode_error_resolver(err):
@@ -86,11 +84,11 @@ def _nt_unicode_error_resolver(err):
def _replace_single(c):
c = ord(c)
- fmt = u'\\u%04X' if c <= 0xFFFF else u'\\U%08X'
+ fmt = u"\\u%04X" if c <= 0xFFFF else u"\\U%08X"
return fmt % c
- string = err.object[err.start:err.end]
+ string = err.object[err.start : err.end]
return ("".join(_replace_single(c) for c in string), err.end)
-codecs.register_error('_rdflib_nt_escape', _nt_unicode_error_resolver)
+codecs.register_error("_rdflib_nt_escape", _nt_unicode_error_resolver)
diff --git a/rdflib/plugins/serializers/rdfxml.py b/rdflib/plugins/serializers/rdfxml.py
index d3a705d2..425c0a7e 100644
--- a/rdflib/plugins/serializers/rdfxml.py
+++ b/rdflib/plugins/serializers/rdfxml.py
@@ -14,11 +14,10 @@ import xml.dom.minidom
from .xmlwriter import ESCAPE_ENTITIES
-__all__ = ['fix', 'XMLSerializer', 'PrettyXMLSerializer']
+__all__ = ["fix", "XMLSerializer", "PrettyXMLSerializer"]
class XMLSerializer(Serializer):
-
def __init__(self, store):
super(XMLSerializer, self).__init__(store)
@@ -50,18 +49,17 @@ class XMLSerializer(Serializer):
self.__stream = stream
self.__serialized = {}
encoding = self.encoding
- self.write = write = lambda uni: stream.write(
- uni.encode(encoding, 'replace'))
+ self.write = write = lambda uni: stream.write(uni.encode(encoding, "replace"))
# startDocument
write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding)
# startRDF
- write('<rdf:RDF\n')
+ write("<rdf:RDF\n")
# If provided, write xml:base attribute for the RDF
if "xml_base" in args:
- write(' xml:base="%s"\n' % args['xml_base'])
+ write(' xml:base="%s"\n' % args["xml_base"])
elif self.base:
write(' xml:base="%s"\n' % self.base)
# TODO:
@@ -75,7 +73,7 @@ class XMLSerializer(Serializer):
write(' xmlns:%s="%s"\n' % (prefix, namespace))
else:
write(' xmlns="%s"\n' % namespace)
- write('>\n')
+ write(">\n")
# write out triples by subject
for subject in self.store.subjects():
@@ -98,8 +96,7 @@ class XMLSerializer(Serializer):
element_name = "rdf:Description"
if isinstance(subject, BNode):
- write('%s<%s rdf:nodeID="%s"' % (
- indent, element_name, subject))
+ write('%s<%s rdf:nodeID="%s"' % (indent, element_name, subject))
else:
uri = quoteattr(self.relativize(subject))
write("%s<%s rdf:about=%s" % (indent, element_name, uri))
@@ -107,8 +104,7 @@ class XMLSerializer(Serializer):
if (subject, None, None) in self.store:
write(">\n")
- for predicate, object in self.store.predicate_objects(
- subject):
+ for predicate, object in self.store.predicate_objects(subject):
self.predicate(predicate, object, depth + 1)
write("%s</%s>\n" % (indent, element_name))
@@ -129,22 +125,24 @@ class XMLSerializer(Serializer):
if object.datatype:
attributes += ' rdf:datatype="%s"' % object.datatype
- write("%s<%s%s>%s</%s>\n" %
- (indent, qname, attributes,
- escape(object, ESCAPE_ENTITIES), qname))
+ write(
+ "%s<%s%s>%s</%s>\n"
+ % (indent, qname, attributes, escape(object, ESCAPE_ENTITIES), qname)
+ )
else:
if isinstance(object, BNode):
- write('%s<%s rdf:nodeID="%s"/>\n' %
- (indent, qname, object))
+ write('%s<%s rdf:nodeID="%s"/>\n' % (indent, qname, object))
else:
- write("%s<%s rdf:resource=%s/>\n" %
- (indent, qname, quoteattr(self.relativize(object))))
+ write(
+ "%s<%s rdf:resource=%s/>\n"
+ % (indent, qname, quoteattr(self.relativize(object)))
+ )
XMLLANG = "http://www.w3.org/XML/1998/namespacelang"
XMLBASE = "http://www.w3.org/XML/1998/namespacebase"
-OWL_NS = Namespace('http://www.w3.org/2002/07/owl#')
+OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
# TODO:
@@ -157,7 +155,6 @@ def fix(val):
class PrettyXMLSerializer(Serializer):
-
def __init__(self, store, max_depth=3):
super(PrettyXMLSerializer, self).__init__(store)
self.forceRDFAbout = set()
@@ -177,8 +174,7 @@ class PrettyXMLSerializer(Serializer):
self.writer = writer = XMLWriter(stream, nm, encoding)
namespaces = {}
- possible = set(store.predicates()).union(
- store.objects(None, RDF.type))
+ possible = set(store.predicates()).union(store.objects(None, RDF.type))
for predicate in possible:
prefix, namespace, local = nm.compute_qname_strict(predicate)
@@ -247,6 +243,7 @@ class PrettyXMLSerializer(Serializer):
writer.push(element)
if isinstance(subject, BNode):
+
def subj_as_obj_more_than(ceil):
return True
# more_than(store.triples((None, None, subject)), ceil)
@@ -282,8 +279,9 @@ class PrettyXMLSerializer(Serializer):
if object.language:
writer.attribute(XMLLANG, object.language)
- if (object.datatype == RDF.XMLLiteral
- and isinstance(object.value, xml.dom.minidom.Document)):
+ if object.datatype == RDF.XMLLiteral and isinstance(
+ object.value, xml.dom.minidom.Document
+ ):
writer.attribute(RDF.parseType, "Literal")
writer.text(u"")
writer.stream.write(object)
@@ -302,17 +300,20 @@ class PrettyXMLSerializer(Serializer):
else:
if first(store.objects(object, RDF.first)): # may not have type
- # RDF.List
+ # RDF.List
self.__serialized[object] = 1
# Warn that any assertions on object other than
# RDF.first and RDF.rest are ignored... including RDF.List
import warnings
+
warnings.warn(
- "Assertions on %s other than RDF.first " % repr(object) +
- "and RDF.rest are ignored ... including RDF.List",
- UserWarning, stacklevel=2)
+ "Assertions on %s other than RDF.first " % repr(object)
+ + "and RDF.rest are ignored ... including RDF.List",
+ UserWarning,
+ stacklevel=2,
+ )
writer.attribute(RDF.parseType, "Collection")
col = Collection(store, object)
@@ -326,9 +327,11 @@ class PrettyXMLSerializer(Serializer):
if not isinstance(item, URIRef):
self.__serialized[item] = 1
else:
- if first(store.triples_choices(
- (object, RDF.type, [OWL_NS.Class, RDFS.Class]))) \
- and isinstance(object, URIRef):
+ if first(
+ store.triples_choices(
+ (object, RDF.type, [OWL_NS.Class, RDFS.Class])
+ )
+ ) and isinstance(object, URIRef):
writer.attribute(RDF.resource, self.relativize(object))
elif depth <= self.max_depth:
@@ -336,9 +339,11 @@ class PrettyXMLSerializer(Serializer):
elif isinstance(object, BNode):
- if not object in self.__serialized \
- and (object, None, None) in store \
- and len(list(store.subjects(object=object))) == 1:
+ if (
+ not object in self.__serialized
+ and (object, None, None) in store
+ and len(list(store.subjects(object=object))) == 1
+ ):
# inline blank nodes if they haven't been serialized yet
# and are only referenced once (regardless of depth)
self.subject(object, depth + 1)
diff --git a/rdflib/plugins/serializers/trig.py b/rdflib/plugins/serializers/trig.py
index 755587dc..432224e0 100644
--- a/rdflib/plugins/serializers/trig.py
+++ b/rdflib/plugins/serializers/trig.py
@@ -8,13 +8,13 @@ from collections import defaultdict
from rdflib.plugins.serializers.turtle import TurtleSerializer, _GEN_QNAME_FOR_DT, VERB
from rdflib.term import BNode, Literal
-__all__ = ['TrigSerializer']
+__all__ = ["TrigSerializer"]
class TrigSerializer(TurtleSerializer):
short_name = "trig"
- indentString = 4 * u' '
+ indentString = 4 * u" "
def __init__(self, store):
if store.context_aware:
@@ -38,14 +38,17 @@ class TrigSerializer(TurtleSerializer):
for triple in context:
self.preprocessTriple(triple)
- self._contexts[context] = (self.orderSubjects(), self._subjects, self._references)
+ self._contexts[context] = (
+ self.orderSubjects(),
+ self._subjects,
+ self._references,
+ )
def reset(self):
super(TrigSerializer, self).reset()
self._contexts = {}
- def serialize(self, stream, base=None, encoding=None,
- spacious=None, **args):
+ def serialize(self, stream, base=None, encoding=None, spacious=None, **args):
self.reset()
self.stream = stream
# if base is given here, use that, if not and a base is set for the graph use that
@@ -72,7 +75,7 @@ class TrigSerializer(TurtleSerializer):
self._subjects = subjects
if self.default_context and store.identifier == self.default_context:
- self.write(self.indent() + '\n{')
+ self.write(self.indent() + "\n{")
else:
if isinstance(store.identifier, BNode):
iri = store.identifier.n3()
@@ -80,7 +83,7 @@ class TrigSerializer(TurtleSerializer):
iri = self.getQName(store.identifier)
if iri is None:
iri = store.identifier.n3()
- self.write(self.indent() + '\n%s {' % iri)
+ self.write(self.indent() + "\n%s {" % iri)
self.depth += 1
for subject in ordered_subjects:
@@ -89,9 +92,9 @@ class TrigSerializer(TurtleSerializer):
if firstTime:
firstTime = False
if self.statement(subject) and not firstTime:
- self.write('\n')
+ self.write("\n")
self.depth -= 1
- self.write('}\n')
+ self.write("}\n")
self.endDocument()
stream.write("\n".encode("latin-1"))
diff --git a/rdflib/plugins/serializers/trix.py b/rdflib/plugins/serializers/trix.py
index e6651c70..f6115bf8 100644
--- a/rdflib/plugins/serializers/trix.py
+++ b/rdflib/plugins/serializers/trix.py
@@ -7,7 +7,7 @@ from rdflib.namespace import Namespace
from rdflib.graph import Graph, ConjunctiveGraph
-__all__ = ['TriXSerializer']
+__all__ = ["TriXSerializer"]
# TODO: Move this somewhere central
TRIXNS = Namespace("http://www.w3.org/2004/03/trix/trix-1/")
@@ -19,7 +19,8 @@ class TriXSerializer(Serializer):
super(TriXSerializer, self).__init__(store)
if not store.context_aware:
raise Exception(
- "TriX serialization only makes sense for context-aware stores")
+ "TriX serialization only makes sense for context-aware stores"
+ )
def serialize(self, stream, base=None, encoding=None, **args):
@@ -49,10 +50,11 @@ class TriXSerializer(Serializer):
def _writeGraph(self, graph):
self.writer.push(TRIXNS[u"graph"])
if graph.base:
- self.writer.attribute("http://www.w3.org/XML/1998/namespacebase", graph.base)
+ self.writer.attribute(
+ "http://www.w3.org/XML/1998/namespacebase", graph.base
+ )
if isinstance(graph.identifier, URIRef):
- self.writer.element(
- TRIXNS[u"uri"], content=str(graph.identifier))
+ self.writer.element(TRIXNS[u"uri"], content=str(graph.identifier))
for triple in graph.triples((None, None, None)):
self._writeTriple(triple)
@@ -62,23 +64,22 @@ class TriXSerializer(Serializer):
self.writer.push(TRIXNS[u"triple"])
for component in triple:
if isinstance(component, URIRef):
- self.writer.element(TRIXNS[u"uri"],
- content=str(component))
+ self.writer.element(TRIXNS[u"uri"], content=str(component))
elif isinstance(component, BNode):
- self.writer.element(TRIXNS[u"id"],
- content=str(component))
+ self.writer.element(TRIXNS[u"id"], content=str(component))
elif isinstance(component, Literal):
if component.datatype:
- self.writer.element(TRIXNS[u"typedLiteral"],
- content=str(component),
- attributes={TRIXNS[u"datatype"]:
- str(component.datatype)})
+ self.writer.element(
+ TRIXNS[u"typedLiteral"],
+ content=str(component),
+ attributes={TRIXNS[u"datatype"]: str(component.datatype)},
+ )
elif component.language:
- self.writer.element(TRIXNS[u"plainLiteral"],
- content=str(component),
- attributes={XMLNS[u"lang"]:
- str(component.language)})
+ self.writer.element(
+ TRIXNS[u"plainLiteral"],
+ content=str(component),
+ attributes={XMLNS[u"lang"]: str(component.language)},
+ )
else:
- self.writer.element(TRIXNS[u"plainLiteral"],
- content=str(component))
+ self.writer.element(TRIXNS[u"plainLiteral"], content=str(component))
self.writer.pop()
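TriX, like NQuads, refuses non-context-aware stores; a minimal round-trip sketch (in the rdflib of this commit, serialize() with no destination returns bytes):

    from rdflib import ConjunctiveGraph, Literal, URIRef

    g = ConjunctiveGraph()
    g.get_context(URIRef("http://example.org/g")).add(
        (URIRef("http://example.org/s"), URIRef("http://example.org/p"), Literal("o"))
    )
    xml = g.serialize(format="trix")  # bytes; decode("utf-8") for text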
diff --git a/rdflib/plugins/serializers/turtle.py b/rdflib/plugins/serializers/turtle.py
index b89ff2d8..52693a8c 100644
--- a/rdflib/plugins/serializers/turtle.py
+++ b/rdflib/plugins/serializers/turtle.py
@@ -11,7 +11,7 @@ from rdflib.exceptions import Error
from rdflib.serializer import Serializer
from rdflib.namespace import RDF, RDFS
-__all__ = ['RecursiveSerializer', 'TurtleSerializer']
+__all__ = ["RecursiveSerializer", "TurtleSerializer"]
def _object_comparator(a, b):
@@ -52,16 +52,20 @@ class RecursiveSerializer(Serializer):
def addNamespace(self, prefix, uri):
if prefix in self.namespaces and self.namespaces[prefix] != uri:
- raise Exception("Trying to override namespace prefix %s => %s, but it's already bound to %s" % (prefix, uri, self.namespaces[prefix]))
+ raise Exception(
+ "Trying to override namespace prefix %s => %s, but it's already bound to %s"
+ % (prefix, uri, self.namespaces[prefix])
+ )
self.namespaces[prefix] = uri
def checkSubject(self, subject):
"""Check to see if the subject should be serialized yet"""
- if ((self.isDone(subject))
+ if (
+ (self.isDone(subject))
or (subject not in self._subjects)
or ((subject in self._topLevels) and (self.depth > 1))
- or (isinstance(subject, URIRef) and
- (self.depth >= self.maxDepth))):
+ or (isinstance(subject, URIRef) and (self.depth >= self.maxDepth))
+ ):
return False
return True
@@ -83,9 +87,10 @@ class RecursiveSerializer(Serializer):
seen[member] = True
recursable = [
- (isinstance(subject, BNode),
- self._references[subject], subject)
- for subject in self._subjects if subject not in seen]
+ (isinstance(subject, BNode), self._references[subject], subject)
+ for subject in self._subjects
+ if subject not in seen
+ ]
recursable.sort()
subjects.extend([subject for (isbnode, refs, subject) in recursable])
@@ -111,7 +116,7 @@ class RecursiveSerializer(Serializer):
self._topLevels = {}
if self.roundtrip_prefixes:
- if hasattr(self.roundtrip_prefixes, '__iter__'):
+ if hasattr(self.roundtrip_prefixes, "__iter__"):
for prefix, ns in self.store.namespaces():
if prefix in self.roundtrip_prefixes:
self.addNamespace(prefix, ns)
@@ -163,7 +168,7 @@ class RecursiveSerializer(Serializer):
def write(self, text):
"""Write text in given encoding."""
- self.stream.write(text.encode(self.encoding, 'replace'))
+ self.stream.write(text.encode(self.encoding, "replace"))
SUBJECT = 0
@@ -177,14 +182,12 @@ _SPACIOUS_OUTPUT = False
class TurtleSerializer(RecursiveSerializer):
short_name = "turtle"
- indentString = ' '
+ indentString = " "
def __init__(self, store):
self._ns_rewrite = {}
super(TurtleSerializer, self).__init__(store)
- self.keywords = {
- RDF.type: 'a'
- }
+ self.keywords = {RDF.type: "a"}
self.reset()
self.stream = None
self._spacious = _SPACIOUS_OUTPUT
@@ -199,8 +202,9 @@ class TurtleSerializer(RecursiveSerializer):
# so we need to keep track of ns rewrites we made so far.
- if (prefix > '' and prefix[0] == '_') \
- or self.namespaces.get(prefix, namespace) != namespace:
+ if (prefix > "" and prefix[0] == "_") or self.namespaces.get(
+ prefix, namespace
+ ) != namespace:
if prefix not in self._ns_rewrite:
p = "p" + prefix
@@ -219,8 +223,7 @@ class TurtleSerializer(RecursiveSerializer):
self._started = False
self._ns_rewrite = {}
- def serialize(self, stream, base=None, encoding=None,
- spacious=None, **args):
+ def serialize(self, stream, base=None, encoding=None, spacious=None, **args):
self.reset()
self.stream = stream
# if base is given here, use that, if not and a base is set for the graph use that
@@ -244,7 +247,7 @@ class TurtleSerializer(RecursiveSerializer):
if firstTime:
firstTime = False
if self.statement(subject) and not firstTime:
- self.write('\n')
+ self.write("\n")
self.endDocument()
stream.write("\n".encode("latin-1"))
@@ -278,7 +281,7 @@ class TurtleSerializer(RecursiveSerializer):
pfx = self.store.store.prefix(uri)
if pfx is not None:
- parts = (pfx, uri, '')
+ parts = (pfx, uri, "")
else:
# nothing worked
return None
@@ -291,95 +294,99 @@ class TurtleSerializer(RecursiveSerializer):
prefix = self.addNamespace(prefix, namespace)
- return u'%s:%s' % (prefix, local)
+ return u"%s:%s" % (prefix, local)
def startDocument(self):
self._started = True
ns_list = sorted(self.namespaces.items())
if self.base:
- self.write(self.indent() + '@base <%s> .\n' % self.base)
+ self.write(self.indent() + "@base <%s> .\n" % self.base)
for prefix, uri in ns_list:
- self.write(self.indent() + '@prefix %s: <%s> .\n' % (prefix, uri))
+ self.write(self.indent() + "@prefix %s: <%s> .\n" % (prefix, uri))
if ns_list and self._spacious:
- self.write('\n')
+ self.write("\n")
def endDocument(self):
if self._spacious:
- self.write('\n')
+ self.write("\n")
def statement(self, subject):
self.subjectDone(subject)
return self.s_squared(subject) or self.s_default(subject)
def s_default(self, subject):
- self.write('\n' + self.indent())
+ self.write("\n" + self.indent())
self.path(subject, SUBJECT)
self.predicateList(subject)
- self.write(' .')
+ self.write(" .")
return True
def s_squared(self, subject):
if (self._references[subject] > 0) or not isinstance(subject, BNode):
return False
- self.write('\n' + self.indent() + '[]')
+ self.write("\n" + self.indent() + "[]")
self.predicateList(subject)
- self.write(' .')
+ self.write(" .")
return True
def path(self, node, position, newline=False):
- if not (self.p_squared(node, position, newline)
- or self.p_default(node, position, newline)):
- raise Error("Cannot serialize node '%s'" % (node, ))
+ if not (
+ self.p_squared(node, position, newline)
+ or self.p_default(node, position, newline)
+ ):
+ raise Error("Cannot serialize node '%s'" % (node,))
def p_default(self, node, position, newline=False):
if position != SUBJECT and not newline:
- self.write(' ')
+ self.write(" ")
self.write(self.label(node, position))
return True
def label(self, node, position):
if node == RDF.nil:
- return '()'
+ return "()"
if position is VERB and node in self.keywords:
return self.keywords[node]
if isinstance(node, Literal):
return node._literal_n3(
use_plain=True,
- qname_callback=lambda dt: self.getQName(
- dt, _GEN_QNAME_FOR_DT))
+ qname_callback=lambda dt: self.getQName(dt, _GEN_QNAME_FOR_DT),
+ )
else:
node = self.relativize(node)
return self.getQName(node, position == VERB) or node.n3()
def p_squared(self, node, position, newline=False):
- if (not isinstance(node, BNode)
- or node in self._serialized
- or self._references[node] > 1
- or position == SUBJECT):
+ if (
+ not isinstance(node, BNode)
+ or node in self._serialized
+ or self._references[node] > 1
+ or position == SUBJECT
+ ):
return False
if not newline:
- self.write(' ')
+ self.write(" ")
if self.isValidList(node):
# this is a list
- self.write('(')
+ self.write("(")
self.depth += 1 # 2
self.doList(node)
self.depth -= 1 # 2
- self.write(' )')
+ self.write(" )")
else:
self.subjectDone(node)
self.depth += 2
# self.write('[\n' + self.indent())
- self.write('[')
+ self.write("[")
self.depth -= 1
# self.predicateList(node, newline=True)
self.predicateList(node, newline=False)
# self.write('\n' + self.indent() + ']')
- self.write(' ]')
+ self.write(" ]")
self.depth -= 1
return True
@@ -394,8 +401,7 @@ class TurtleSerializer(RecursiveSerializer):
except:
return False
while l:
- if l != RDF.nil and len(
- list(self.store.predicate_objects(l))) != 2:
+ if l != RDF.nil and len(list(self.store.predicate_objects(l))) != 2:
return False
l = self.store.value(l, RDF.rest)
return True
@@ -416,7 +422,7 @@ class TurtleSerializer(RecursiveSerializer):
self.verb(propList[0], newline=newline)
self.objectList(properties[propList[0]])
for predicate in propList[1:]:
- self.write(' ;\n' + self.indent(1))
+ self.write(" ;\n" + self.indent(1))
self.verb(predicate, newline=True)
self.objectList(properties[predicate])
@@ -431,6 +437,6 @@ class TurtleSerializer(RecursiveSerializer):
self.depth += depthmod
self.path(objects[0], OBJECT)
for obj in objects[1:]:
- self.write(',\n' + self.indent(1))
+ self.write(",\n" + self.indent(1))
self.path(obj, OBJECT, newline=True)
self.depth -= depthmod
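
For reference, the keywords table reformatted above ({RDF.type: "a"}) is why rdf:type serializes as the Turtle shorthand "a". A quick sketch, with an illustrative namespace:

    from rdflib import Graph, Namespace
    from rdflib.namespace import RDF

    EX = Namespace("http://example.org/")  # illustrative
    g = Graph()
    g.bind("ex", EX)
    g.add((EX.alice, RDF.type, EX.Person))
    print(g.serialize(format="turtle").decode("utf-8"))  # emits "ex:alice a ex:Person ." plus @prefix lines
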
diff --git a/rdflib/plugins/serializers/xmlwriter.py b/rdflib/plugins/serializers/xmlwriter.py
index de720e8c..b6f0acb5 100644
--- a/rdflib/plugins/serializers/xmlwriter.py
+++ b/rdflib/plugins/serializers/xmlwriter.py
@@ -1,19 +1,15 @@
import codecs
from xml.sax.saxutils import quoteattr, escape
-__all__ = ['XMLWriter']
+__all__ = ["XMLWriter"]
-ESCAPE_ENTITIES = {
- '\r': '&#13;'
-}
+ESCAPE_ENTITIES = {"\r": "&#13;"}
class XMLWriter(object):
- def __init__(self, stream, namespace_manager, encoding=None,
- decl=1, extra_ns=None):
- encoding = encoding or 'utf-8'
- encoder, decoder, stream_reader, stream_writer = \
- codecs.lookup(encoding)
+ def __init__(self, stream, namespace_manager, encoding=None, decl=1, extra_ns=None):
+ encoding = encoding or "utf-8"
+ encoder, decoder, stream_reader, stream_writer = codecs.lookup(encoding)
self.stream = stream = stream_writer(stream)
if decl:
stream.write('<?xml version="1.0" encoding="%s"?>' % encoding)
@@ -24,6 +20,7 @@ class XMLWriter(object):
def __get_indent(self):
return " " * len(self.element_stack)
+
indent = property(__get_indent)
def __close_start_tag(self):
@@ -103,8 +100,8 @@ class XMLWriter(object):
for pre, ns in self.extra_ns.items():
if uri.startswith(ns):
if pre != "":
- return ":".join(pre, uri[len(ns):])
+ return ":".join(pre, uri[len(ns) :])
else:
- return uri[len(ns):]
+ return uri[len(ns) :]
return self.nm.qname_strict(uri)
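
One pre-existing wart the reflow above preserves: ":".join(pre, uri[len(ns) :]) passes two arguments to str.join, which accepts a single iterable, so this branch raises TypeError whenever it is hit. A sketch of the presumably intended behaviour:

    def qname(uri, pre, ns):
        # str.join takes one iterable, so wrap the parts in a tuple
        local = uri[len(ns):]
        return ":".join((pre, local)) if pre else local

    print(qname("http://example.org/name", "ex", "http://example.org/"))  # ex:name
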
diff --git a/rdflib/plugins/sleepycat.py b/rdflib/plugins/sleepycat.py
index 2d08b45b..7729969e 100644
--- a/rdflib/plugins/sleepycat.py
+++ b/rdflib/plugins/sleepycat.py
@@ -8,15 +8,17 @@ from urllib.request import pathname2url
def bb(u):
- return u.encode('utf-8')
+ return u.encode("utf-8")
try:
from bsddb import db
+
has_bsddb = True
except ImportError:
try:
from bsddb3 import db
+
has_bsddb = True
except ImportError:
has_bsddb = False
@@ -36,7 +38,7 @@ if has_bsddb:
logger = logging.getLogger(__name__)
-__all__ = ['Sleepycat']
+__all__ = ["Sleepycat"]
class Sleepycat(Store):
@@ -48,8 +50,7 @@ class Sleepycat(Store):
def __init__(self, configuration=None, identifier=None):
if not has_bsddb:
- raise ImportError(
- "Unable to import bsddb/bsddb3, store is unusable.")
+ raise ImportError("Unable to import bsddb/bsddb3, store is unusable.")
self.__open = False
self.__identifier = identifier
super(Sleepycat, self).__init__(configuration)
@@ -58,6 +59,7 @@ class Sleepycat(Store):
def __get_identifier(self):
return self.__identifier
+
identifier = property(__get_identifier)
def _init_db_environment(self, homeDir, create=True):
@@ -108,11 +110,13 @@ class Sleepycat(Store):
dbsetflags = 0
# create and open the DBs
- self.__indicies = [None, ] * 3
- self.__indicies_info = [None, ] * 3
+ self.__indicies = [None,] * 3
+ self.__indicies_info = [None,] * 3
for i in range(0, 3):
- index_name = to_key_func(
- i)(("s".encode("latin-1"), "p".encode("latin-1"), "o".encode("latin-1")), "c".encode("latin-1")).decode()
+ index_name = to_key_func(i)(
+ ("s".encode("latin-1"), "p".encode("latin-1"), "o".encode("latin-1")),
+ "c".encode("latin-1"),
+ ).decode()
index = db.DB(db_env)
index.set_flags(dbsetflags)
index.open(index_name, dbname, dbtype, dbopenflags, dbmode)
@@ -148,13 +152,15 @@ class Sleepycat(Store):
yield triple[i % 3]
i += 1
yield ""
+
return get_prefix
lookup[i] = (
self.__indicies[start],
get_prefix_func(start, start + len),
from_key_func(start),
- results_from_key_func(start, self._from_string))
+ results_from_key_func(start, self._from_string),
+ )
self.__lookup_dict = lookup
@@ -187,6 +193,7 @@ class Sleepycat(Store):
def __sync_run(self):
from time import sleep, time
+
try:
min_seconds, max_seconds = 10, 300
while self.__open:
@@ -194,12 +201,11 @@ class Sleepycat(Store):
t0 = t1 = time()
self.__needs_sync = False
while self.__open:
- sleep(.1)
+ sleep(0.1)
if self.__needs_sync:
t1 = time()
self.__needs_sync = False
- if time() - t1 > min_seconds \
- or time() - t0 > max_seconds:
+ if time() - t1 > min_seconds or time() - t0 > max_seconds:
self.__needs_sync = False
logger.debug("sync")
self.sync()
@@ -254,7 +260,8 @@ class Sleepycat(Store):
self.__contexts.put(bb(c), "", txn=txn)
contexts_value = cspo.get(
- bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn) or "".encode("latin-1")
+ bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn
+ ) or "".encode("latin-1")
contexts = set(contexts_value.split("^".encode("latin-1")))
contexts.add(bb(c))
contexts_value = "^".encode("latin-1").join(contexts)
@@ -264,12 +271,9 @@ class Sleepycat(Store):
cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), "", txn=txn)
cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), "", txn=txn)
if not quoted:
- cspo.put(bb(
- "%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
- cpos.put(bb(
- "%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
- cosp.put(bb(
- "%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
+ cspo.put(bb("%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
+ cpos.put(bb("%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
+ cosp.put(bb("%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
self.__needs_sync = True
@@ -277,7 +281,11 @@ class Sleepycat(Store):
s, p, o = spo
cspo, cpos, cosp = self.__indicies
contexts_value = cspo.get(
- "^".encode("latin-1").join(["".encode("latin-1"), s, p, o, "".encode("latin-1")]), txn=txn) or "".encode("latin-1")
+ "^".encode("latin-1").join(
+ ["".encode("latin-1"), s, p, o, "".encode("latin-1")]
+ ),
+ txn=txn,
+ ) or "".encode("latin-1")
contexts = set(contexts_value.split("^".encode("latin-1")))
contexts.discard(c)
contexts_value = "^".encode("latin-1").join(contexts)
@@ -286,7 +294,11 @@ class Sleepycat(Store):
if not quoted:
if contexts_value:
for i, _to_key, _from_key in self.__indicies_info:
- i.put(_to_key((s, p, o), "".encode("latin-1")), contexts_value, txn=txn)
+ i.put(
+ _to_key((s, p, o), "".encode("latin-1")),
+ contexts_value,
+ txn=txn,
+ )
else:
for i, _to_key, _from_key in self.__indicies_info:
try:
@@ -304,23 +316,25 @@ class Sleepycat(Store):
if context == self:
context = None
- if subject is not None \
- and predicate is not None \
- and object is not None \
- and context is not None:
+ if (
+ subject is not None
+ and predicate is not None
+ and object is not None
+ and context is not None
+ ):
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
- value = self.__indicies[0].get(bb("%s^%s^%s^%s^" %
- (c, s, p, o)), txn=txn)
+ value = self.__indicies[0].get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
if value is not None:
self.__remove((bb(s), bb(p), bb(o)), bb(c), txn=txn)
self.__needs_sync = True
else:
cspo, cpos, cosp = self.__indicies
index, prefix, from_key, results_from_key = self.__lookup(
- (subject, predicate, object), context, txn=txn)
+ (subject, predicate, object), context, txn=txn
+ )
cursor = index.cursor(txn=txn)
try:
@@ -336,7 +350,7 @@ class Sleepycat(Store):
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
@@ -362,7 +376,8 @@ class Sleepycat(Store):
# remove((None, None, None), c)
try:
self.__contexts.delete(
- bb(_to_string(context, txn=txn)), txn=txn)
+ bb(_to_string(context, txn=txn)), txn=txn
+ )
except db.DBNotFoundError:
pass
@@ -380,7 +395,8 @@ class Sleepycat(Store):
# _from_string = self._from_string ## UNUSED
index, prefix, from_key, results_from_key = self.__lookup(
- (subject, predicate, object), context, txn=txn)
+ (subject, predicate, object), context, txn=txn
+ )
cursor = index.cursor(txn=txn)
try:
@@ -394,14 +410,13 @@ class Sleepycat(Store):
try:
cursor.set_range(key)
# Cheap hack so 2to3 doesn't convert to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
if key and key.startswith(prefix):
contexts_value = index.get(key, txn=txn)
- yield results_from_key(
- key, subject, predicate, object, contexts_value)
+ yield results_from_key(key, subject, predicate, object, contexts_value)
else:
break
@@ -425,7 +440,7 @@ class Sleepycat(Store):
if key.startswith(prefix):
count += 1
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
else:
break
cursor.close()
@@ -444,14 +459,14 @@ class Sleepycat(Store):
prefix = prefix.encode("utf-8")
ns = self.__namespace.get(prefix, None)
if ns is not None:
- return URIRef(ns.decode('utf-8'))
+ return URIRef(ns.decode("utf-8"))
return None
def prefix(self, namespace):
namespace = namespace.encode("utf-8")
prefix = self.__prefix.get(namespace, None)
if prefix is not None:
- return prefix.decode('utf-8')
+ return prefix.decode("utf-8")
return None
def namespaces(self):
@@ -460,9 +475,9 @@ class Sleepycat(Store):
current = cursor.first()
while current:
prefix, namespace = current
- results.append((prefix.decode('utf-8'), namespace.decode('utf-8')))
+ results.append((prefix.decode("utf-8"), namespace.decode("utf-8")))
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
cursor.close()
for prefix, namespace in results:
yield prefix, URIRef(namespace)
@@ -476,8 +491,7 @@ class Sleepycat(Store):
s = _to_string(s)
p = _to_string(p)
o = _to_string(o)
- contexts = self.__indicies[0].get(bb(
- "%s^%s^%s^%s^" % ("", s, p, o)))
+ contexts = self.__indicies[0].get(bb("%s^%s^%s^%s^" % ("", s, p, o)))
if contexts:
for c in contexts.split("^".encode("latin-1")):
if c:
@@ -495,7 +509,7 @@ class Sleepycat(Store):
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
@@ -544,8 +558,7 @@ class Sleepycat(Store):
index, prefix_func, from_key, results_from_key = self.__lookup_dict[i]
# print (subject, predicate, object), context, prefix_func, index
# #DEBUG
- prefix = bb(
- "^".join(prefix_func((subject, predicate, object), context)))
+ prefix = bb("^".join(prefix_func((subject, predicate, object), context)))
return index, prefix, from_key, results_from_key
@@ -553,10 +566,15 @@ def to_key_func(i):
def to_key(triple, context):
"Takes a string; returns key"
return "^".encode("latin-1").join(
- (context,
- triple[i % 3],
- triple[(i + 1) % 3],
- triple[(i + 2) % 3], "".encode("latin-1"))) # "" to tac on the trailing ^
+ (
+ context,
+ triple[i % 3],
+ triple[(i + 1) % 3],
+ triple[(i + 2) % 3],
+ "".encode("latin-1"),
+ )
+ ) # "" to tac on the trailing ^
+
return to_key
@@ -564,11 +582,13 @@ def from_key_func(i):
def from_key(key):
"Takes a key; returns string"
parts = key.split("^".encode("latin-1"))
- return \
- parts[0], \
- parts[(3 - i + 0) % 3 + 1], \
- parts[(3 - i + 1) % 3 + 1], \
- parts[(3 - i + 2) % 3 + 1]
+ return (
+ parts[0],
+ parts[(3 - i + 0) % 3 + 1],
+ parts[(3 - i + 1) % 3 + 1],
+ parts[(3 - i + 2) % 3 + 1],
+ )
+
return from_key
@@ -590,8 +610,11 @@ def results_from_key_func(i, from_string):
o = from_string(parts[(3 - i + 2) % 3 + 1])
else:
o = object
- return (s, p, o), (
- from_string(c) for c in contexts_value.split("^".encode("latin-1")) if c)
+ return (
+ (s, p, o),
+ (from_string(c) for c in contexts_value.split("^".encode("latin-1")) if c),
+ )
+
return from_key
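
The to_key/from_key pair reformatted above round-trips byte keys of the form context^t1^t2^t3^, rotating the triple so that index i leads. A self-contained sketch of the rotation:

    def to_key(i, triple, context):
        # rotate the triple so index i comes first, join with "^", keep the trailing "^"
        parts = (context, triple[i % 3], triple[(i + 1) % 3], triple[(i + 2) % 3], b"")
        return b"^".join(parts)

    print(to_key(0, (b"s", b"p", b"o"), b"c"))  # b'c^s^p^o^'
    print(to_key(1, (b"s", b"p", b"o"), b"c"))  # b'c^p^o^s^'
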
diff --git a/rdflib/plugins/sparql/__init__.py b/rdflib/plugins/sparql/__init__.py
index bc1227f2..9efbd87f 100644
--- a/rdflib/plugins/sparql/__init__.py
+++ b/rdflib/plugins/sparql/__init__.py
@@ -28,7 +28,7 @@ NotImplementedError if they cannot handle a certain part
"""
-PLUGIN_ENTRY_POINT = 'rdf.plugins.sparqleval'
+PLUGIN_ENTRY_POINT = "rdf.plugins.sparqleval"
from . import parser
from . import operators
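
The entry point renamed above is how third-party evaluators land in CUSTOM_EVALS; evalPart tries each registered function, and one signals "not mine" by raising NotImplementedError. A minimal sketch (the dict key is illustrative):

    from rdflib.plugins.sparql import CUSTOM_EVALS
    from rdflib.plugins.sparql.evaluate import evalBGP

    def my_eval(ctx, part):
        if part.name == "BGP":
            return evalBGP(ctx, part.triples)  # custom pre/post-processing could go here
        raise NotImplementedError(part.name)   # defer to the built-in evaluators

    CUSTOM_EVALS["example"] = my_eval
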
diff --git a/rdflib/plugins/sparql/aggregates.py b/rdflib/plugins/sparql/aggregates.py
index 11144778..8c70aeb1 100644
--- a/rdflib/plugins/sparql/aggregates.py
+++ b/rdflib/plugins/sparql/aggregates.py
@@ -39,7 +39,6 @@ class Accumulator(object):
class Counter(Accumulator):
-
def __init__(self, aggregation):
super(Counter, self).__init__(aggregation)
self.value = 0
@@ -71,16 +70,14 @@ class Counter(Accumulator):
def type_safe_numbers(*args):
- if (
- any(isinstance(arg, float) for arg in args) and
- any(isinstance(arg, Decimal) for arg in args)
+ if any(isinstance(arg, float) for arg in args) and any(
+ isinstance(arg, Decimal) for arg in args
):
return map(float, args)
return args
class Sum(Accumulator):
-
def __init__(self, aggregation):
super(Sum, self).__init__(aggregation)
self.value = 0
@@ -107,7 +104,6 @@ class Sum(Accumulator):
class Average(Accumulator):
-
def __init__(self, aggregation):
super(Average, self).__init__(aggregation)
self.counter = 0
@@ -171,13 +167,11 @@ class Extremum(Accumulator):
class Minimum(Extremum):
-
def compare(self, val1, val2):
return min(val1, val2, key=_val)
class Maximum(Extremum):
-
def compare(self, val1, val2):
return max(val1, val2, key=_val)
@@ -205,7 +199,6 @@ class Sample(Accumulator):
class GroupConcat(Accumulator):
-
def __init__(self, aggregation):
super(GroupConcat, self).__init__(aggregation)
# only GROUPCONCAT needs to have a list as accumlator
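
type_safe_numbers above exists because Python refuses to mix float and Decimal arithmetic; the helper coerces everything to float only when both types appear among the arguments. A quick illustration:

    from decimal import Decimal

    args = (Decimal("1.5"), 2.0)   # Decimal("1.5") + 2.0 raises TypeError
    print(sum(map(float, args)))   # 3.5, after the coercion SUM/AVG rely on
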
diff --git a/rdflib/plugins/sparql/algebra.py b/rdflib/plugins/sparql/algebra.py
index 00a6d0b2..f84e51c9 100644
--- a/rdflib/plugins/sparql/algebra.py
+++ b/rdflib/plugins/sparql/algebra.py
@@ -1,4 +1,3 @@
-
"""
Converting the 'parse-tree' output of pyparsing to a SPARQL Algebra expression
@@ -19,9 +18,11 @@ from rdflib import Literal, Variable, URIRef, BNode
from rdflib.plugins.sparql.sparql import Prologue, Query
from rdflib.plugins.sparql.parserutils import CompValue, Expr
from rdflib.plugins.sparql.operators import (
- and_, TrueFilter, simplify as simplifyFilters)
-from rdflib.paths import (
- InvPath, AlternativePath, SequencePath, MulPath, NegatedPath)
+ and_,
+ TrueFilter,
+ simplify as simplifyFilters,
+)
+from rdflib.paths import InvPath, AlternativePath, SequencePath, MulPath, NegatedPath
from pyparsing import ParseResults
@@ -29,63 +30,69 @@ from pyparsing import ParseResults
# ---------------------------
# Some convenience methods
def OrderBy(p, expr):
- return CompValue('OrderBy', p=p, expr=expr)
+ return CompValue("OrderBy", p=p, expr=expr)
def ToMultiSet(p):
- return CompValue('ToMultiSet', p=p)
+ return CompValue("ToMultiSet", p=p)
def Union(p1, p2):
- return CompValue('Union', p1=p1, p2=p2)
+ return CompValue("Union", p1=p1, p2=p2)
def Join(p1, p2):
- return CompValue('Join', p1=p1, p2=p2)
+ return CompValue("Join", p1=p1, p2=p2)
def Minus(p1, p2):
- return CompValue('Minus', p1=p1, p2=p2)
+ return CompValue("Minus", p1=p1, p2=p2)
def Graph(term, graph):
- return CompValue('Graph', term=term, p=graph)
+ return CompValue("Graph", term=term, p=graph)
def BGP(triples=None):
- return CompValue('BGP', triples=triples or [])
+ return CompValue("BGP", triples=triples or [])
def LeftJoin(p1, p2, expr):
- return CompValue('LeftJoin', p1=p1, p2=p2, expr=expr)
+ return CompValue("LeftJoin", p1=p1, p2=p2, expr=expr)
def Filter(expr, p):
- return CompValue('Filter', expr=expr, p=p)
+ return CompValue("Filter", expr=expr, p=p)
def Extend(p, expr, var):
- return CompValue('Extend', p=p, expr=expr, var=var)
+ return CompValue("Extend", p=p, expr=expr, var=var)
def Values(res):
- return CompValue('values', res=res)
+ return CompValue("values", res=res)
def Project(p, PV):
- return CompValue('Project', p=p, PV=PV)
+ return CompValue("Project", p=p, PV=PV)
def Group(p, expr=None):
- return CompValue('Group', p=p, expr=expr)
+ return CompValue("Group", p=p, expr=expr)
def _knownTerms(triple, varsknown, varscount):
- return (len([x for x in triple if x not in varsknown and
- isinstance(x, (Variable, BNode))]),
- -sum(varscount.get(x, 0) for x in triple),
- not isinstance(triple[2], Literal),
- )
+ return (
+ len(
+ [
+ x
+ for x in triple
+ if x not in varsknown and isinstance(x, (Variable, BNode))
+ ]
+ ),
+ -sum(varscount.get(x, 0) for x in triple),
+ not isinstance(triple[2], Literal),
+ )
def reorderTriples(l):
@@ -115,8 +122,7 @@ def reorderTriples(l):
# we sort by decorate/undecorate, since we need the value of the sort keys
while i < len(l):
- l[i:] = sorted((_knownTerms(x[
- 1], varsknown, varscount), x[1]) for x in l[i:])
+ l[i:] = sorted((_knownTerms(x[1], varsknown, varscount), x[1]) for x in l[i:])
t = l[i][0][0] # top block has this many terms bound
j = 0
while i + j < len(l) and l[i + j][0][0] == t:
@@ -132,9 +138,8 @@ def triples(l):
l = reduce(lambda x, y: x + y, l)
if (len(l) % 3) != 0:
- raise Exception('these aint triples')
- return reorderTriples((l[x], l[x + 1], l[x + 2])
- for x in range(0, len(l), 3))
+ raise Exception("these aint triples")
+ return reorderTriples((l[x], l[x + 1], l[x + 2]) for x in range(0, len(l), 3))
def translatePName(p, prologue):
@@ -142,11 +147,12 @@ def translatePName(p, prologue):
Expand prefixed/relative URIs
"""
if isinstance(p, CompValue):
- if p.name == 'pname':
+ if p.name == "pname":
return prologue.absolutize(p)
- if p.name == 'literal':
- return Literal(p.string, lang=p.lang,
- datatype=prologue.absolutize(p.datatype))
+ if p.name == "literal":
+ return Literal(
+ p.string, lang=p.lang, datatype=prologue.absolutize(p.datatype)
+ )
elif isinstance(p, URIRef):
return prologue.absolutize(p)
@@ -157,39 +163,39 @@ def translatePath(p):
"""
if isinstance(p, CompValue):
- if p.name == 'PathAlternative':
+ if p.name == "PathAlternative":
if len(p.part) == 1:
return p.part[0]
else:
return AlternativePath(*p.part)
- elif p.name == 'PathSequence':
+ elif p.name == "PathSequence":
if len(p.part) == 1:
return p.part[0]
else:
return SequencePath(*p.part)
- elif p.name == 'PathElt':
+ elif p.name == "PathElt":
if not p.mod:
return p.part
else:
if isinstance(p.part, list):
if len(p.part) != 1:
- raise Exception('Denkfehler!')
+ raise Exception("Denkfehler!")
return MulPath(p.part[0], p.mod)
else:
return MulPath(p.part, p.mod)
- elif p.name == 'PathEltOrInverse':
+ elif p.name == "PathEltOrInverse":
if isinstance(p.part, list):
if len(p.part) != 1:
- raise Exception('Denkfehler!')
+ raise Exception("Denkfehler!")
return InvPath(p.part[0])
else:
return InvPath(p.part)
- elif p.name == 'PathNegatedPropertySet':
+ elif p.name == "PathNegatedPropertySet":
if isinstance(p.part, list):
return NegatedPath(AlternativePath(*p.part))
else:
@@ -204,9 +210,9 @@ def translateExists(e):
def _c(n):
if isinstance(n, CompValue):
- if n.name in ('Builtin_EXISTS', 'Builtin_NOTEXISTS'):
+ if n.name in ("Builtin_EXISTS", "Builtin_NOTEXISTS"):
n.graph = translateGroupGraphPattern(n.graph)
- if n.graph.name == 'Filter':
+ if n.graph.name == "Filter":
# filters inside (NOT) EXISTS can see vars bound outside
n.graph.no_isolated_scope = True
@@ -229,7 +235,7 @@ def collectAndRemoveFilters(parts):
i = 0
while i < len(parts):
p = parts[i]
- if p.name == 'Filter':
+ if p.name == "Filter":
filters.append(translateExists(p.expr))
parts.pop(i)
else:
@@ -254,8 +260,7 @@ def translateGroupOrUnionGraphPattern(graphPattern):
def translateGraphGraphPattern(graphPattern):
- return Graph(graphPattern.term,
- translateGroupGraphPattern(graphPattern.graph))
+ return Graph(graphPattern.term, translateGroupGraphPattern(graphPattern.graph))
def translateInlineData(graphPattern):
@@ -267,7 +272,7 @@ def translateGroupGraphPattern(graphPattern):
http://www.w3.org/TR/sparql11-query/#convertGraphPattern
"""
- if graphPattern.name == 'SubSelect':
+ if graphPattern.name == "SubSelect":
return ToMultiSet(translate(graphPattern)[0])
if not graphPattern.part:
@@ -277,9 +282,9 @@ def translateGroupGraphPattern(graphPattern):
g = []
for p in graphPattern.part:
- if p.name == 'TriplesBlock':
+ if p.name == "TriplesBlock":
# merge adjacent TripleBlocks
- if not (g and g[-1].name == 'BGP'):
+ if not (g and g[-1].name == "BGP"):
g.append(BGP())
g[-1]["triples"] += triples(p.triples)
else:
@@ -287,30 +292,31 @@ def translateGroupGraphPattern(graphPattern):
G = BGP()
for p in g:
- if p.name == 'OptionalGraphPattern':
+ if p.name == "OptionalGraphPattern":
A = translateGroupGraphPattern(p.graph)
- if A.name == 'Filter':
+ if A.name == "Filter":
G = LeftJoin(G, A.p, A.expr)
else:
G = LeftJoin(G, A, TrueFilter)
- elif p.name == 'MinusGraphPattern':
+ elif p.name == "MinusGraphPattern":
G = Minus(p1=G, p2=translateGroupGraphPattern(p.graph))
- elif p.name == 'GroupOrUnionGraphPattern':
+ elif p.name == "GroupOrUnionGraphPattern":
G = Join(p1=G, p2=translateGroupOrUnionGraphPattern(p))
- elif p.name == 'GraphGraphPattern':
+ elif p.name == "GraphGraphPattern":
G = Join(p1=G, p2=translateGraphGraphPattern(p))
- elif p.name == 'InlineData':
+ elif p.name == "InlineData":
G = Join(p1=G, p2=translateInlineData(p))
- elif p.name == 'ServiceGraphPattern':
+ elif p.name == "ServiceGraphPattern":
G = Join(p1=G, p2=p)
- elif p.name in ('BGP', 'Extend'):
+ elif p.name in ("BGP", "Extend"):
G = Join(p1=G, p2=p)
- elif p.name == 'Bind':
+ elif p.name == "Bind":
G = Extend(G, p.expr, p.var)
else:
- raise Exception('Unknown part in GroupGraphPattern: %s - %s' %
- (type(p), p.name))
+ raise Exception(
+ "Unknown part in GroupGraphPattern: %s - %s" % (type(p), p.name)
+ )
if filters:
G = Filter(expr=filters, p=G)
@@ -372,9 +378,7 @@ def _traverseAgg(e, visitor=lambda n, v: None):
return visitor(e, res)
-def traverse(
- tree, visitPre=lambda n: None,
- visitPost=lambda n: None, complete=None):
+def traverse(tree, visitPre=lambda n: None, visitPost=lambda n: None, complete=None):
"""
Traverse tree, visit each node with visit function
visit function may raise StopTraversal to stop traversal
@@ -397,7 +401,7 @@ def _hasAggregate(x):
"""
if isinstance(x, CompValue):
- if x.name.startswith('Aggregate_'):
+ if x.name.startswith("Aggregate_"):
raise StopTraversal(True)
@@ -409,9 +413,9 @@ def _aggs(e, A):
# TODO: nested Aggregates?
- if isinstance(e, CompValue) and e.name.startswith('Aggregate_'):
+ if isinstance(e, CompValue) and e.name.startswith("Aggregate_"):
A.append(e)
- aggvar = Variable('__agg_%d__' % len(A))
+ aggvar = Variable("__agg_%d__" % len(A))
e["res"] = aggvar
return aggvar
@@ -426,7 +430,7 @@ def _findVars(x, res):
if x.name == "Bind":
res.add(x.var)
return x # stop recursion and finding vars in the expr
- elif x.name == 'SubSelect':
+ elif x.name == "SubSelect":
if x.projection:
res.update(v.var or v.evar for v in x.projection)
return x
@@ -443,13 +447,16 @@ def _addVars(x, children):
x["_vars"] = set()
elif x.name == "Extend":
# vars only used in the expr for a bind should not be included
- x["_vars"] = reduce(operator.or_, [child for child,
- part in zip(children, x) if part != 'expr'], set())
+ x["_vars"] = reduce(
+ operator.or_,
+ [child for child, part in zip(children, x) if part != "expr"],
+ set(),
+ )
else:
x["_vars"] = set(reduce(operator.or_, children, set()))
- if x.name == 'SubSelect':
+ if x.name == "SubSelect":
if x.projection:
s = set(v.var or v.evar for v in x.projection)
else:
@@ -470,7 +477,7 @@ def _sample(e, v=None):
if isinstance(e, CompValue) and e.name.startswith("Aggregate_"):
return e # do not replace vars in aggregates
if isinstance(e, Variable) and v != e:
- return CompValue('Aggregate_Sample', vars=e)
+ return CompValue("Aggregate_Sample", vars=e)
def _simplifyFilters(e):
@@ -505,11 +512,11 @@ def translateAggregates(q, M):
if q.projection:
for v in q.projection:
if v.var:
- rv = Variable('__agg_%d__' % (len(A) + 1))
- A.append(CompValue('Aggregate_Sample', vars=v.var, res=rv))
+ rv = Variable("__agg_%d__" % (len(A) + 1))
+ A.append(CompValue("Aggregate_Sample", vars=v.var, res=rv))
E.append((rv, v.var))
- return CompValue('AggregateJoin', A=A, p=M), E
+ return CompValue("AggregateJoin", A=A, p=M), E
def translateValues(v):
@@ -554,17 +561,22 @@ def translate(q):
conditions = []
# convert "GROUP BY (?expr as ?var)" to an Extend
for c in q.groupby.condition:
- if isinstance(c, CompValue) and c.name == 'GroupAs':
+ if isinstance(c, CompValue) and c.name == "GroupAs":
M = Extend(M, c.expr, c.var)
c = c.var
conditions.append(c)
M = Group(p=M, expr=conditions)
aggregate = True
- elif traverse(q.having, _hasAggregate, complete=False) or \
- traverse(q.orderby, _hasAggregate, complete=False) or \
- any(traverse(x.expr, _hasAggregate, complete=False)
- for x in q.projection or [] if x.evar):
+ elif (
+ traverse(q.having, _hasAggregate, complete=False)
+ or traverse(q.orderby, _hasAggregate, complete=False)
+ or any(
+ traverse(x.expr, _hasAggregate, complete=False)
+ for x in q.projection or []
+ if x.evar
+ )
+ ):
# if any aggregate is used, implicit group by
M = Group(p=M)
aggregate = True
@@ -604,17 +616,22 @@ def translate(q):
# ORDER BY
if q.orderby:
- M = OrderBy(M, [CompValue('OrderCondition', expr=c.expr,
- order=c.order) for c in q.orderby.condition])
+ M = OrderBy(
+ M,
+ [
+ CompValue("OrderCondition", expr=c.expr, order=c.order)
+ for c in q.orderby.condition
+ ],
+ )
# PROJECT
M = Project(M, PV)
if q.modifier:
- if q.modifier == 'DISTINCT':
- M = CompValue('Distinct', p=M)
- elif q.modifier == 'REDUCED':
- M = CompValue('Reduced', p=M)
+ if q.modifier == "DISTINCT":
+ M = CompValue("Distinct", p=M)
+ elif q.modifier == "REDUCED":
+ M = CompValue("Reduced", p=M)
if q.limitoffset:
offset = 0
@@ -622,10 +639,11 @@ def translate(q):
offset = q.limitoffset.offset.toPython()
if q.limitoffset.limit is not None:
- M = CompValue('Slice', p=M, start=offset,
- length=q.limitoffset.limit.toPython())
+ M = CompValue(
+ "Slice", p=M, start=offset, length=q.limitoffset.limit.toPython()
+ )
else:
- M = CompValue('Slice', p=M, start=offset)
+ M = CompValue("Slice", p=M, start=offset)
return M, PV
@@ -633,12 +651,12 @@ def translate(q):
def simplify(n):
"""Remove joins to empty BGPs"""
if isinstance(n, CompValue):
- if n.name == 'Join':
- if n.p1.name == 'BGP' and len(n.p1.triples) == 0:
+ if n.name == "Join":
+ if n.p1.name == "BGP" and len(n.p1.triples) == 0:
return n.p2
- if n.p2.name == 'BGP' and len(n.p2.triples) == 0:
+ if n.p2.name == "BGP" and len(n.p2.triples) == 0:
return n.p1
- elif n.name == 'BGP':
+ elif n.name == "BGP":
n["triples"] = reorderTriples(n.triples)
return n
@@ -651,10 +669,10 @@ def analyse(n, children):
"""
if isinstance(n, CompValue):
- if n.name == 'Join':
+ if n.name == "Join":
n["lazy"] = all(children)
return False
- elif n.name in ('Slice', 'Distinct'):
+ elif n.name in ("Slice", "Distinct"):
return False
else:
return all(children)
@@ -674,9 +692,9 @@ def translatePrologue(p, base, initNs=None, prologue=None):
prologue.bind(k, v)
for x in p:
- if x.name == 'Base':
+ if x.name == "Base":
prologue.base = x.iri
- elif x.name == 'PrefixDecl':
+ elif x.name == "PrefixDecl":
prologue.bind(x.prefix, prologue.absolutize(x.iri))
return prologue
@@ -699,26 +717,24 @@ def translateQuads(quads):
def translateUpdate1(u, prologue):
- if u.name in ('Load', 'Clear', 'Drop', 'Create'):
+ if u.name in ("Load", "Clear", "Drop", "Create"):
pass # no translation needed
- elif u.name in ('Add', 'Move', 'Copy'):
+ elif u.name in ("Add", "Move", "Copy"):
pass
- elif u.name in ('InsertData', 'DeleteData', 'DeleteWhere'):
+ elif u.name in ("InsertData", "DeleteData", "DeleteWhere"):
t, q = translateQuads(u.quads)
u["quads"] = q
u["triples"] = t
- if u.name in ('DeleteWhere', 'DeleteData'):
+ if u.name in ("DeleteWhere", "DeleteData"):
pass # TODO: check for bnodes in triples
- elif u.name == 'Modify':
+ elif u.name == "Modify":
if u.delete:
- u.delete["triples"], u.delete[
- "quads"] = translateQuads(u.delete.quads)
+ u.delete["triples"], u.delete["quads"] = translateQuads(u.delete.quads)
if u.insert:
- u.insert["triples"], u.insert[
- "quads"] = translateQuads(u.insert.quads)
+ u.insert["triples"], u.insert["quads"] = translateQuads(u.insert.quads)
u["where"] = translateGroupGraphPattern(u.where)
else:
- raise Exception('Unknown type of update operation: %s' % u)
+ raise Exception("Unknown type of update operation: %s" % u)
u.prologue = prologue
return u
@@ -737,8 +753,7 @@ def translateUpdate(q, base=None, initNs=None):
prologue = translatePrologue(p, base, initNs, prologue)
# absolutize/resolve prefixes
- u = traverse(
- u, visitPost=functools.partial(translatePName, prologue=prologue))
+ u = traverse(u, visitPost=functools.partial(translatePName, prologue=prologue))
u = _traverse(u, _simplifyFilters)
u = traverse(u, visitPost=translatePath)
@@ -761,17 +776,16 @@ def translateQuery(q, base=None, initNs=None):
# absolutize/resolve prefixes
q[1] = traverse(
- q[1], visitPost=functools.partial(translatePName, prologue=prologue))
+ q[1], visitPost=functools.partial(translatePName, prologue=prologue)
+ )
P, PV = translate(q[1])
datasetClause = q[1].datasetClause
- if q[1].name == 'ConstructQuery':
+ if q[1].name == "ConstructQuery":
template = triples(q[1].template) if q[1].template else None
- res = CompValue(q[1].name, p=P,
- template=template,
- datasetClause=datasetClause)
+ res = CompValue(q[1].name, p=P, template=template, datasetClause=datasetClause)
else:
res = CompValue(q[1].name, p=P, datasetClause=datasetClause, PV=PV)
@@ -792,9 +806,9 @@ def pprintAlgebra(q):
if not isinstance(p, CompValue):
print(p)
return
- print("%s(" % (p.name, ))
+ print("%s(" % (p.name,))
for k in p:
- print("%s%s =" % (ind, k,), end=' ')
+ print("%s%s =" % (ind, k,), end=" ")
pp(p[k], ind + " ")
print("%s)" % ind)
@@ -806,7 +820,7 @@ def pprintAlgebra(q):
pp(x)
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
from rdflib.plugins.sparql import parser
import os.path
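
The __main__ block above exercises the same pipeline; a minimal sketch of driving the translation by hand (the query text is illustrative):

    from rdflib.plugins.sparql import parser
    from rdflib.plugins.sparql.algebra import translateQuery, pprintAlgebra

    parse_tree = parser.parseQuery("SELECT * WHERE { ?s ?p ?o } LIMIT 10")
    query = translateQuery(parse_tree)  # wraps the algebra with a Prologue
    pprintAlgebra(query)                # prints the nested CompValue tree
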
diff --git a/rdflib/plugins/sparql/datatypes.py b/rdflib/plugins/sparql/datatypes.py
index 1e8475e0..5ab8c92f 100644
--- a/rdflib/plugins/sparql/datatypes.py
+++ b/rdflib/plugins/sparql/datatypes.py
@@ -5,17 +5,45 @@ Utility functions for supporting the XML Schema Datatypes hierarchy
from rdflib import XSD
XSD_DTs = set(
- (XSD.integer, XSD.decimal, XSD.float, XSD.double, XSD.string,
- XSD.boolean, XSD.dateTime, XSD.nonPositiveInteger, XSD.negativeInteger,
- XSD.long, XSD.int, XSD.short, XSD.byte, XSD.nonNegativeInteger,
- XSD.unsignedLong, XSD.unsignedInt, XSD.unsignedShort, XSD.unsignedByte,
- XSD.positiveInteger, XSD.date))
+ (
+ XSD.integer,
+ XSD.decimal,
+ XSD.float,
+ XSD.double,
+ XSD.string,
+ XSD.boolean,
+ XSD.dateTime,
+ XSD.nonPositiveInteger,
+ XSD.negativeInteger,
+ XSD.long,
+ XSD.int,
+ XSD.short,
+ XSD.byte,
+ XSD.nonNegativeInteger,
+ XSD.unsignedLong,
+ XSD.unsignedInt,
+ XSD.unsignedShort,
+ XSD.unsignedByte,
+ XSD.positiveInteger,
+ XSD.date,
+ )
+)
_sub_types = {
XSD.integer: [
- XSD.nonPositiveInteger, XSD.negativeInteger, XSD.long, XSD.int,
- XSD.short, XSD.byte, XSD.nonNegativeInteger, XSD.positiveInteger,
- XSD.unsignedLong, XSD.unsignedInt, XSD.unsignedShort, XSD.unsignedByte],
+ XSD.nonPositiveInteger,
+ XSD.negativeInteger,
+ XSD.long,
+ XSD.int,
+ XSD.short,
+ XSD.byte,
+ XSD.nonNegativeInteger,
+ XSD.positiveInteger,
+ XSD.unsignedLong,
+ XSD.unsignedInt,
+ XSD.unsignedShort,
+ XSD.unsignedByte,
+ ],
}
_super_types = {}
@@ -25,21 +53,22 @@ for superdt in XSD_DTs:
# we only care about float, double, integer, decimal
_typePromotionMap = {
- XSD.float: {XSD.integer: XSD.float,
- XSD.decimal: XSD.float,
- XSD.double: XSD.double},
-
- XSD.double: {XSD.integer: XSD.double,
- XSD.float: XSD.double,
- XSD.decimal: XSD.double},
-
- XSD.decimal: {XSD.integer: XSD.decimal,
- XSD.float: XSD.float,
- XSD.double: XSD.double},
-
- XSD.integer: {XSD.decimal: XSD.decimal,
- XSD.float: XSD.float,
- XSD.double: XSD.double}
+ XSD.float: {XSD.integer: XSD.float, XSD.decimal: XSD.float, XSD.double: XSD.double},
+ XSD.double: {
+ XSD.integer: XSD.double,
+ XSD.float: XSD.double,
+ XSD.decimal: XSD.double,
+ },
+ XSD.decimal: {
+ XSD.integer: XSD.decimal,
+ XSD.float: XSD.float,
+ XSD.double: XSD.double,
+ },
+ XSD.integer: {
+ XSD.decimal: XSD.decimal,
+ XSD.float: XSD.float,
+ XSD.double: XSD.double,
+ },
}
@@ -53,5 +82,4 @@ def type_promotion(t1, t2):
try:
return _typePromotionMap[t1][t2]
except KeyError:
- raise TypeError(
- 'Operators cannot combine datatypes %s and %s' % (t1, t2))
+ raise TypeError("Operators cannot combine datatypes %s and %s" % (t1, t2))
diff --git a/rdflib/plugins/sparql/evaluate.py b/rdflib/plugins/sparql/evaluate.py
index fc9b0c30..43b3d0b0 100644
--- a/rdflib/plugins/sparql/evaluate.py
+++ b/rdflib/plugins/sparql/evaluate.py
@@ -24,9 +24,22 @@ from rdflib import Variable, Graph, BNode, URIRef, Literal
from rdflib.plugins.sparql import CUSTOM_EVALS
from rdflib.plugins.sparql.parserutils import value
from rdflib.plugins.sparql.sparql import (
- QueryContext, AlreadyBound, FrozenBindings, Bindings, SPARQLError)
+ QueryContext,
+ AlreadyBound,
+ FrozenBindings,
+ Bindings,
+ SPARQLError,
+)
from rdflib.plugins.sparql.evalutils import (
- _filter, _eval, _join, _diff, _minus, _fillTemplate, _ebv, _val)
+ _filter,
+ _eval,
+ _join,
+ _diff,
+ _minus,
+ _fillTemplate,
+ _ebv,
+ _val,
+)
from rdflib.plugins.sparql.aggregates import Aggregator
from rdflib.plugins.sparql.algebra import Join, ToMultiSet, Values
@@ -143,9 +156,10 @@ def evalLeftJoin(ctx, join):
# check that we would have had no OPTIONAL matches
# even without prior bindings...
p1_vars = join.p1._vars
- if p1_vars is None \
- or not any(_ebv(join.expr, b) for b in
- evalPart(ctx.thaw(a.remember(p1_vars)), join.p2)):
+ if p1_vars is None or not any(
+ _ebv(join.expr, b)
+ for b in evalPart(ctx.thaw(a.remember(p1_vars)), join.p2)
+ ):
yield a
@@ -153,7 +167,10 @@ def evalLeftJoin(ctx, join):
def evalFilter(ctx, part):
# TODO: Deal with dict returned from evalPart!
for c in evalPart(ctx, part.p):
- if _ebv(part.expr, c.forget(ctx, _except=part._vars) if not part.no_isolated_scope else c):
+ if _ebv(
+ part.expr,
+ c.forget(ctx, _except=part._vars) if not part.no_isolated_scope else c,
+ ):
yield c
@@ -161,8 +178,9 @@ def evalGraph(ctx, part):
if ctx.dataset is None:
raise Exception(
- "Non-conjunctive-graph doesn't know about " +
- "graphs. Try a query without GRAPH.")
+ "Non-conjunctive-graph doesn't know about "
+ + "graphs. Try a query without GRAPH."
+ )
ctx = ctx.clone()
graph = ctx[part.term]
@@ -191,7 +209,7 @@ def evalValues(ctx, part):
c = ctx.push()
try:
for k, v in r.items():
- if v != 'UNDEF':
+ if v != "UNDEF":
c[k] = v
except AlreadyBound:
continue
@@ -201,7 +219,7 @@ def evalValues(ctx, part):
def evalMultiset(ctx, part):
- if part.p.name == 'values':
+ if part.p.name == "values":
return evalValues(ctx, part)
return evalPart(ctx, part.p)
@@ -216,91 +234,102 @@ def evalPart(ctx, part):
except NotImplementedError:
pass # the given custome-function did not handle this part
- if part.name == 'BGP':
+ if part.name == "BGP":
# Reorder triples patterns by number of bound nodes in the current ctx
# Do patterns with more bound nodes first
- triples = sorted(part.triples, key=lambda t: len([n for n in t if ctx[n] is None]))
+ triples = sorted(
+ part.triples, key=lambda t: len([n for n in t if ctx[n] is None])
+ )
return evalBGP(ctx, triples)
- elif part.name == 'Filter':
+ elif part.name == "Filter":
return evalFilter(ctx, part)
- elif part.name == 'Join':
+ elif part.name == "Join":
return evalJoin(ctx, part)
- elif part.name == 'LeftJoin':
+ elif part.name == "LeftJoin":
return evalLeftJoin(ctx, part)
- elif part.name == 'Graph':
+ elif part.name == "Graph":
return evalGraph(ctx, part)
- elif part.name == 'Union':
+ elif part.name == "Union":
return evalUnion(ctx, part)
- elif part.name == 'ToMultiSet':
+ elif part.name == "ToMultiSet":
return evalMultiset(ctx, part)
- elif part.name == 'Extend':
+ elif part.name == "Extend":
return evalExtend(ctx, part)
- elif part.name == 'Minus':
+ elif part.name == "Minus":
return evalMinus(ctx, part)
- elif part.name == 'Project':
+ elif part.name == "Project":
return evalProject(ctx, part)
- elif part.name == 'Slice':
+ elif part.name == "Slice":
return evalSlice(ctx, part)
- elif part.name == 'Distinct':
+ elif part.name == "Distinct":
return evalDistinct(ctx, part)
- elif part.name == 'Reduced':
+ elif part.name == "Reduced":
return evalReduced(ctx, part)
- elif part.name == 'OrderBy':
+ elif part.name == "OrderBy":
return evalOrderBy(ctx, part)
- elif part.name == 'Group':
+ elif part.name == "Group":
return evalGroup(ctx, part)
- elif part.name == 'AggregateJoin':
+ elif part.name == "AggregateJoin":
return evalAggregateJoin(ctx, part)
- elif part.name == 'SelectQuery':
+ elif part.name == "SelectQuery":
return evalSelectQuery(ctx, part)
- elif part.name == 'AskQuery':
+ elif part.name == "AskQuery":
return evalAskQuery(ctx, part)
- elif part.name == 'ConstructQuery':
+ elif part.name == "ConstructQuery":
return evalConstructQuery(ctx, part)
- elif part.name == 'ServiceGraphPattern':
+ elif part.name == "ServiceGraphPattern":
return evalServiceQuery(ctx, part)
- #raise Exception('ServiceGraphPattern not implemented')
+ # raise Exception('ServiceGraphPattern not implemented')
- elif part.name == 'DescribeQuery':
- raise Exception('DESCRIBE not implemented')
+ elif part.name == "DescribeQuery":
+ raise Exception("DESCRIBE not implemented")
else:
- raise Exception('I dont know: %s' % part.name)
+ raise Exception("I dont know: %s" % part.name)
+
def evalServiceQuery(ctx, part):
res = {}
- match = re.match('^service <(.*)>[ \n]*{(.*)}[ \n]*$',
- part.get('service_string', ''), re.DOTALL | re.I)
+ match = re.match(
+ "^service <(.*)>[ \n]*{(.*)}[ \n]*$",
+ part.get("service_string", ""),
+ re.DOTALL | re.I,
+ )
if match:
service_url = match.group(1)
service_query = _buildQueryStringForServiceCall(ctx, match)
- query_settings = {'query': service_query,
- 'output': 'json'}
- headers = {'accept' : 'application/sparql-results+json',
- 'user-agent': 'rdflibForAnUser'}
+ query_settings = {"query": service_query, "output": "json"}
+ headers = {
+ "accept": "application/sparql-results+json",
+ "user-agent": "rdflibForAnUser",
+ }
# GET is easier to cache so prefer that if the query is not to long
if len(service_query) < 600:
response = requests.get(service_url, params=query_settings, headers=headers)
else:
- response = requests.post(service_url, params=query_settings, headers=headers)
+ response = requests.post(
+ service_url, params=query_settings, headers=headers
+ )
if response.status_code == 200:
- json = response.json();
- variables = res["vars_"] = json['head']['vars']
+ json = response.json()
+ variables = res["vars_"] = json["head"]["vars"]
# or just return the bindings?
- res = json['results']['bindings']
+ res = json["results"]["bindings"]
if len(res) > 0:
for r in res:
for bound in _yieldBindingsFromServiceCallResult(ctx, r, variables):
yield bound
else:
- raise Exception("Service: %s responded with code: %s", service_url, response.status_code);
+ raise Exception(
+ "Service: %s responded with code: %s", service_url, response.status_code
+ )
"""
@@ -309,6 +338,8 @@ def evalServiceQuery(ctx, part):
Re-adds prefixes if added and sets the base.
Wraps it in select if needed.
"""
+
+
def _buildQueryStringForServiceCall(ctx, match):
service_query = match.group(2)
@@ -316,18 +347,20 @@ def _buildQueryStringForServiceCall(ctx, match):
parser.parseQuery(service_query)
except ParseException:
# This could be because we don't have a select around the service call.
- service_query = 'SELECT REDUCED * WHERE {' + service_query + '}'
+ service_query = "SELECT REDUCED * WHERE {" + service_query + "}"
for p in ctx.prologue.namespace_manager.store.namespaces():
- service_query = 'PREFIX ' + p[0] + ':' + p[1].n3() + ' ' + service_query
+ service_query = "PREFIX " + p[0] + ":" + p[1].n3() + " " + service_query
# re add the base if one was defined
base = ctx.prologue.base
if base is not None and len(base) > 0:
- service_query = 'BASE <' + base + '> ' + service_query
- sol = ctx.solution();
+ service_query = "BASE <" + base + "> " + service_query
+ sol = ctx.solution()
if len(sol) > 0:
- variables = ' '.join(map(lambda v:v.n3(), sol))
- variables_bound = ' '.join(map(lambda v: ctx.get(v).n3(), sol))
- service_query = service_query + 'VALUES (' + variables + ') {(' + variables_bound + ')}'
+ variables = " ".join(map(lambda v: v.n3(), sol))
+ variables_bound = " ".join(map(lambda v: ctx.get(v).n3(), sol))
+ service_query = (
+ service_query + "VALUES (" + variables + ") {(" + variables_bound + ")}"
+ )
return service_query
@@ -335,14 +368,18 @@ def _yieldBindingsFromServiceCallResult(ctx, r, variables):
res_dict = {}
for var in variables:
if var in r and r[var]:
- if r[var]['type'] == "uri":
+ if r[var]["type"] == "uri":
res_dict[Variable(var)] = URIRef(r[var]["value"])
- elif r[var]['type'] == "bnode":
+ elif r[var]["type"] == "bnode":
res_dict[Variable(var)] = BNode(r[var]["value"])
- elif r[var]['type'] == "literal" and 'datatype' in r[var]:
- res_dict[Variable(var)] = Literal(r[var]["value"], datatype=r[var]['datatype'])
- elif r[var]['type'] == "literal" and 'xml:lang' in r[var]:
- res_dict[Variable(var)] = Literal(r[var]["value"], lang=r[var]['xml:lang'])
+ elif r[var]["type"] == "literal" and "datatype" in r[var]:
+ res_dict[Variable(var)] = Literal(
+ r[var]["value"], datatype=r[var]["datatype"]
+ )
+ elif r[var]["type"] == "literal" and "xml:lang" in r[var]:
+ res_dict[Variable(var)] = Literal(
+ r[var]["value"], lang=r[var]["xml:lang"]
+ )
yield FrozenBindings(ctx, res_dict)
@@ -389,8 +426,10 @@ def evalOrderBy(ctx, part):
for e in reversed(part.expr):
- reverse = bool(e.order and e.order == 'DESC')
- res = sorted(res, key=lambda x: _val(value(x, e.expr, variables=True)), reverse=reverse)
+ reverse = bool(e.order and e.order == "DESC")
+ res = sorted(
+ res, key=lambda x: _val(value(x, e.expr, variables=True)), reverse=reverse
+ )
return res
@@ -398,7 +437,11 @@ def evalOrderBy(ctx, part):
def evalSlice(ctx, slice):
res = evalPart(ctx, slice.p)
- return itertools.islice(res, slice.start, slice.start + slice.length if slice.length is not None else None)
+ return itertools.islice(
+ res,
+ slice.start,
+ slice.start + slice.length if slice.length is not None else None,
+ )
def evalReduced(ctx, part):
@@ -506,8 +549,9 @@ def evalQuery(graph, query, initBindings, base=None):
if main.datasetClause:
if ctx.dataset is None:
raise Exception(
- "Non-conjunctive-graph doesn't know about " +
- "graphs! Try a query without FROM (NAMED).")
+ "Non-conjunctive-graph doesn't know about "
+ + "graphs! Try a query without FROM (NAMED)."
+ )
ctx = ctx.clone() # or push/pop?
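
evalServiceQuery above rewrites the SERVICE block into a standalone query and sends it over HTTP via requests, preferring GET for short queries. A hedged sketch of a query that exercises it; the endpoint URL is illustrative, network access is required, and SERVICE support in this version is best-effort:

    import rdflib

    g = rdflib.Graph()
    q = """
        SELECT ?s WHERE {
            SERVICE <https://dbpedia.org/sparql> { ?s ?p ?o }
        } LIMIT 3
    """
    for row in g.query(q):
        print(row.s)
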
diff --git a/rdflib/plugins/sparql/evalutils.py b/rdflib/plugins/sparql/evalutils.py
index 25353fe0..8bf1981d 100644
--- a/rdflib/plugins/sparql/evalutils.py
+++ b/rdflib/plugins/sparql/evalutils.py
@@ -49,8 +49,7 @@ def _ebv(expr, ctx):
except SPARQLError:
return False # filter error == False
elif isinstance(expr, CompValue):
- raise Exception(
- "Weird - filter got a CompValue without evalfn! %r" % expr)
+ raise Exception("Weird - filter got a CompValue without evalfn! %r" % expr)
elif isinstance(expr, Variable):
try:
return EBV(ctx[expr])
@@ -73,8 +72,7 @@ def _eval(expr, ctx, raise_not_bound_error=True):
else:
return None
elif isinstance(expr, CompValue):
- raise Exception(
- "Weird - _eval got a CompValue without evalfn! %r" % expr)
+ raise Exception("Weird - _eval got a CompValue without evalfn! %r" % expr)
else:
raise Exception("Cannot eval thing: %s (%s)" % (expr, type(expr)))
@@ -101,12 +99,11 @@ def _fillTemplate(template, solution):
_o = solution.get(o)
# instantiate new bnodes for each solution
- _s, _p, _o = [bnodeMap[x] if isinstance(
- x, BNode) else y for x, y in zip(t, (_s, _p, _o))]
+ _s, _p, _o = [
+ bnodeMap[x] if isinstance(x, BNode) else y for x, y in zip(t, (_s, _p, _o))
+ ]
- if _s is not None and \
- _p is not None and \
- _o is not None:
+ if _s is not None and _p is not None and _o is not None:
yield (_s, _p, _o)
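
_fillTemplate above mints a fresh blank node per solution when a CONSTRUCT template contains bnodes, and skips bindings where any term is unbound. The visible effect, with illustrative data:

    from rdflib import Graph

    data = '<http://example.org/a> <http://example.org/p> "x" .'
    g = Graph().parse(data=data, format="turtle")
    q = 'CONSTRUCT { [] <http://example.org/p> ?o } WHERE { ?s <http://example.org/p> ?o }'
    for s, p, o in g.query(q):
        print(s, p, o)  # subject is a freshly instantiated BNode
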
diff --git a/rdflib/plugins/sparql/operators.py b/rdflib/plugins/sparql/operators.py
index 52558aa9..8f644a1f 100644
--- a/rdflib/plugins/sparql/operators.py
+++ b/rdflib/plugins/sparql/operators.py
@@ -49,7 +49,7 @@ def Builtin_IRI(expr, ctx):
if isinstance(a, Literal):
return ctx.prologue.absolutize(URIRef(a))
- raise SPARQLError('IRI function only accepts URIRefs or Literals/Strings!')
+ raise SPARQLError("IRI function only accepts URIRefs or Literals/Strings!")
def Builtin_isBLANK(expr, ctx):
@@ -85,8 +85,7 @@ def Builtin_BNODE(expr, ctx):
if isinstance(a, Literal):
return ctx.bnodes[a] # defaultdict does the right thing
- raise SPARQLError(
- 'BNode function only accepts no argument or literal/string')
+ raise SPARQLError("BNode function only accepts no argument or literal/string")
def Builtin_ABS(expr, ctx):
@@ -158,11 +157,10 @@ def Builtin_COALESCE(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-coalesce
"""
- for x in expr.get('arg', variables=True):
+ for x in expr.get("arg", variables=True):
if x is not None and not isinstance(x, (SPARQLError, Variable)):
return x
- raise SPARQLError(
- "COALESCE got no arguments that did not evaluate to an error")
+ raise SPARQLError("COALESCE got no arguments that did not evaluate to an error")
def Builtin_CEIL(expr, ctx):
@@ -214,8 +212,7 @@ def Builtin_REGEX(expr, ctx):
if flags:
# Maps XPath REGEX flags (http://www.w3.org/TR/xpath-functions/#flags)
# to Python's re flags
- flagMap = dict(
- [('i', re.IGNORECASE), ('s', re.DOTALL), ('m', re.MULTILINE)])
+ flagMap = dict([("i", re.IGNORECASE), ("s", re.DOTALL), ("m", re.MULTILINE)])
cFlag = reduce(pyop.or_, [flagMap.get(f, 0) for f in flags])
return Literal(bool(re.search(str(pattern), text, cFlag)))
@@ -231,7 +228,7 @@ def Builtin_REPLACE(expr, ctx):
flags = expr.flags
# python uses \1, xpath/sparql uses $1
- replacement = re.sub('\\$([0-9]*)', r'\\\1', replacement)
+ replacement = re.sub("\\$([0-9]*)", r"\\\1", replacement)
def _r(m):
@@ -245,7 +242,7 @@ def Builtin_REPLACE(expr, ctx):
# the match object is replaced with a wrapper that
# returns "" instead of None for unmatched groups
- class _m():
+ class _m:
def __init__(self, m):
self.m = m
self.string = m.string
@@ -259,8 +256,7 @@ def Builtin_REPLACE(expr, ctx):
if flags:
# Maps XPath REGEX flags (http://www.w3.org/TR/xpath-functions/#flags)
# to Python's re flags
- flagMap = dict(
- [('i', re.IGNORECASE), ('s', re.DOTALL), ('m', re.MULTILINE)])
+ flagMap = dict([("i", re.IGNORECASE), ("s", re.DOTALL), ("m", re.MULTILINE)])
cFlag = reduce(pyop.or_, [flagMap.get(f, 0) for f in flags])
# @@FIXME@@ either datatype OR lang, NOT both
@@ -269,8 +265,11 @@ def Builtin_REPLACE(expr, ctx):
# python versions. see comments above in _r(m).
compat_r = str(replacement) if sys.version_info[:2] >= (3, 5) else _r
- return Literal(re.sub(str(pattern), compat_r, text, cFlag),
- datatype=text.datatype, lang=text.language)
+ return Literal(
+ re.sub(str(pattern), compat_r, text, cFlag),
+ datatype=text.datatype,
+ lang=text.language,
+ )
def Builtin_STRDT(expr, ctx):
@@ -288,7 +287,7 @@ def Builtin_STRLANG(expr, ctx):
s = string(expr.arg1)
if s.language or s.datatype:
- raise SPARQLError('STRLANG expects a simple literal')
+ raise SPARQLError("STRLANG expects a simple literal")
# TODO: normalisation of lang tag to lower-case
# should probably happen in literal __init__
@@ -308,8 +307,7 @@ def Builtin_CONCAT(expr, ctx):
lang = set(x.language for x in expr.arg)
lang = lang.pop() if len(lang) == 1 else None
- return Literal("".join(string(x)
- for x in expr.arg), datatype=dt, lang=lang)
+ return Literal("".join(string(x) for x in expr.arg), datatype=dt, lang=lang)
def _compatibleStrings(a, b):
@@ -317,7 +315,7 @@ def _compatibleStrings(a, b):
string(b)
if b.language and a.language != b.language:
- raise SPARQLError('incompatible arguments to str functions')
+ raise SPARQLError("incompatible arguments to str functions")
def Builtin_STRSTARTS(expr, ctx):
@@ -373,7 +371,7 @@ def Builtin_STRAFTER(expr, ctx):
if i == -1:
return Literal("")
else:
- return Literal(a[i + len(b):], lang=a.language, datatype=a.datatype)
+ return Literal(a[i + len(b) :], lang=a.language, datatype=a.datatype)
def Builtin_CONTAINS(expr, ctx):
@@ -491,7 +489,7 @@ def Builtin_TIMEZONE(e, ctx):
"""
dt = datetime(e.arg)
if not dt.tzinfo:
- raise SPARQLError('datatime has no timezone: %r' % dt)
+ raise SPARQLError("datatime has no timezone: %r" % dt)
delta = dt.tzinfo.utcoffset(ctx.now)
@@ -508,11 +506,13 @@ def Builtin_TIMEZONE(e, ctx):
m = (s - h * 60 * 60) / 60
s = s - h * 60 * 60 - m * 60
- tzdelta = "%sP%sT%s%s%s" % (neg,
- "%dD" % d if d else "",
- "%dH" % h if h else "",
- "%dM" % m if m else "",
- "%dS" % s if not d and not h and not m else "")
+ tzdelta = "%sP%sT%s%s%s" % (
+ neg,
+ "%dD" % d if d else "",
+ "%dH" % h if h else "",
+ "%dM" % m if m else "",
+ "%dS" % s if not d and not h and not m else "",
+ )
return Literal(tzdelta, datatype=XSD.dayTimeDuration)
@@ -549,7 +549,7 @@ def Builtin_LANG(e, ctx):
def Builtin_DATATYPE(e, ctx):
l = e.arg
if not isinstance(l, Literal):
- raise SPARQLError('Can only get datatype of literal: %r' % l)
+ raise SPARQLError("Can only get datatype of literal: %r" % l)
if l.language:
return RDF_langString
if not l.datatype and not l.language:
@@ -567,7 +567,7 @@ def Builtin_BOUND(e, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-bound
"""
- n = e.get('arg', variables=True)
+ n = e.get("arg", variables=True)
return Literal(not isinstance(n, Variable))
@@ -576,7 +576,7 @@ def Builtin_EXISTS(e, ctx):
# damn...
from rdflib.plugins.sparql.evaluate import evalPart
- exists = e.name == 'Builtin_EXISTS'
+ exists = e.name == "Builtin_EXISTS"
ctx = ctx.ctx.thaw(ctx) # hmm
for x in evalPart(ctx, e.graph):
@@ -605,9 +605,11 @@ def custom_function(uri, override=False, raw=False):
"""
Decorator version of :func:`register_custom_function`.
"""
+
def decorator(func):
register_custom_function(uri, func, override=override, raw=raw)
return func
+
return decorator
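
The decorator above is the sugar form of register_custom_function; a non-raw function receives already-evaluated terms and returns a term. A sketch with an illustrative function IRI:

    from rdflib import Graph, Literal, URIRef
    from rdflib.plugins.sparql.operators import custom_function

    EX_REVERSE = URIRef("http://example.org/reverse")  # illustrative IRI

    @custom_function(EX_REVERSE)
    def reverse(arg):
        return Literal(str(arg)[::-1])

    # usage: Graph().query('SELECT (<http://example.org/reverse>("abc") AS ?r) WHERE {}')
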
@@ -624,7 +626,7 @@ def Function(e, ctx):
pair = _CUSTOM_FUNCTIONS.get(e.iri)
if pair is None:
# no such function is registered
- raise SPARQLError('Unknown function %r' % e.iri)
+ raise SPARQLError("Unknown function %r" % e.iri)
func, raw = pair
if raw:
# function expects expression and context
@@ -658,21 +660,17 @@ def default_cast(e, ctx):
if isinstance(x, (URIRef, Literal)):
return Literal(x, datatype=XSD.string)
else:
- raise SPARQLError(
- "Cannot cast term %r of type %r" % (x, type(x)))
+ raise SPARQLError("Cannot cast term %r of type %r" % (x, type(x)))
if not isinstance(x, Literal):
- raise SPARQLError(
- "Can only cast Literals to non-string data-types")
+ raise SPARQLError("Can only cast Literals to non-string data-types")
    if x.datatype and x.datatype not in XSD_DTs:
- raise SPARQLError(
- "Cannot cast literal with unknown datatype: %r" % x.datatype)
+ raise SPARQLError("Cannot cast literal with unknown datatype: %r" % x.datatype)
if e.iri == XSD.dateTime:
if x.datatype and x.datatype not in (XSD.dateTime, XSD.string):
- raise SPARQLError(
- "Cannot cast %r to XSD:dateTime" % x.datatype)
+ raise SPARQLError("Cannot cast %r to XSD:dateTime" % x.datatype)
try:
return Literal(isodate.parse_datetime(x), datatype=e.iri)
except:
@@ -742,12 +740,12 @@ def MultiplicativeExpression(e, ctx):
if type(f) == float:
res = float(res)
- if op == '*':
+ if op == "*":
res *= f
else:
res /= f
except (InvalidOperation, ZeroDivisionError):
- raise SPARQLError('divide by 0')
+ raise SPARQLError("divide by 0")
return Literal(res)
@@ -775,7 +773,7 @@ def AdditiveExpression(e, ctx):
dt = type_promotion(dt, term.datatype)
- if op == '+':
+ if op == "+":
res += n
else:
res -= n
@@ -794,18 +792,22 @@ def RelationalExpression(e, ctx):
if other is None:
return expr
- ops = dict([('>', lambda x, y: x.__gt__(y)),
- ('<', lambda x, y: x.__lt__(y)),
- ('=', lambda x, y: x.eq(y)),
- ('!=', lambda x, y: x.neq(y)),
- ('>=', lambda x, y: x.__ge__(y)),
- ('<=', lambda x, y: x.__le__(y)),
- ('IN', pyop.contains),
- ('NOT IN', lambda x, y: not pyop.contains(x, y))])
+ ops = dict(
+ [
+ (">", lambda x, y: x.__gt__(y)),
+ ("<", lambda x, y: x.__lt__(y)),
+ ("=", lambda x, y: x.eq(y)),
+ ("!=", lambda x, y: x.neq(y)),
+ (">=", lambda x, y: x.__ge__(y)),
+ ("<=", lambda x, y: x.__le__(y)),
+ ("IN", pyop.contains),
+ ("NOT IN", lambda x, y: not pyop.contains(x, y)),
+ ]
+ )
- if op in ('IN', 'NOT IN'):
+ if op in ("IN", "NOT IN"):
- res = (op == 'NOT IN')
+ res = op == "NOT IN"
error = False
@@ -823,33 +825,37 @@ def RelationalExpression(e, ctx):
else:
raise error
- if not op in ('=', '!=', 'IN', 'NOT IN'):
+    if op not in ("=", "!=", "IN", "NOT IN"):
if not isinstance(expr, Literal):
raise SPARQLError(
- "Compare other than =, != of non-literals is an error: %r" %
- expr)
+ "Compare other than =, != of non-literals is an error: %r" % expr
+ )
if not isinstance(other, Literal):
raise SPARQLError(
- "Compare other than =, != of non-literals is an error: %r" %
- other)
+ "Compare other than =, != of non-literals is an error: %r" % other
+ )
else:
if not isinstance(expr, Node):
- raise SPARQLError('I cannot compare this non-node: %r' % expr)
+ raise SPARQLError("I cannot compare this non-node: %r" % expr)
if not isinstance(other, Node):
- raise SPARQLError('I cannot compare this non-node: %r' % other)
+ raise SPARQLError("I cannot compare this non-node: %r" % other)
if isinstance(expr, Literal) and isinstance(other, Literal):
- if expr.datatype is not None and expr.datatype not in XSD_DTs and other.datatype is not None and other.datatype not in XSD_DTs:
+ if (
+ expr.datatype is not None
+ and expr.datatype not in XSD_DTs
+ and other.datatype is not None
+ and other.datatype not in XSD_DTs
+ ):
# in SPARQL for non-XSD DT Literals we can only do =,!=
- if op not in ('=', '!='):
- raise SPARQLError(
- 'Can only do =,!= comparisons of non-XSD Literals')
+ if op not in ("=", "!="):
+ raise SPARQLError("Can only do =,!= comparisons of non-XSD Literals")
try:
r = ops[op](expr, other)
if r == NotImplemented:
- raise SPARQLError('Error when comparing')
+ raise SPARQLError("Error when comparing")
except TypeError as te:
raise SPARQLError(*te.args)
return Literal(r)
@@ -897,18 +903,22 @@ def ConditionalOrExpression(e, ctx):
def not_(arg):
- return Expr('UnaryNot', UnaryNot, expr=arg)
+ return Expr("UnaryNot", UnaryNot, expr=arg)
def and_(*args):
if len(args) == 1:
return args[0]
- return Expr('ConditionalAndExpression', ConditionalAndExpression,
- expr=args[0], other=list(args[1:]))
+ return Expr(
+ "ConditionalAndExpression",
+ ConditionalAndExpression,
+ expr=args[0],
+ other=list(args[1:]),
+ )
-TrueFilter = Expr('TrueFilter', lambda _1, _2: Literal(True))
+TrueFilter = Expr("TrueFilter", lambda _1, _2: Literal(True))
def simplify(expr):
@@ -919,7 +929,7 @@ def simplify(expr):
return list(map(simplify, expr))
if not isinstance(expr, CompValue):
return expr
- if expr.name.endswith('Expression'):
+ if expr.name.endswith("Expression"):
if expr.other is None:
return simplify(expr.expr)
@@ -941,8 +951,7 @@ def datetime(e):
if not isinstance(e, Literal):
raise SPARQLError("Non-literal passed as datetime: %r" % e)
    if e.datatype != XSD.dateTime:
- raise SPARQLError(
- "Literal with wrong datatype passed as datetime: %r" % e)
+ raise SPARQLError("Literal with wrong datatype passed as datetime: %r" % e)
return e.toPython()
@@ -954,8 +963,7 @@ def string(s):
if not isinstance(s, Literal):
        raise SPARQLError("Non-literal passed as string: %r" % s)
if s.datatype and s.datatype != XSD.string:
- raise SPARQLError(
- "Non-string datatype-literal passes as string: %r" % s)
+        raise SPARQLError("Non-string datatype-literal passed as string: %r" % s)
return s
@@ -970,13 +978,24 @@ def numeric(expr):
if not isinstance(expr, Literal):
raise SPARQLTypeError("%r is not a literal!" % expr)
- if expr.datatype not in (XSD.float, XSD.double,
- XSD.decimal, XSD.integer,
- XSD.nonPositiveInteger, XSD.negativeInteger,
- XSD.nonNegativeInteger, XSD.positiveInteger,
- XSD.unsignedLong, XSD.unsignedInt,
- XSD.unsignedShort, XSD.unsignedByte,
- XSD.long, XSD.int, XSD.short, XSD.byte):
+ if expr.datatype not in (
+ XSD.float,
+ XSD.double,
+ XSD.decimal,
+ XSD.integer,
+ XSD.nonPositiveInteger,
+ XSD.negativeInteger,
+ XSD.nonNegativeInteger,
+ XSD.positiveInteger,
+ XSD.unsignedLong,
+ XSD.unsignedInt,
+ XSD.unsignedShort,
+ XSD.unsignedByte,
+ XSD.long,
+ XSD.int,
+ XSD.short,
+ XSD.byte,
+ ):
raise SPARQLTypeError("%r does not have a numeric datatype!" % expr)
return expr.toPython()
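A hedged usage sketch of numeric (the literals are illustrative; Literal and XSD are already in this module's namespace):

numeric(Literal("4", datatype=XSD.integer))   # -> 4
numeric(Literal("4.5", datatype=XSD.double))  # -> 4.5
numeric(Literal("x", datatype=XSD.string))    # raises SPARQLTypeError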
@@ -1011,14 +1030,18 @@ def EBV(rt):
# Type error, see: http://www.w3.org/TR/rdf-sparql-query/#ebv
            raise SPARQLTypeError(
-                "http://www.w3.org/TR/rdf-sparql-query/#ebv - ' + \
-                'Could not determine the EBV for : %r" % rt)
+                "http://www.w3.org/TR/rdf-sparql-query/#ebv - "
+                "Could not determine the EBV for: %r" % rt
+            )
else:
return bool(pyRT)
else:
        raise SPARQLTypeError(
-            "http://www.w3.org/TR/rdf-sparql-query/#ebv - ' + \
-            'Only literals have Boolean values! %r" % rt)
+            "http://www.w3.org/TR/rdf-sparql-query/#ebv - "
+            "Only literals have Boolean values! %r" % rt
+        )
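A self-contained sketch of the EBV rules referenced here (SPARQL 1.1 sec. 17.2.2), assuming the plain Python value has already been extracted from the Literal:

def ebv_sketch(value):
    # booleans: their own value; numbers: False for zero or NaN;
    # strings: False when empty; anything else is a type error
    if isinstance(value, bool):
        return value
    if isinstance(value, (int, float)):
        return value == value and value != 0
    if isinstance(value, str):
        return len(value) > 0
    raise TypeError("no EBV for %r" % (value,))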
def _lang_range_check(range, lang):
@@ -1038,6 +1061,7 @@ def _lang_range_check(range, lang):
.. __:http://dev.w3.org/2004/PythonLib-IH/RDFClosure/RestrictedDatatype.py
"""
+
def _match(r, l):
"""
Matching of a range and language item: either range is a wildcard
@@ -1046,10 +1070,10 @@ def _lang_range_check(range, lang):
@param l: language tag item
@rtype: boolean
"""
- return r == '*' or r == l
+ return r == "*" or r == l
- rangeList = range.strip().lower().split('-')
- langList = lang.strip().lower().split('-')
+ rangeList = range.strip().lower().split("-")
+ langList = lang.strip().lower().split("-")
if not _match(rangeList[0], langList[0]):
return False
if len(rangeList) > len(langList):
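A standalone sketch equivalent to the subtag walk above (RFC 4647 basic filtering; lower-cased, '-'-separated tags assumed):

def lang_range_check_sketch(range_, lang):
    rs = range_.strip().lower().split("-")
    ls = lang.strip().lower().split("-")
    if len(rs) > len(ls):
        return False
    return all(r in ("*", l) for r, l in zip(rs, ls))

assert lang_range_check_sketch("*", "en")
assert lang_range_check_sketch("de-*", "de-CH")
assert not lang_range_check_sketch("de-de", "de-CH")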
diff --git a/rdflib/plugins/sparql/parser.py b/rdflib/plugins/sparql/parser.py
index cc785f4c..e8c37a2e 100644
--- a/rdflib/plugins/sparql/parser.py
+++ b/rdflib/plugins/sparql/parser.py
@@ -10,10 +10,22 @@ import sys
import re
from pyparsing import (
- Literal, Regex, Optional, OneOrMore, ZeroOrMore, Forward,
- ParseException, Suppress, Combine, restOfLine, Group,
- ParseResults, delimitedList)
+ Literal,
+ Regex,
+ Optional,
+ OneOrMore,
+ ZeroOrMore,
+ Forward,
+ ParseException,
+ Suppress,
+ Combine,
+ restOfLine,
+ Group,
+ ParseResults,
+ delimitedList,
+)
from pyparsing import CaselessKeyword as Keyword # watch out :)
+
# from pyparsing import Keyword as CaseSensitiveKeyword
from .parserutils import Comp, Param, ParamList
@@ -51,9 +63,9 @@ def expandTriples(terms):
print("Terms", terms)
l = len(terms)
for i, t in enumerate(terms):
- if t == ',':
+ if t == ",":
res.extend([res[-3], res[-2]])
- elif t == ';':
+ elif t == ";":
if i + 1 == len(terms) or terms[i + 1] == ";" or terms[i + 1] == ".":
continue # this semicolon is spurious
res.append(res[0])
@@ -70,12 +82,13 @@ def expandTriples(terms):
res.append(t[0])
elif isinstance(t, ParseResults):
res += t.asList()
- elif t != '.':
+ elif t != ".":
res.append(t)
if DEBUG:
print(len(res), t)
if DEBUG:
import json
+
print(json.dumps(res, indent=2))
return res
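Roughly, the ',' and ';' branches above implement the Turtle-style predicate-object abbreviations; an illustrative (not parser-produced) expansion:

# ?s :p 1 , 2 ; :q 3 .
# expands into three full triples:
#   (?s, :p, 1), (?s, :p, 2), (?s, :q, 3)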
@@ -87,6 +100,7 @@ def expandTriples(terms):
except:
if DEBUG:
import traceback
+
traceback.print_exc()
raise
@@ -139,13 +153,16 @@ def expandCollection(terms):
# SPARQL Grammar from http://www.w3.org/TR/sparql11-query/#grammar
# ------ TERMINALS --------------
# [139] IRIREF ::= '<' ([^<>"{}|^`\]-[#x00-#x20])* '>'
-IRIREF = Combine(Suppress('<') + Regex(r'[^<>"{}|^`\\%s]*' % ''.join(
- '\\x%02X' % i for i in range(33))) + Suppress('>'))
+IRIREF = Combine(
+ Suppress("<")
+ + Regex(r'[^<>"{}|^`\\%s]*' % "".join("\\x%02X" % i for i in range(33)))
+ + Suppress(">")
+)
IRIREF.setParseAction(lambda x: rdflib.URIRef(x[0]))
# [164] PN_CHARS_BASE ::= [A-Z] | [a-z] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x02FF] | [#x0370-#x037D] | [#x037F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] | [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] | [#x10000-#xEFFFF]
-if sys.maxunicode == 0xffff:
+if sys.maxunicode == 0xFFFF:
# this is narrow python build (default on windows/osx)
# this means that unicode code points over 0xffff are stored
# as several characters, which in turn means that regex character
@@ -162,52 +179,54 @@ if sys.maxunicode == 0xffff:
#
# in py3.3 this is fixed
- PN_CHARS_BASE_re = u'A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD'
+ PN_CHARS_BASE_re = u"A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD"
else:
# wide python build
- PN_CHARS_BASE_re = u'A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\U00010000-\U000EFFFF'
+ PN_CHARS_BASE_re = u"A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\U00010000-\U000EFFFF"
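As the narrow/wide comments above explain, the branch hinges on how the interpreter stores astral code points; a quick illustrative check:

import sys

# narrow builds: sys.maxunicode == 0xFFFF, len(u"\U00010000") == 2
# wide builds:   sys.maxunicode == 0x10FFFF, len(u"\U00010000") == 1
print(hex(sys.maxunicode))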
# [165] PN_CHARS_U ::= PN_CHARS_BASE | '_'
-PN_CHARS_U_re = '_' + PN_CHARS_BASE_re
+PN_CHARS_U_re = "_" + PN_CHARS_BASE_re
# [167] PN_CHARS ::= PN_CHARS_U | '-' | [0-9] | #x00B7 | [#x0300-#x036F] | [#x203F-#x2040]
-PN_CHARS_re = u'\\-0-9\u00B7\u0300-\u036F\u203F-\u2040' + PN_CHARS_U_re
+PN_CHARS_re = u"\\-0-9\u00B7\u0300-\u036F\u203F-\u2040" + PN_CHARS_U_re
# PN_CHARS = Regex(u'[%s]'%PN_CHARS_re, flags=re.U)
# [168] PN_PREFIX ::= PN_CHARS_BASE ((PN_CHARS|'.')* PN_CHARS)?
-PN_PREFIX = Regex(u'[%s](?:[%s\\.]*[%s])?' % (PN_CHARS_BASE_re,
- PN_CHARS_re, PN_CHARS_re), flags=re.U)
+PN_PREFIX = Regex(
+ u"[%s](?:[%s\\.]*[%s])?" % (PN_CHARS_BASE_re, PN_CHARS_re, PN_CHARS_re), flags=re.U
+)
# [140] PNAME_NS ::= PN_PREFIX? ':'
-PNAME_NS = Optional(
- Param('prefix', PN_PREFIX)) + Suppress(':').leaveWhitespace()
+PNAME_NS = Optional(Param("prefix", PN_PREFIX)) + Suppress(":").leaveWhitespace()
# [173] PN_LOCAL_ESC ::= '\' ( '_' | '~' | '.' | '-' | '!' | '$' | '&' | "'" | '(' | ')' | '*' | '+' | ',' | ';' | '=' | '/' | '?' | '#' | '@' | '%' )
-PN_LOCAL_ESC_re = '\\\\[_~\\.\\-!$&"\'()*+,;=/?#@%]'
+PN_LOCAL_ESC_re = "\\\\[_~\\.\\-!$&\"'()*+,;=/?#@%]"
# PN_LOCAL_ESC = Regex(PN_LOCAL_ESC_re) # regex'd
-#PN_LOCAL_ESC.setParseAction(lambda x: x[0][1:])
+# PN_LOCAL_ESC.setParseAction(lambda x: x[0][1:])
# [172] HEX ::= [0-9] | [A-F] | [a-f]
# HEX = Regex('[0-9A-Fa-f]') # not needed
# [171] PERCENT ::= '%' HEX HEX
-PERCENT_re = '%[0-9a-fA-F]{2}'
+PERCENT_re = "%[0-9a-fA-F]{2}"
# PERCENT = Regex(PERCENT_re) # regex'd
-#PERCENT.setParseAction(lambda x: chr(int(x[0][1:], 16)))
+# PERCENT.setParseAction(lambda x: chr(int(x[0][1:], 16)))
# [170] PLX ::= PERCENT | PN_LOCAL_ESC
-PLX_re = '(%s|%s)' % (PN_LOCAL_ESC_re, PERCENT_re)
+PLX_re = "(%s|%s)" % (PN_LOCAL_ESC_re, PERCENT_re)
# PLX = PERCENT | PN_LOCAL_ESC # regex'd
# [169] PN_LOCAL ::= (PN_CHARS_U | ':' | [0-9] | PLX ) ((PN_CHARS | '.' | ':' | PLX)* (PN_CHARS | ':' | PLX) )?
-PN_LOCAL = Regex(u"""([%(PN_CHARS_U)s:0-9]|%(PLX)s)
+PN_LOCAL = Regex(
+ u"""([%(PN_CHARS_U)s:0-9]|%(PLX)s)
(([%(PN_CHARS)s\\.:]|%(PLX)s)*
- ([%(PN_CHARS)s:]|%(PLX)s) )?""" % dict(PN_CHARS_U=PN_CHARS_U_re,
- PN_CHARS=PN_CHARS_re,
- PLX=PLX_re), flags=re.X | re.UNICODE)
+ ([%(PN_CHARS)s:]|%(PLX)s) )?"""
+ % dict(PN_CHARS_U=PN_CHARS_U_re, PN_CHARS=PN_CHARS_re, PLX=PLX_re),
+ flags=re.X | re.UNICODE,
+)
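The %XX escapes admitted by PLX are decoded by the parse action referenced in the next hunk; a hedged sketch of that step (the _hexExpand body itself lies outside the visible hunk, so the decode rule is assumed):

import re

def hex_expand(m):
    # assumed behaviour: "%41" -> "A"
    return chr(int(m.group(0)[1:], 16))

re.sub("%[0-9a-fA-F]{2}", hex_expand, "foo%41bar")  # -> 'fooAbar'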
def _hexExpand(match):
@@ -218,71 +237,72 @@ PN_LOCAL.setParseAction(lambda x: re.sub("(%s)" % PERCENT_re, _hexExpand, x[0]))
# [141] PNAME_LN ::= PNAME_NS PN_LOCAL
-PNAME_LN = PNAME_NS + Param('localname', PN_LOCAL.leaveWhitespace())
+PNAME_LN = PNAME_NS + Param("localname", PN_LOCAL.leaveWhitespace())
# [142] BLANK_NODE_LABEL ::= '_:' ( PN_CHARS_U | [0-9] ) ((PN_CHARS|'.')* PN_CHARS)?
-BLANK_NODE_LABEL = Regex(u'_:[0-9%s](?:[\\.%s]*[%s])?' % (
- PN_CHARS_U_re, PN_CHARS_re, PN_CHARS_re), flags=re.U)
+BLANK_NODE_LABEL = Regex(
+ u"_:[0-9%s](?:[\\.%s]*[%s])?" % (PN_CHARS_U_re, PN_CHARS_re, PN_CHARS_re),
+ flags=re.U,
+)
BLANK_NODE_LABEL.setParseAction(lambda x: rdflib.BNode(x[0][2:]))
# [166] VARNAME ::= ( PN_CHARS_U | [0-9] ) ( PN_CHARS_U | [0-9] | #x00B7 | [#x0300-#x036F] | [#x203F-#x2040] )*
-VARNAME = Regex(u'[%s0-9][%s0-9\u00B7\u0300-\u036F\u203F-\u2040]*' % (
- PN_CHARS_U_re, PN_CHARS_U_re), flags=re.U)
+VARNAME = Regex(
+ u"[%s0-9][%s0-9\u00B7\u0300-\u036F\u203F-\u2040]*" % (PN_CHARS_U_re, PN_CHARS_U_re),
+ flags=re.U,
+)
# [143] VAR1 ::= '?' VARNAME
-VAR1 = Combine(Suppress('?') + VARNAME)
+VAR1 = Combine(Suppress("?") + VARNAME)
# [144] VAR2 ::= '$' VARNAME
-VAR2 = Combine(Suppress('$') + VARNAME)
+VAR2 = Combine(Suppress("$") + VARNAME)
# [145] LANGTAG ::= '@' [a-zA-Z]+ ('-' [a-zA-Z0-9]+)*
-LANGTAG = Combine(Suppress('@') + Regex('[a-zA-Z]+(?:-[a-zA-Z0-9]+)*'))
+LANGTAG = Combine(Suppress("@") + Regex("[a-zA-Z]+(?:-[a-zA-Z0-9]+)*"))
# [146] INTEGER ::= [0-9]+
INTEGER = Regex(r"[0-9]+")
# INTEGER.setResultsName('integer')
-INTEGER.setParseAction(
- lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.integer))
+INTEGER.setParseAction(lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.integer))
# [155] EXPONENT ::= [eE] [+-]? [0-9]+
-EXPONENT_re = '[eE][+-]?[0-9]+'
+EXPONENT_re = "[eE][+-]?[0-9]+"
# [147] DECIMAL ::= [0-9]* '.' [0-9]+
-DECIMAL = Regex(r'[0-9]*\.[0-9]+') # (?![eE])
+DECIMAL = Regex(r"[0-9]*\.[0-9]+") # (?![eE])
# DECIMAL.setResultsName('decimal')
-DECIMAL.setParseAction(
- lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.decimal))
+DECIMAL.setParseAction(lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.decimal))
# [148] DOUBLE ::= [0-9]+ '.' [0-9]* EXPONENT | '.' ([0-9])+ EXPONENT | ([0-9])+ EXPONENT
-DOUBLE = Regex(
- r'[0-9]+\.[0-9]*%(e)s|\.([0-9])+%(e)s|[0-9]+%(e)s' % {'e': EXPONENT_re})
+DOUBLE = Regex(r"[0-9]+\.[0-9]*%(e)s|\.([0-9])+%(e)s|[0-9]+%(e)s" % {"e": EXPONENT_re})
# DOUBLE.setResultsName('double')
-DOUBLE.setParseAction(
- lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.double))
+DOUBLE.setParseAction(lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.double))
# [149] INTEGER_POSITIVE ::= '+' INTEGER
-INTEGER_POSITIVE = Suppress('+') + INTEGER.copy().leaveWhitespace()
-INTEGER_POSITIVE.setParseAction(lambda x: rdflib.Literal(
- "+" + x[0], datatype=rdflib.XSD.integer))
+INTEGER_POSITIVE = Suppress("+") + INTEGER.copy().leaveWhitespace()
+INTEGER_POSITIVE.setParseAction(
+ lambda x: rdflib.Literal("+" + x[0], datatype=rdflib.XSD.integer)
+)
# [150] DECIMAL_POSITIVE ::= '+' DECIMAL
-DECIMAL_POSITIVE = Suppress('+') + DECIMAL.copy().leaveWhitespace()
+DECIMAL_POSITIVE = Suppress("+") + DECIMAL.copy().leaveWhitespace()
# [151] DOUBLE_POSITIVE ::= '+' DOUBLE
-DOUBLE_POSITIVE = Suppress('+') + DOUBLE.copy().leaveWhitespace()
+DOUBLE_POSITIVE = Suppress("+") + DOUBLE.copy().leaveWhitespace()
# [152] INTEGER_NEGATIVE ::= '-' INTEGER
-INTEGER_NEGATIVE = Suppress('-') + INTEGER.copy().leaveWhitespace()
+INTEGER_NEGATIVE = Suppress("-") + INTEGER.copy().leaveWhitespace()
INTEGER_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# [153] DECIMAL_NEGATIVE ::= '-' DECIMAL
-DECIMAL_NEGATIVE = Suppress('-') + DECIMAL.copy().leaveWhitespace()
+DECIMAL_NEGATIVE = Suppress("-") + DECIMAL.copy().leaveWhitespace()
DECIMAL_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# [154] DOUBLE_NEGATIVE ::= '-' DOUBLE
-DOUBLE_NEGATIVE = Suppress('-') + DOUBLE.copy().leaveWhitespace()
+DOUBLE_NEGATIVE = Suppress("-") + DOUBLE.copy().leaveWhitespace()
DOUBLE_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# [160] ECHAR ::= '\' [tbnrf\"']
@@ -294,57 +314,58 @@ DOUBLE_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# ) + ZeroOrMore( ~ Literal("'\\") | ECHAR ) ) + "'''"
STRING_LITERAL_LONG1 = Regex(u"'''((?:'|'')?(?:[^'\\\\]|\\\\['ntbrf\\\\]))*'''")
STRING_LITERAL_LONG1.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3]))
+)
# [159] STRING_LITERAL_LONG2 ::= '"""' ( ( '"' | '""' )? ( [^"\] | ECHAR ) )* '"""'
# STRING_LITERAL_LONG2 = Literal('"""') + ( Optional( Literal('"') | '""'
# ) + ZeroOrMore( ~ Literal('"\\') | ECHAR ) ) + '"""'
STRING_LITERAL_LONG2 = Regex(u'"""(?:(?:"|"")?(?:[^"\\\\]|\\\\["ntbrf\\\\]))*"""')
STRING_LITERAL_LONG2.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3]))
+)
# [156] STRING_LITERAL1 ::= "'" ( ([^#x27#x5C#xA#xD]) | ECHAR )* "'"
# STRING_LITERAL1 = Literal("'") + ZeroOrMore(
# Regex(u'[^\u0027\u005C\u000A\u000D]',flags=re.U) | ECHAR ) + "'"
-STRING_LITERAL1 = Regex(
- u"'(?:[^'\\n\\r\\\\]|\\\\['ntbrf\\\\])*'(?!')", flags=re.U)
+STRING_LITERAL1 = Regex(u"'(?:[^'\\n\\r\\\\]|\\\\['ntbrf\\\\])*'(?!')", flags=re.U)
STRING_LITERAL1.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1]))
+)
# [157] STRING_LITERAL2 ::= '"' ( ([^#x22#x5C#xA#xD]) | ECHAR )* '"'
# STRING_LITERAL2 = Literal('"') + ZeroOrMore (
# Regex(u'[^\u0022\u005C\u000A\u000D]',flags=re.U) | ECHAR ) + '"'
-STRING_LITERAL2 = Regex(
- u'"(?:[^"\\n\\r\\\\]|\\\\["ntbrf\\\\])*"(?!")', flags=re.U)
+STRING_LITERAL2 = Regex(u'"(?:[^"\\n\\r\\\\]|\\\\["ntbrf\\\\])*"(?!")', flags=re.U)
STRING_LITERAL2.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1]))
+)
# [161] NIL ::= '(' WS* ')'
-NIL = Literal('(') + ')'
+NIL = Literal("(") + ")"
NIL.setParseAction(lambda x: rdflib.RDF.nil)
# [162] WS ::= #x20 | #x9 | #xD | #xA
# Not needed?
# WS = #x20 | #x9 | #xD | #xA
# [163] ANON ::= '[' WS* ']'
-ANON = Literal('[') + ']'
+ANON = Literal("[") + "]"
ANON.setParseAction(lambda x: rdflib.BNode())
# A = CaseSensitiveKeyword('a')
-A = Literal('a')
+A = Literal("a")
A.setParseAction(lambda x: rdflib.RDF.type)
# ------ NON-TERMINALS --------------
# [5] BaseDecl ::= 'BASE' IRIREF
-BaseDecl = Comp('Base', Keyword('BASE') + Param('iri', IRIREF))
+BaseDecl = Comp("Base", Keyword("BASE") + Param("iri", IRIREF))
# [6] PrefixDecl ::= 'PREFIX' PNAME_NS IRIREF
-PrefixDecl = Comp(
- 'PrefixDecl', Keyword('PREFIX') + PNAME_NS + Param('iri', IRIREF))
+PrefixDecl = Comp("PrefixDecl", Keyword("PREFIX") + PNAME_NS + Param("iri", IRIREF))
# [4] Prologue ::= ( BaseDecl | PrefixDecl )*
Prologue = Group(ZeroOrMore(BaseDecl | PrefixDecl))
@@ -354,7 +375,7 @@ Var = VAR1 | VAR2
Var.setParseAction(lambda x: rdflib.term.Variable(x[0]))
# [137] PrefixedName ::= PNAME_LN | PNAME_NS
-PrefixedName = Comp('pname', PNAME_LN | PNAME_NS)
+PrefixedName = Comp("pname", PNAME_LN | PNAME_NS)
# [136] iri ::= IRIREF | PrefixedName
iri = IRIREF | PrefixedName
@@ -364,8 +385,14 @@ String = STRING_LITERAL_LONG1 | STRING_LITERAL_LONG2 | STRING_LITERAL1 | STRING_
# [129] RDFLiteral ::= String ( LANGTAG | ( '^^' iri ) )?
-RDFLiteral = Comp('literal', Param('string', String) + Optional(Param(
- 'lang', LANGTAG.leaveWhitespace()) | Literal('^^').leaveWhitespace() + Param('datatype', iri).leaveWhitespace()))
+RDFLiteral = Comp(
+ "literal",
+ Param("string", String)
+ + Optional(
+ Param("lang", LANGTAG.leaveWhitespace())
+ | Literal("^^").leaveWhitespace() + Param("datatype", iri).leaveWhitespace()
+ ),
+)
# [132] NumericLiteralPositive ::= INTEGER_POSITIVE | DECIMAL_POSITIVE | DOUBLE_POSITIVE
NumericLiteralPositive = DOUBLE_POSITIVE | DECIMAL_POSITIVE | INTEGER_POSITIVE
@@ -377,11 +404,14 @@ NumericLiteralNegative = DOUBLE_NEGATIVE | DECIMAL_NEGATIVE | INTEGER_NEGATIVE
NumericLiteralUnsigned = DOUBLE | DECIMAL | INTEGER
# [130] NumericLiteral ::= NumericLiteralUnsigned | NumericLiteralPositive | NumericLiteralNegative
-NumericLiteral = NumericLiteralUnsigned | NumericLiteralPositive | NumericLiteralNegative
+NumericLiteral = (
+ NumericLiteralUnsigned | NumericLiteralPositive | NumericLiteralNegative
+)
# [134] BooleanLiteral ::= 'true' | 'false'
-BooleanLiteral = Keyword('true').setParseAction(lambda: rdflib.Literal(True)) |\
- Keyword('false').setParseAction(lambda: rdflib.Literal(False))
+BooleanLiteral = Keyword("true").setParseAction(lambda: rdflib.Literal(True)) | Keyword(
+ "false"
+).setParseAction(lambda: rdflib.Literal(False))
# [138] BlankNode ::= BLANK_NODE_LABEL | ANON
BlankNode = BLANK_NODE_LABEL | ANON
@@ -396,19 +426,23 @@ VarOrTerm = Var | GraphTerm
VarOrIri = Var | iri
# [46] GraphRef ::= 'GRAPH' iri
-GraphRef = Keyword('GRAPH') + Param('graphiri', iri)
+GraphRef = Keyword("GRAPH") + Param("graphiri", iri)
# [47] GraphRefAll ::= GraphRef | 'DEFAULT' | 'NAMED' | 'ALL'
-GraphRefAll = GraphRef | Param('graphiri', Keyword('DEFAULT')) | Param(
- 'graphiri', Keyword('NAMED')) | Param('graphiri', Keyword('ALL'))
+GraphRefAll = (
+ GraphRef
+ | Param("graphiri", Keyword("DEFAULT"))
+ | Param("graphiri", Keyword("NAMED"))
+ | Param("graphiri", Keyword("ALL"))
+)
# [45] GraphOrDefault ::= 'DEFAULT' | 'GRAPH'? iri
-GraphOrDefault = ParamList('graph', Keyword(
- 'DEFAULT')) | Optional(Keyword('GRAPH')) + ParamList('graph', iri)
+GraphOrDefault = ParamList("graph", Keyword("DEFAULT")) | Optional(
+ Keyword("GRAPH")
+) + ParamList("graph", iri)
# [65] DataBlockValue ::= iri | RDFLiteral | NumericLiteral | BooleanLiteral | 'UNDEF'
-DataBlockValue = iri | RDFLiteral | NumericLiteral | BooleanLiteral | Keyword(
- 'UNDEF')
+DataBlockValue = iri | RDFLiteral | NumericLiteral | BooleanLiteral | Keyword("UNDEF")
# [78] Verb ::= VarOrIri | A
Verb = VarOrIri | A
@@ -432,37 +466,58 @@ GraphNodePath = VarOrTerm | TriplesNodePath
# [93] PathMod ::= '?' | '*' | '+'
-PathMod = Literal('?') | '*' | '+'
+PathMod = Literal("?") | "*" | "+"
# [96] PathOneInPropertySet ::= iri | A | '^' ( iri | A )
-PathOneInPropertySet = iri | A | Comp('InversePath', '^' + (iri | A))
+PathOneInPropertySet = iri | A | Comp("InversePath", "^" + (iri | A))
Path = Forward()
# [95] PathNegatedPropertySet ::= PathOneInPropertySet | '(' ( PathOneInPropertySet ( '|' PathOneInPropertySet )* )? ')'
-PathNegatedPropertySet = Comp('PathNegatedPropertySet', ParamList('part', PathOneInPropertySet) | '(' + Optional(
- ParamList('part', PathOneInPropertySet) + ZeroOrMore('|' + ParamList('part', PathOneInPropertySet))) + ')')
+PathNegatedPropertySet = Comp(
+ "PathNegatedPropertySet",
+ ParamList("part", PathOneInPropertySet)
+ | "("
+ + Optional(
+ ParamList("part", PathOneInPropertySet)
+ + ZeroOrMore("|" + ParamList("part", PathOneInPropertySet))
+ )
+ + ")",
+)
# [94] PathPrimary ::= iri | A | '!' PathNegatedPropertySet | '(' Path ')' | 'DISTINCT' '(' Path ')'
-PathPrimary = iri | A | Suppress('!') + PathNegatedPropertySet | Suppress('(') + Path + Suppress(
- ')') | Comp('DistinctPath', Keyword('DISTINCT') + '(' + Param('part', Path) + ')')
+PathPrimary = (
+ iri
+ | A
+ | Suppress("!") + PathNegatedPropertySet
+ | Suppress("(") + Path + Suppress(")")
+ | Comp("DistinctPath", Keyword("DISTINCT") + "(" + Param("part", Path) + ")")
+)
# [91] PathElt ::= PathPrimary Optional(PathMod)
-PathElt = Comp('PathElt', Param(
- 'part', PathPrimary) + Optional(Param('mod', PathMod.leaveWhitespace())))
+PathElt = Comp(
+ "PathElt",
+ Param("part", PathPrimary) + Optional(Param("mod", PathMod.leaveWhitespace())),
+)
# [92] PathEltOrInverse ::= PathElt | '^' PathElt
-PathEltOrInverse = PathElt | Suppress(
- '^') + Comp('PathEltOrInverse', Param('part', PathElt))
+PathEltOrInverse = PathElt | Suppress("^") + Comp(
+ "PathEltOrInverse", Param("part", PathElt)
+)
# [90] PathSequence ::= PathEltOrInverse ( '/' PathEltOrInverse )*
-PathSequence = Comp('PathSequence', ParamList('part', PathEltOrInverse) +
- ZeroOrMore('/' + ParamList('part', PathEltOrInverse)))
+PathSequence = Comp(
+ "PathSequence",
+ ParamList("part", PathEltOrInverse)
+ + ZeroOrMore("/" + ParamList("part", PathEltOrInverse)),
+)
# [89] PathAlternative ::= PathSequence ( '|' PathSequence )*
-PathAlternative = Comp('PathAlternative', ParamList('part', PathSequence) +
- ZeroOrMore('|' + ParamList('part', PathSequence)))
+PathAlternative = Comp(
+ "PathAlternative",
+ ParamList("part", PathSequence) + ZeroOrMore("|" + ParamList("part", PathSequence)),
+)
# [88] Path ::= PathAlternative
Path <<= PathAlternative
@@ -474,127 +529,172 @@ VerbPath = Path
ObjectPath = GraphNodePath
# [86] ObjectListPath ::= ObjectPath ( ',' ObjectPath )*
-ObjectListPath = ObjectPath + ZeroOrMore(',' + ObjectPath)
+ObjectListPath = ObjectPath + ZeroOrMore("," + ObjectPath)
GroupGraphPattern = Forward()
# [102] Collection ::= '(' OneOrMore(GraphNode) ')'
-Collection = Suppress('(') + OneOrMore(GraphNode) + Suppress(')')
+Collection = Suppress("(") + OneOrMore(GraphNode) + Suppress(")")
Collection.setParseAction(expandCollection)
# [103] CollectionPath ::= '(' OneOrMore(GraphNodePath) ')'
-CollectionPath = Suppress('(') + OneOrMore(GraphNodePath) + Suppress(')')
+CollectionPath = Suppress("(") + OneOrMore(GraphNodePath) + Suppress(")")
CollectionPath.setParseAction(expandCollection)
# [80] Object ::= GraphNode
Object = GraphNode
# [79] ObjectList ::= Object ( ',' Object )*
-ObjectList = Object + ZeroOrMore(',' + Object)
+ObjectList = Object + ZeroOrMore("," + Object)
# [83] PropertyListPathNotEmpty ::= ( VerbPath | VerbSimple ) ObjectListPath ( ';' ( ( VerbPath | VerbSimple ) ObjectList )? )*
-PropertyListPathNotEmpty = (VerbPath | VerbSimple) + ObjectListPath + ZeroOrMore(
- ';' + Optional((VerbPath | VerbSimple) + ObjectListPath))
+PropertyListPathNotEmpty = (
+ (VerbPath | VerbSimple)
+ + ObjectListPath
+ + ZeroOrMore(";" + Optional((VerbPath | VerbSimple) + ObjectListPath))
+)
# [82] PropertyListPath ::= Optional(PropertyListPathNotEmpty)
PropertyListPath = Optional(PropertyListPathNotEmpty)
# [77] PropertyListNotEmpty ::= Verb ObjectList ( ';' ( Verb ObjectList )? )*
-PropertyListNotEmpty = Verb + ObjectList + ZeroOrMore(';' + Optional(Verb +
- ObjectList))
+PropertyListNotEmpty = Verb + ObjectList + ZeroOrMore(";" + Optional(Verb + ObjectList))
# [76] PropertyList ::= Optional(PropertyListNotEmpty)
PropertyList = Optional(PropertyListNotEmpty)
# [99] BlankNodePropertyList ::= '[' PropertyListNotEmpty ']'
-BlankNodePropertyList = Group(
- Suppress('[') + PropertyListNotEmpty + Suppress(']'))
+BlankNodePropertyList = Group(Suppress("[") + PropertyListNotEmpty + Suppress("]"))
BlankNodePropertyList.setParseAction(expandBNodeTriples)
# [101] BlankNodePropertyListPath ::= '[' PropertyListPathNotEmpty ']'
BlankNodePropertyListPath = Group(
- Suppress('[') + PropertyListPathNotEmpty + Suppress(']'))
+ Suppress("[") + PropertyListPathNotEmpty + Suppress("]")
+)
BlankNodePropertyListPath.setParseAction(expandBNodeTriples)
# [98] TriplesNode ::= Collection | BlankNodePropertyList
-TriplesNode <<= (Collection | BlankNodePropertyList)
+TriplesNode <<= Collection | BlankNodePropertyList
# [100] TriplesNodePath ::= CollectionPath | BlankNodePropertyListPath
-TriplesNodePath <<= (CollectionPath | BlankNodePropertyListPath)
+TriplesNodePath <<= CollectionPath | BlankNodePropertyListPath
# [75] TriplesSameSubject ::= VarOrTerm PropertyListNotEmpty | TriplesNode PropertyList
-TriplesSameSubject = VarOrTerm + PropertyListNotEmpty | TriplesNode + \
- PropertyList
+TriplesSameSubject = VarOrTerm + PropertyListNotEmpty | TriplesNode + PropertyList
TriplesSameSubject.setParseAction(expandTriples)
# [52] TriplesTemplate ::= TriplesSameSubject ( '.' Optional(TriplesTemplate) )?
TriplesTemplate = Forward()
-TriplesTemplate <<= (ParamList('triples', TriplesSameSubject) + Optional(
- Suppress('.') + Optional(TriplesTemplate)))
+TriplesTemplate <<= ParamList("triples", TriplesSameSubject) + Optional(
+ Suppress(".") + Optional(TriplesTemplate)
+)
# [51] QuadsNotTriples ::= 'GRAPH' VarOrIri '{' Optional(TriplesTemplate) '}'
-QuadsNotTriples = Comp('QuadsNotTriples', Keyword('GRAPH') + Param(
- 'term', VarOrIri) + '{' + Optional(TriplesTemplate) + '}')
+QuadsNotTriples = Comp(
+ "QuadsNotTriples",
+ Keyword("GRAPH") + Param("term", VarOrIri) + "{" + Optional(TriplesTemplate) + "}",
+)
# [50] Quads ::= Optional(TriplesTemplate) ( QuadsNotTriples '.'? Optional(TriplesTemplate) )*
-Quads = Comp('Quads', Optional(TriplesTemplate) + ZeroOrMore(ParamList(
- 'quadsNotTriples', QuadsNotTriples) + Optional(Suppress('.')) + Optional(TriplesTemplate)))
+Quads = Comp(
+ "Quads",
+ Optional(TriplesTemplate)
+ + ZeroOrMore(
+ ParamList("quadsNotTriples", QuadsNotTriples)
+ + Optional(Suppress("."))
+ + Optional(TriplesTemplate)
+ ),
+)
# [48] QuadPattern ::= '{' Quads '}'
-QuadPattern = '{' + Param('quads', Quads) + '}'
+QuadPattern = "{" + Param("quads", Quads) + "}"
# [49] QuadData ::= '{' Quads '}'
-QuadData = '{' + Param('quads', Quads) + '}'
+QuadData = "{" + Param("quads", Quads) + "}"
# [81] TriplesSameSubjectPath ::= VarOrTerm PropertyListPathNotEmpty | TriplesNodePath PropertyListPath
-TriplesSameSubjectPath = VarOrTerm + \
- PropertyListPathNotEmpty | TriplesNodePath + PropertyListPath
+TriplesSameSubjectPath = (
+ VarOrTerm + PropertyListPathNotEmpty | TriplesNodePath + PropertyListPath
+)
TriplesSameSubjectPath.setParseAction(expandTriples)
# [55] TriplesBlock ::= TriplesSameSubjectPath ( '.' Optional(TriplesBlock) )?
TriplesBlock = Forward()
-TriplesBlock <<= (ParamList('triples', TriplesSameSubjectPath) + Optional(
- Suppress('.') + Optional(TriplesBlock)))
+TriplesBlock <<= ParamList("triples", TriplesSameSubjectPath) + Optional(
+ Suppress(".") + Optional(TriplesBlock)
+)
# [66] MinusGraphPattern ::= 'MINUS' GroupGraphPattern
MinusGraphPattern = Comp(
- 'MinusGraphPattern', Keyword('MINUS') + Param('graph', GroupGraphPattern))
+ "MinusGraphPattern", Keyword("MINUS") + Param("graph", GroupGraphPattern)
+)
# [67] GroupOrUnionGraphPattern ::= GroupGraphPattern ( 'UNION' GroupGraphPattern )*
-GroupOrUnionGraphPattern = Comp('GroupOrUnionGraphPattern', ParamList(
- 'graph', GroupGraphPattern) + ZeroOrMore(Keyword('UNION') + ParamList('graph', GroupGraphPattern)))
+GroupOrUnionGraphPattern = Comp(
+ "GroupOrUnionGraphPattern",
+ ParamList("graph", GroupGraphPattern)
+ + ZeroOrMore(Keyword("UNION") + ParamList("graph", GroupGraphPattern)),
+)
Expression = Forward()
# [72] ExpressionList ::= NIL | '(' Expression ( ',' Expression )* ')'
-ExpressionList = NIL | Group(
- Suppress('(') + delimitedList(Expression) + Suppress(')'))
+ExpressionList = NIL | Group(Suppress("(") + delimitedList(Expression) + Suppress(")"))
# [122] RegexExpression ::= 'REGEX' '(' Expression ',' Expression ( ',' Expression )? ')'
-RegexExpression = Comp('Builtin_REGEX', Keyword('REGEX') + '(' + Param('text', Expression) + ',' + Param(
- 'pattern', Expression) + Optional(',' + Param('flags', Expression)) + ')')
+RegexExpression = Comp(
+ "Builtin_REGEX",
+ Keyword("REGEX")
+ + "("
+ + Param("text", Expression)
+ + ","
+ + Param("pattern", Expression)
+ + Optional("," + Param("flags", Expression))
+ + ")",
+)
RegexExpression.setEvalFn(op.Builtin_REGEX)
# [123] SubstringExpression ::= 'SUBSTR' '(' Expression ',' Expression ( ',' Expression )? ')'
-SubstringExpression = Comp('Builtin_SUBSTR', Keyword('SUBSTR') + '(' + Param('arg', Expression) + ',' + Param(
- 'start', Expression) + Optional(',' + Param('length', Expression)) + ')').setEvalFn(op.Builtin_SUBSTR)
+SubstringExpression = Comp(
+ "Builtin_SUBSTR",
+ Keyword("SUBSTR")
+ + "("
+ + Param("arg", Expression)
+ + ","
+ + Param("start", Expression)
+ + Optional("," + Param("length", Expression))
+ + ")",
+).setEvalFn(op.Builtin_SUBSTR)
# [124] StrReplaceExpression ::= 'REPLACE' '(' Expression ',' Expression ',' Expression ( ',' Expression )? ')'
-StrReplaceExpression = Comp('Builtin_REPLACE', Keyword('REPLACE') + '(' + Param('arg', Expression) + ',' + Param(
- 'pattern', Expression) + ',' + Param('replacement', Expression) + Optional(',' + Param('flags', Expression)) + ')').setEvalFn(op.Builtin_REPLACE)
+StrReplaceExpression = Comp(
+ "Builtin_REPLACE",
+ Keyword("REPLACE")
+ + "("
+ + Param("arg", Expression)
+ + ","
+ + Param("pattern", Expression)
+ + ","
+ + Param("replacement", Expression)
+ + Optional("," + Param("flags", Expression))
+ + ")",
+).setEvalFn(op.Builtin_REPLACE)
# [125] ExistsFunc ::= 'EXISTS' GroupGraphPattern
-ExistsFunc = Comp('Builtin_EXISTS', Keyword('EXISTS') + Param(
- 'graph', GroupGraphPattern)).setEvalFn(op.Builtin_EXISTS)
+ExistsFunc = Comp(
+ "Builtin_EXISTS", Keyword("EXISTS") + Param("graph", GroupGraphPattern)
+).setEvalFn(op.Builtin_EXISTS)
# [126] NotExistsFunc ::= 'NOT' 'EXISTS' GroupGraphPattern
-NotExistsFunc = Comp('Builtin_NOTEXISTS', Keyword('NOT') + Keyword(
- 'EXISTS') + Param('graph', GroupGraphPattern)).setEvalFn(op.Builtin_EXISTS)
+NotExistsFunc = Comp(
+ "Builtin_NOTEXISTS",
+ Keyword("NOT") + Keyword("EXISTS") + Param("graph", GroupGraphPattern),
+).setEvalFn(op.Builtin_EXISTS)
# [127] Aggregate ::= 'COUNT' '(' 'DISTINCT'? ( '*' | Expression ) ')'
@@ -605,17 +705,33 @@ NotExistsFunc = Comp('Builtin_NOTEXISTS', Keyword('NOT') + Keyword(
# | 'SAMPLE' '(' Optional('DISTINCT') Expression ')'
# | 'GROUP_CONCAT' '(' Optional('DISTINCT') Expression ( ';' 'SEPARATOR' '=' String )? ')'
-_Distinct = Optional(Keyword('DISTINCT'))
-_AggregateParams = '(' + Param(
- 'distinct', _Distinct) + Param('vars', Expression) + ')'
-
-Aggregate = Comp('Aggregate_Count', Keyword('COUNT') + '(' + Param('distinct', _Distinct) + Param('vars', '*' | Expression) + ')')\
- | Comp('Aggregate_Sum', Keyword('SUM') + _AggregateParams)\
- | Comp('Aggregate_Min', Keyword('MIN') + _AggregateParams)\
- | Comp('Aggregate_Max', Keyword('MAX') + _AggregateParams)\
- | Comp('Aggregate_Avg', Keyword('AVG') + _AggregateParams)\
- | Comp('Aggregate_Sample', Keyword('SAMPLE') + _AggregateParams)\
- | Comp('Aggregate_GroupConcat', Keyword('GROUP_CONCAT') + '(' + Param('distinct', _Distinct) + Param('vars', Expression) + Optional(';' + Keyword('SEPARATOR') + '=' + Param('separator', String)) + ')')
+_Distinct = Optional(Keyword("DISTINCT"))
+_AggregateParams = "(" + Param("distinct", _Distinct) + Param("vars", Expression) + ")"
+
+Aggregate = (
+ Comp(
+ "Aggregate_Count",
+ Keyword("COUNT")
+ + "("
+ + Param("distinct", _Distinct)
+ + Param("vars", "*" | Expression)
+ + ")",
+ )
+ | Comp("Aggregate_Sum", Keyword("SUM") + _AggregateParams)
+ | Comp("Aggregate_Min", Keyword("MIN") + _AggregateParams)
+ | Comp("Aggregate_Max", Keyword("MAX") + _AggregateParams)
+ | Comp("Aggregate_Avg", Keyword("AVG") + _AggregateParams)
+ | Comp("Aggregate_Sample", Keyword("SAMPLE") + _AggregateParams)
+ | Comp(
+ "Aggregate_GroupConcat",
+ Keyword("GROUP_CONCAT")
+ + "("
+ + Param("distinct", _Distinct)
+ + Param("vars", Expression)
+ + Optional(";" + Keyword("SEPARATOR") + "=" + Param("separator", String))
+ + ")",
+ )
+)
# [121] BuiltInCall ::= Aggregate
# | 'STR' '(' + Expression + ')'
@@ -673,93 +789,271 @@ Aggregate = Comp('Aggregate_Count', Keyword('COUNT') + '(' + Param('distinct', _
# | ExistsFunc
# | NotExistsFunc
-BuiltInCall = Aggregate \
- | Comp('Builtin_STR', Keyword('STR') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_STR) \
- | Comp('Builtin_LANG', Keyword('LANG') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_LANG) \
- | Comp('Builtin_LANGMATCHES', Keyword('LANGMATCHES') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_LANGMATCHES) \
- | Comp('Builtin_DATATYPE', Keyword('DATATYPE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_DATATYPE) \
- | Comp('Builtin_BOUND', Keyword('BOUND') + '(' + Param('arg', Var) + ')').setEvalFn(op.Builtin_BOUND) \
- | Comp('Builtin_IRI', Keyword('IRI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_IRI) \
- | Comp('Builtin_URI', Keyword('URI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_IRI) \
- | Comp('Builtin_BNODE', Keyword('BNODE') + ('(' + Param('arg', Expression) + ')' | NIL)).setEvalFn(op.Builtin_BNODE) \
- | Comp('Builtin_RAND', Keyword('RAND') + NIL).setEvalFn(op.Builtin_RAND) \
- | Comp('Builtin_ABS', Keyword('ABS') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_ABS) \
- | Comp('Builtin_CEIL', Keyword('CEIL') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_CEIL) \
- | Comp('Builtin_FLOOR', Keyword('FLOOR') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_FLOOR) \
- | Comp('Builtin_ROUND', Keyword('ROUND') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_ROUND) \
- | Comp('Builtin_CONCAT', Keyword('CONCAT') + Param('arg', ExpressionList)).setEvalFn(op.Builtin_CONCAT) \
- | SubstringExpression \
- | Comp('Builtin_STRLEN', Keyword('STRLEN') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_STRLEN) \
- | StrReplaceExpression \
- | Comp('Builtin_UCASE', Keyword('UCASE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_UCASE) \
- | Comp('Builtin_LCASE', Keyword('LCASE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_LCASE) \
- | Comp('Builtin_ENCODE_FOR_URI', Keyword('ENCODE_FOR_URI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_ENCODE_FOR_URI) \
- | Comp('Builtin_CONTAINS', Keyword('CONTAINS') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_CONTAINS) \
- | Comp('Builtin_STRSTARTS', Keyword('STRSTARTS') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRSTARTS) \
- | Comp('Builtin_STRENDS', Keyword('STRENDS') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRENDS) \
- | Comp('Builtin_STRBEFORE', Keyword('STRBEFORE') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRBEFORE) \
- | Comp('Builtin_STRAFTER', Keyword('STRAFTER') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRAFTER) \
- | Comp('Builtin_YEAR', Keyword('YEAR') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_YEAR) \
- | Comp('Builtin_MONTH', Keyword('MONTH') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_MONTH) \
- | Comp('Builtin_DAY', Keyword('DAY') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_DAY) \
- | Comp('Builtin_HOURS', Keyword('HOURS') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_HOURS) \
- | Comp('Builtin_MINUTES', Keyword('MINUTES') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_MINUTES) \
- | Comp('Builtin_SECONDS', Keyword('SECONDS') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SECONDS) \
- | Comp('Builtin_TIMEZONE', Keyword('TIMEZONE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_TIMEZONE) \
- | Comp('Builtin_TZ', Keyword('TZ') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_TZ) \
- | Comp('Builtin_NOW', Keyword('NOW') + NIL).setEvalFn(op.Builtin_NOW) \
- | Comp('Builtin_UUID', Keyword('UUID') + NIL).setEvalFn(op.Builtin_UUID) \
- | Comp('Builtin_STRUUID', Keyword('STRUUID') + NIL).setEvalFn(op.Builtin_STRUUID) \
- | Comp('Builtin_MD5', Keyword('MD5') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_MD5) \
- | Comp('Builtin_SHA1', Keyword('SHA1') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA1) \
- | Comp('Builtin_SHA256', Keyword('SHA256') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA256) \
- | Comp('Builtin_SHA384', Keyword('SHA384') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA384) \
- | Comp('Builtin_SHA512', Keyword('SHA512') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA512) \
- | Comp('Builtin_COALESCE', Keyword('COALESCE') + Param('arg', ExpressionList)).setEvalFn(op.Builtin_COALESCE) \
- | Comp('Builtin_IF', Keyword('IF') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ',' + Param('arg3', Expression) + ')').setEvalFn(op.Builtin_IF) \
- | Comp('Builtin_STRLANG', Keyword('STRLANG') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRLANG) \
- | Comp('Builtin_STRDT', Keyword('STRDT') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRDT) \
- | Comp('Builtin_sameTerm', Keyword('sameTerm') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_sameTerm) \
- | Comp('Builtin_isIRI', Keyword('isIRI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isIRI) \
- | Comp('Builtin_isURI', Keyword('isURI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isIRI) \
- | Comp('Builtin_isBLANK', Keyword('isBLANK') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isBLANK) \
- | Comp('Builtin_isLITERAL', Keyword('isLITERAL') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isLITERAL) \
- | Comp('Builtin_isNUMERIC', Keyword('isNUMERIC') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isNUMERIC) \
- | RegexExpression \
- | ExistsFunc \
+BuiltInCall = (
+ Aggregate
+ | Comp(
+ "Builtin_STR", Keyword("STR") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_STR)
+ | Comp(
+ "Builtin_LANG", Keyword("LANG") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_LANG)
+ | Comp(
+ "Builtin_LANGMATCHES",
+ Keyword("LANGMATCHES")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_LANGMATCHES)
+ | Comp(
+ "Builtin_DATATYPE", Keyword("DATATYPE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_DATATYPE)
+ | Comp("Builtin_BOUND", Keyword("BOUND") + "(" + Param("arg", Var) + ")").setEvalFn(
+ op.Builtin_BOUND
+ )
+ | Comp(
+ "Builtin_IRI", Keyword("IRI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_IRI)
+ | Comp(
+ "Builtin_URI", Keyword("URI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_IRI)
+ | Comp(
+ "Builtin_BNODE", Keyword("BNODE") + ("(" + Param("arg", Expression) + ")" | NIL)
+ ).setEvalFn(op.Builtin_BNODE)
+ | Comp("Builtin_RAND", Keyword("RAND") + NIL).setEvalFn(op.Builtin_RAND)
+ | Comp(
+ "Builtin_ABS", Keyword("ABS") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_ABS)
+ | Comp(
+ "Builtin_CEIL", Keyword("CEIL") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_CEIL)
+ | Comp(
+ "Builtin_FLOOR", Keyword("FLOOR") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_FLOOR)
+ | Comp(
+ "Builtin_ROUND", Keyword("ROUND") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_ROUND)
+ | Comp(
+ "Builtin_CONCAT", Keyword("CONCAT") + Param("arg", ExpressionList)
+ ).setEvalFn(op.Builtin_CONCAT)
+ | SubstringExpression
+ | Comp(
+ "Builtin_STRLEN", Keyword("STRLEN") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_STRLEN)
+ | StrReplaceExpression
+ | Comp(
+ "Builtin_UCASE", Keyword("UCASE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_UCASE)
+ | Comp(
+ "Builtin_LCASE", Keyword("LCASE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_LCASE)
+ | Comp(
+ "Builtin_ENCODE_FOR_URI",
+ Keyword("ENCODE_FOR_URI") + "(" + Param("arg", Expression) + ")",
+ ).setEvalFn(op.Builtin_ENCODE_FOR_URI)
+ | Comp(
+ "Builtin_CONTAINS",
+ Keyword("CONTAINS")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_CONTAINS)
+ | Comp(
+ "Builtin_STRSTARTS",
+ Keyword("STRSTARTS")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRSTARTS)
+ | Comp(
+ "Builtin_STRENDS",
+ Keyword("STRENDS")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRENDS)
+ | Comp(
+ "Builtin_STRBEFORE",
+ Keyword("STRBEFORE")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRBEFORE)
+ | Comp(
+ "Builtin_STRAFTER",
+ Keyword("STRAFTER")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRAFTER)
+ | Comp(
+ "Builtin_YEAR", Keyword("YEAR") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_YEAR)
+ | Comp(
+ "Builtin_MONTH", Keyword("MONTH") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_MONTH)
+ | Comp(
+ "Builtin_DAY", Keyword("DAY") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_DAY)
+ | Comp(
+ "Builtin_HOURS", Keyword("HOURS") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_HOURS)
+ | Comp(
+ "Builtin_MINUTES", Keyword("MINUTES") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_MINUTES)
+ | Comp(
+ "Builtin_SECONDS", Keyword("SECONDS") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SECONDS)
+ | Comp(
+ "Builtin_TIMEZONE", Keyword("TIMEZONE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_TIMEZONE)
+ | Comp(
+ "Builtin_TZ", Keyword("TZ") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_TZ)
+ | Comp("Builtin_NOW", Keyword("NOW") + NIL).setEvalFn(op.Builtin_NOW)
+ | Comp("Builtin_UUID", Keyword("UUID") + NIL).setEvalFn(op.Builtin_UUID)
+ | Comp("Builtin_STRUUID", Keyword("STRUUID") + NIL).setEvalFn(op.Builtin_STRUUID)
+ | Comp(
+ "Builtin_MD5", Keyword("MD5") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_MD5)
+ | Comp(
+ "Builtin_SHA1", Keyword("SHA1") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA1)
+ | Comp(
+ "Builtin_SHA256", Keyword("SHA256") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA256)
+ | Comp(
+ "Builtin_SHA384", Keyword("SHA384") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA384)
+ | Comp(
+ "Builtin_SHA512", Keyword("SHA512") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA512)
+ | Comp(
+ "Builtin_COALESCE", Keyword("COALESCE") + Param("arg", ExpressionList)
+ ).setEvalFn(op.Builtin_COALESCE)
+ | Comp(
+ "Builtin_IF",
+ Keyword("IF")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ","
+ + Param("arg3", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_IF)
+ | Comp(
+ "Builtin_STRLANG",
+ Keyword("STRLANG")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRLANG)
+ | Comp(
+ "Builtin_STRDT",
+ Keyword("STRDT")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRDT)
+ | Comp(
+ "Builtin_sameTerm",
+ Keyword("sameTerm")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_sameTerm)
+ | Comp(
+ "Builtin_isIRI", Keyword("isIRI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isIRI)
+ | Comp(
+ "Builtin_isURI", Keyword("isURI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isIRI)
+ | Comp(
+ "Builtin_isBLANK", Keyword("isBLANK") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isBLANK)
+ | Comp(
+ "Builtin_isLITERAL", Keyword("isLITERAL") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isLITERAL)
+ | Comp(
+ "Builtin_isNUMERIC", Keyword("isNUMERIC") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isNUMERIC)
+ | RegexExpression
+ | ExistsFunc
| NotExistsFunc
+)
# [71] ArgList ::= NIL | '(' 'DISTINCT'? Expression ( ',' Expression )* ')'
-ArgList = NIL | '(' + Param('distinct', _Distinct) + delimitedList(
- ParamList('expr', Expression)) + ')'
+ArgList = (
+ NIL
+ | "("
+ + Param("distinct", _Distinct)
+ + delimitedList(ParamList("expr", Expression))
+ + ")"
+)
# [128] iriOrFunction ::= iri Optional(ArgList)
-iriOrFunction = (Comp(
- 'Function', Param('iri', iri) + ArgList).setEvalFn(op.Function)) | iri
+iriOrFunction = (
+ Comp("Function", Param("iri", iri) + ArgList).setEvalFn(op.Function)
+) | iri
# [70] FunctionCall ::= iri ArgList
-FunctionCall = Comp(
- 'Function', Param('iri', iri) + ArgList).setEvalFn(op.Function)
+FunctionCall = Comp("Function", Param("iri", iri) + ArgList).setEvalFn(op.Function)
# [120] BrackettedExpression ::= '(' Expression ')'
-BrackettedExpression = Suppress('(') + Expression + Suppress(')')
+BrackettedExpression = Suppress("(") + Expression + Suppress(")")
# [119] PrimaryExpression ::= BrackettedExpression | BuiltInCall | iriOrFunction | RDFLiteral | NumericLiteral | BooleanLiteral | Var
-PrimaryExpression = BrackettedExpression | BuiltInCall | iriOrFunction | RDFLiteral | NumericLiteral | BooleanLiteral | Var
+PrimaryExpression = (
+ BrackettedExpression
+ | BuiltInCall
+ | iriOrFunction
+ | RDFLiteral
+ | NumericLiteral
+ | BooleanLiteral
+ | Var
+)
# [118] UnaryExpression ::= '!' PrimaryExpression
# | '+' PrimaryExpression
# | '-' PrimaryExpression
# | PrimaryExpression
-UnaryExpression = Comp('UnaryNot', '!' + Param('expr', PrimaryExpression)).setEvalFn(op.UnaryNot) \
- | Comp('UnaryPlus', '+' + Param('expr', PrimaryExpression)).setEvalFn(op.UnaryPlus) \
- | Comp('UnaryMinus', '-' + Param('expr', PrimaryExpression)).setEvalFn(op.UnaryMinus) \
+UnaryExpression = (
+ Comp("UnaryNot", "!" + Param("expr", PrimaryExpression)).setEvalFn(op.UnaryNot)
+ | Comp("UnaryPlus", "+" + Param("expr", PrimaryExpression)).setEvalFn(op.UnaryPlus)
+ | Comp("UnaryMinus", "-" + Param("expr", PrimaryExpression)).setEvalFn(
+ op.UnaryMinus
+ )
| PrimaryExpression
+)
# [117] MultiplicativeExpression ::= UnaryExpression ( '*' UnaryExpression | '/' UnaryExpression )*
-MultiplicativeExpression = Comp('MultiplicativeExpression', Param('expr', UnaryExpression) + ZeroOrMore(ParamList('op', '*') + ParamList(
- 'other', UnaryExpression) | ParamList('op', '/') + ParamList('other', UnaryExpression))).setEvalFn(op.MultiplicativeExpression)
+MultiplicativeExpression = Comp(
+ "MultiplicativeExpression",
+ Param("expr", UnaryExpression)
+ + ZeroOrMore(
+ ParamList("op", "*") + ParamList("other", UnaryExpression)
+ | ParamList("op", "/") + ParamList("other", UnaryExpression)
+ ),
+).setEvalFn(op.MultiplicativeExpression)
# [116] AdditiveExpression ::= MultiplicativeExpression ( '+' MultiplicativeExpression | '-' MultiplicativeExpression | ( NumericLiteralPositive | NumericLiteralNegative ) ( ( '*' UnaryExpression ) | ( '/' UnaryExpression ) )* )*
@@ -770,36 +1064,55 @@ MultiplicativeExpression = Comp('MultiplicativeExpression', Param('expr', UnaryE
# tokenizing and parsing
-AdditiveExpression = Comp('AdditiveExpression', Param('expr', MultiplicativeExpression) +
- ZeroOrMore(ParamList('op', '+') + ParamList('other', MultiplicativeExpression) |
- ParamList('op', '-') + ParamList('other', MultiplicativeExpression))).setEvalFn(op.AdditiveExpression)
+AdditiveExpression = Comp(
+ "AdditiveExpression",
+ Param("expr", MultiplicativeExpression)
+ + ZeroOrMore(
+ ParamList("op", "+") + ParamList("other", MultiplicativeExpression)
+ | ParamList("op", "-") + ParamList("other", MultiplicativeExpression)
+ ),
+).setEvalFn(op.AdditiveExpression)
# [115] NumericExpression ::= AdditiveExpression
NumericExpression = AdditiveExpression
# [114] RelationalExpression ::= NumericExpression ( '=' NumericExpression | '!=' NumericExpression | '<' NumericExpression | '>' NumericExpression | '<=' NumericExpression | '>=' NumericExpression | 'IN' ExpressionList | 'NOT' 'IN' ExpressionList )?
-RelationalExpression = Comp('RelationalExpression', Param('expr', NumericExpression) + Optional(
- Param('op', '=') + Param('other', NumericExpression) |
- Param('op', '!=') + Param('other', NumericExpression) |
- Param('op', '<') + Param('other', NumericExpression) |
- Param('op', '>') + Param('other', NumericExpression) |
- Param('op', '<=') + Param('other', NumericExpression) |
- Param('op', '>=') + Param('other', NumericExpression) |
- Param('op', Keyword('IN')) + Param('other', ExpressionList) |
- Param('op', Combine(Keyword('NOT') + Keyword('IN'), adjacent=False, joinString=" ")) + Param('other', ExpressionList))).setEvalFn(op.RelationalExpression)
+RelationalExpression = Comp(
+ "RelationalExpression",
+ Param("expr", NumericExpression)
+ + Optional(
+ Param("op", "=") + Param("other", NumericExpression)
+ | Param("op", "!=") + Param("other", NumericExpression)
+ | Param("op", "<") + Param("other", NumericExpression)
+ | Param("op", ">") + Param("other", NumericExpression)
+ | Param("op", "<=") + Param("other", NumericExpression)
+ | Param("op", ">=") + Param("other", NumericExpression)
+ | Param("op", Keyword("IN")) + Param("other", ExpressionList)
+ | Param(
+ "op",
+ Combine(Keyword("NOT") + Keyword("IN"), adjacent=False, joinString=" "),
+ )
+ + Param("other", ExpressionList)
+ ),
+).setEvalFn(op.RelationalExpression)
# [113] ValueLogical ::= RelationalExpression
ValueLogical = RelationalExpression
# [112] ConditionalAndExpression ::= ValueLogical ( '&&' ValueLogical )*
-ConditionalAndExpression = Comp('ConditionalAndExpression', Param('expr', ValueLogical) + ZeroOrMore(
- '&&' + ParamList('other', ValueLogical))).setEvalFn(op.ConditionalAndExpression)
+ConditionalAndExpression = Comp(
+ "ConditionalAndExpression",
+ Param("expr", ValueLogical) + ZeroOrMore("&&" + ParamList("other", ValueLogical)),
+).setEvalFn(op.ConditionalAndExpression)
# [111] ConditionalOrExpression ::= ConditionalAndExpression ( '||' ConditionalAndExpression )*
-ConditionalOrExpression = Comp('ConditionalOrExpression', Param('expr', ConditionalAndExpression) + ZeroOrMore(
- '||' + ParamList('other', ConditionalAndExpression))).setEvalFn(op.ConditionalOrExpression)
+ConditionalOrExpression = Comp(
+ "ConditionalOrExpression",
+ Param("expr", ConditionalAndExpression)
+ + ZeroOrMore("||" + ParamList("other", ConditionalAndExpression)),
+).setEvalFn(op.ConditionalOrExpression)
# [110] Expression ::= ConditionalOrExpression
Expression <<= ConditionalOrExpression
@@ -809,7 +1122,7 @@ Expression <<= ConditionalOrExpression
Constraint = BrackettedExpression | BuiltInCall | FunctionCall
# [68] Filter ::= 'FILTER' Constraint
-Filter = Comp('Filter', Keyword('FILTER') + Param('expr', Constraint))
+Filter = Comp("Filter", Keyword("FILTER") + Param("expr", Constraint))
# [16] SourceSelector ::= iri
@@ -819,128 +1132,217 @@ SourceSelector = iri
DefaultGraphClause = SourceSelector
# [15] NamedGraphClause ::= 'NAMED' SourceSelector
-NamedGraphClause = Keyword('NAMED') + Param('named', SourceSelector)
+NamedGraphClause = Keyword("NAMED") + Param("named", SourceSelector)
# [13] DatasetClause ::= 'FROM' ( DefaultGraphClause | NamedGraphClause )
-DatasetClause = Comp('DatasetClause', Keyword(
- 'FROM') + (Param('default', DefaultGraphClause) | NamedGraphClause))
+DatasetClause = Comp(
+ "DatasetClause",
+ Keyword("FROM") + (Param("default", DefaultGraphClause) | NamedGraphClause),
+)
# [20] GroupCondition ::= BuiltInCall | FunctionCall | '(' Expression ( 'AS' Var )? ')' | Var
-GroupCondition = BuiltInCall | FunctionCall | Comp('GroupAs', '(' + Param(
- 'expr', Expression) + Optional(Keyword('AS') + Param('var', Var)) + ')') | Var
+GroupCondition = (
+ BuiltInCall
+ | FunctionCall
+ | Comp(
+ "GroupAs",
+ "("
+ + Param("expr", Expression)
+ + Optional(Keyword("AS") + Param("var", Var))
+ + ")",
+ )
+ | Var
+)
# [19] GroupClause ::= 'GROUP' 'BY' GroupCondition+
-GroupClause = Comp('GroupClause', Keyword('GROUP') + Keyword(
- 'BY') + OneOrMore(ParamList('condition', GroupCondition)))
+GroupClause = Comp(
+ "GroupClause",
+ Keyword("GROUP")
+ + Keyword("BY")
+ + OneOrMore(ParamList("condition", GroupCondition)),
+)
-_Silent = Optional(Param('silent', Keyword('SILENT')))
+_Silent = Optional(Param("silent", Keyword("SILENT")))
# [31] Load ::= 'LOAD' 'SILENT'? iri ( 'INTO' GraphRef )?
-Load = Comp('Load', Keyword('LOAD') + _Silent + Param('iri', iri) +
- Optional(Keyword('INTO') + GraphRef))
+Load = Comp(
+ "Load",
+ Keyword("LOAD")
+ + _Silent
+ + Param("iri", iri)
+ + Optional(Keyword("INTO") + GraphRef),
+)
# [32] Clear ::= 'CLEAR' 'SILENT'? GraphRefAll
-Clear = Comp('Clear', Keyword('CLEAR') + _Silent + GraphRefAll)
+Clear = Comp("Clear", Keyword("CLEAR") + _Silent + GraphRefAll)
# [33] Drop ::= 'DROP' _Silent GraphRefAll
-Drop = Comp('Drop', Keyword('DROP') + _Silent + GraphRefAll)
+Drop = Comp("Drop", Keyword("DROP") + _Silent + GraphRefAll)
# [34] Create ::= 'CREATE' _Silent GraphRef
-Create = Comp('Create', Keyword('CREATE') + _Silent + GraphRef)
+Create = Comp("Create", Keyword("CREATE") + _Silent + GraphRef)
# [35] Add ::= 'ADD' _Silent GraphOrDefault 'TO' GraphOrDefault
-Add = Comp('Add', Keyword(
- 'ADD') + _Silent + GraphOrDefault + Keyword('TO') + GraphOrDefault)
+Add = Comp(
+ "Add", Keyword("ADD") + _Silent + GraphOrDefault + Keyword("TO") + GraphOrDefault
+)
# [36] Move ::= 'MOVE' _Silent GraphOrDefault 'TO' GraphOrDefault
-Move = Comp('Move', Keyword(
- 'MOVE') + _Silent + GraphOrDefault + Keyword('TO') + GraphOrDefault)
+Move = Comp(
+ "Move", Keyword("MOVE") + _Silent + GraphOrDefault + Keyword("TO") + GraphOrDefault
+)
# [37] Copy ::= 'COPY' _Silent GraphOrDefault 'TO' GraphOrDefault
-Copy = Comp('Copy', Keyword(
- 'COPY') + _Silent + GraphOrDefault + Keyword('TO') + GraphOrDefault)
+Copy = Comp(
+ "Copy", Keyword("COPY") + _Silent + GraphOrDefault + Keyword("TO") + GraphOrDefault
+)
# [38] InsertData ::= 'INSERT DATA' QuadData
-InsertData = Comp('InsertData', Keyword('INSERT') + Keyword('DATA') + QuadData)
+InsertData = Comp("InsertData", Keyword("INSERT") + Keyword("DATA") + QuadData)
# [39] DeleteData ::= 'DELETE DATA' QuadData
-DeleteData = Comp('DeleteData', Keyword('DELETE') + Keyword('DATA') + QuadData)
+DeleteData = Comp("DeleteData", Keyword("DELETE") + Keyword("DATA") + QuadData)
# [40] DeleteWhere ::= 'DELETE WHERE' QuadPattern
-DeleteWhere = Comp(
- 'DeleteWhere', Keyword('DELETE') + Keyword('WHERE') + QuadPattern)
+DeleteWhere = Comp("DeleteWhere", Keyword("DELETE") + Keyword("WHERE") + QuadPattern)
# [42] DeleteClause ::= 'DELETE' QuadPattern
-DeleteClause = Comp('DeleteClause', Keyword('DELETE') + QuadPattern)
+DeleteClause = Comp("DeleteClause", Keyword("DELETE") + QuadPattern)
# [43] InsertClause ::= 'INSERT' QuadPattern
-InsertClause = Comp('InsertClause', Keyword('INSERT') + QuadPattern)
+InsertClause = Comp("InsertClause", Keyword("INSERT") + QuadPattern)
# [44] UsingClause ::= 'USING' ( iri | 'NAMED' iri )
-UsingClause = Comp('UsingClause', Keyword('USING') + (
- Param('default', iri) | Keyword('NAMED') + Param('named', iri)))
+UsingClause = Comp(
+ "UsingClause",
+ Keyword("USING") + (Param("default", iri) | Keyword("NAMED") + Param("named", iri)),
+)
# [41] Modify ::= ( 'WITH' iri )? ( DeleteClause Optional(InsertClause) | InsertClause ) ZeroOrMore(UsingClause) 'WHERE' GroupGraphPattern
-Modify = Comp('Modify', Optional(Keyword('WITH') + Param('withClause', iri)) + (Param('delete', DeleteClause) + Optional(Param(
- 'insert', InsertClause)) | Param('insert', InsertClause)) + ZeroOrMore(ParamList('using', UsingClause)) + Keyword('WHERE') + Param('where', GroupGraphPattern))
+Modify = Comp(
+ "Modify",
+ Optional(Keyword("WITH") + Param("withClause", iri))
+ + (
+ Param("delete", DeleteClause) + Optional(Param("insert", InsertClause))
+ | Param("insert", InsertClause)
+ )
+ + ZeroOrMore(ParamList("using", UsingClause))
+ + Keyword("WHERE")
+ + Param("where", GroupGraphPattern),
+)
# [30] Update1 ::= Load | Clear | Drop | Add | Move | Copy | Create | InsertData | DeleteData | DeleteWhere | Modify
-Update1 = Load | Clear | Drop | Add | Move | Copy | Create | InsertData | DeleteData | DeleteWhere | Modify
+Update1 = (
+ Load
+ | Clear
+ | Drop
+ | Add
+ | Move
+ | Copy
+ | Create
+ | InsertData
+ | DeleteData
+ | DeleteWhere
+ | Modify
+)
# [63] InlineDataOneVar ::= Var '{' ZeroOrMore(DataBlockValue) '}'
-InlineDataOneVar = ParamList(
- 'var', Var) + '{' + ZeroOrMore(ParamList('value', DataBlockValue)) + '}'
+InlineDataOneVar = (
+ ParamList("var", Var) + "{" + ZeroOrMore(ParamList("value", DataBlockValue)) + "}"
+)
# [64] InlineDataFull ::= ( NIL | '(' ZeroOrMore(Var) ')' ) '{' ( '(' ZeroOrMore(DataBlockValue) ')' | NIL )* '}'
-InlineDataFull = (NIL | '(' + ZeroOrMore(ParamList('var', Var)) + ')') + '{' + ZeroOrMore(
- ParamList('value', Group(Suppress('(') + ZeroOrMore(DataBlockValue) + Suppress(')') | NIL))) + '}'
+InlineDataFull = (
+ (NIL | "(" + ZeroOrMore(ParamList("var", Var)) + ")")
+ + "{"
+ + ZeroOrMore(
+ ParamList(
+ "value",
+ Group(Suppress("(") + ZeroOrMore(DataBlockValue) + Suppress(")") | NIL),
+ )
+ )
+ + "}"
+)
# [62] DataBlock ::= InlineDataOneVar | InlineDataFull
DataBlock = InlineDataOneVar | InlineDataFull
# [28] ValuesClause ::= ( 'VALUES' DataBlock )?
-ValuesClause = Optional(Param(
- 'valuesClause', Comp('ValuesClause', Keyword('VALUES') + DataBlock)))
+ValuesClause = Optional(
+ Param("valuesClause", Comp("ValuesClause", Keyword("VALUES") + DataBlock))
+)
# [74] ConstructTriples ::= TriplesSameSubject ( '.' Optional(ConstructTriples) )?
ConstructTriples = Forward()
-ConstructTriples <<= (ParamList('template', TriplesSameSubject) + Optional(
- Suppress('.') + Optional(ConstructTriples)))
+ConstructTriples <<= ParamList("template", TriplesSameSubject) + Optional(
+ Suppress(".") + Optional(ConstructTriples)
+)
# [73] ConstructTemplate ::= '{' Optional(ConstructTriples) '}'
-ConstructTemplate = Suppress('{') + Optional(ConstructTriples) + Suppress('}')
+ConstructTemplate = Suppress("{") + Optional(ConstructTriples) + Suppress("}")
# [57] OptionalGraphPattern ::= 'OPTIONAL' GroupGraphPattern
-OptionalGraphPattern = Comp('OptionalGraphPattern', Keyword(
- 'OPTIONAL') + Param('graph', GroupGraphPattern))
+OptionalGraphPattern = Comp(
+ "OptionalGraphPattern", Keyword("OPTIONAL") + Param("graph", GroupGraphPattern)
+)
# [58] GraphGraphPattern ::= 'GRAPH' VarOrIri GroupGraphPattern
-GraphGraphPattern = Comp('GraphGraphPattern', Keyword(
- 'GRAPH') + Param('term', VarOrIri) + Param('graph', GroupGraphPattern))
+GraphGraphPattern = Comp(
+ "GraphGraphPattern",
+ Keyword("GRAPH") + Param("term", VarOrIri) + Param("graph", GroupGraphPattern),
+)
# [59] ServiceGraphPattern ::= 'SERVICE' _Silent VarOrIri GroupGraphPattern
-ServiceGraphPattern = Comp('ServiceGraphPattern', Keyword(
- 'SERVICE') + _Silent + Param('term', VarOrIri) + Param('graph', GroupGraphPattern))
+ServiceGraphPattern = Comp(
+ "ServiceGraphPattern",
+ Keyword("SERVICE")
+ + _Silent
+ + Param("term", VarOrIri)
+ + Param("graph", GroupGraphPattern),
+)
# [60] Bind ::= 'BIND' '(' Expression 'AS' Var ')'
-Bind = Comp('Bind', Keyword('BIND') + '(' + Param(
- 'expr', Expression) + Keyword('AS') + Param('var', Var) + ')')
+Bind = Comp(
+ "Bind",
+ Keyword("BIND")
+ + "("
+ + Param("expr", Expression)
+ + Keyword("AS")
+ + Param("var", Var)
+ + ")",
+)
# [61] InlineData ::= 'VALUES' DataBlock
-InlineData = Comp('InlineData', Keyword('VALUES') + DataBlock)
+InlineData = Comp("InlineData", Keyword("VALUES") + DataBlock)
# [56] GraphPatternNotTriples ::= GroupOrUnionGraphPattern | OptionalGraphPattern | MinusGraphPattern | GraphGraphPattern | ServiceGraphPattern | Filter | Bind | InlineData
-GraphPatternNotTriples = GroupOrUnionGraphPattern | OptionalGraphPattern | MinusGraphPattern | GraphGraphPattern | ServiceGraphPattern | Filter | Bind | InlineData
+GraphPatternNotTriples = (
+ GroupOrUnionGraphPattern
+ | OptionalGraphPattern
+ | MinusGraphPattern
+ | GraphGraphPattern
+ | ServiceGraphPattern
+ | Filter
+ | Bind
+ | InlineData
+)
# [54] GroupGraphPatternSub ::= Optional(TriplesBlock) ( GraphPatternNotTriples '.'? Optional(TriplesBlock) )*
-GroupGraphPatternSub = Comp('GroupGraphPatternSub', Optional(ParamList('part', Comp('TriplesBlock', TriplesBlock))) + ZeroOrMore(
- ParamList('part', GraphPatternNotTriples) + Optional('.') + Optional(ParamList('part', Comp('TriplesBlock', TriplesBlock)))))
+GroupGraphPatternSub = Comp(
+ "GroupGraphPatternSub",
+ Optional(ParamList("part", Comp("TriplesBlock", TriplesBlock)))
+ + ZeroOrMore(
+ ParamList("part", GraphPatternNotTriples)
+ + Optional(".")
+ + Optional(ParamList("part", Comp("TriplesBlock", TriplesBlock)))
+ ),
+)
# ----------------
@@ -948,70 +1350,151 @@ GroupGraphPatternSub = Comp('GroupGraphPatternSub', Optional(ParamList('part', C
HavingCondition = Constraint
# [21] HavingClause ::= 'HAVING' HavingCondition+
-HavingClause = Comp('HavingClause', Keyword(
- 'HAVING') + OneOrMore(ParamList('condition', HavingCondition)))
+HavingClause = Comp(
+ "HavingClause",
+ Keyword("HAVING") + OneOrMore(ParamList("condition", HavingCondition)),
+)
# [24] OrderCondition ::= ( ( 'ASC' | 'DESC' ) BrackettedExpression )
# | ( Constraint | Var )
-OrderCondition = Comp('OrderCondition', Param('order', Keyword('ASC') | Keyword(
- 'DESC')) + Param('expr', BrackettedExpression) | Param('expr', Constraint | Var))
+OrderCondition = Comp(
+ "OrderCondition",
+ Param("order", Keyword("ASC") | Keyword("DESC"))
+ + Param("expr", BrackettedExpression)
+ | Param("expr", Constraint | Var),
+)
# [23] OrderClause ::= 'ORDER' 'BY' OneOrMore(OrderCondition)
-OrderClause = Comp('OrderClause', Keyword('ORDER') + Keyword(
- 'BY') + OneOrMore(ParamList('condition', OrderCondition)))
+OrderClause = Comp(
+ "OrderClause",
+ Keyword("ORDER")
+ + Keyword("BY")
+ + OneOrMore(ParamList("condition", OrderCondition)),
+)
# [26] LimitClause ::= 'LIMIT' INTEGER
-LimitClause = Keyword('LIMIT') + Param('limit', INTEGER)
+LimitClause = Keyword("LIMIT") + Param("limit", INTEGER)
# [27] OffsetClause ::= 'OFFSET' INTEGER
-OffsetClause = Keyword('OFFSET') + Param('offset', INTEGER)
+OffsetClause = Keyword("OFFSET") + Param("offset", INTEGER)
# [25] LimitOffsetClauses ::= LimitClause Optional(OffsetClause) | OffsetClause Optional(LimitClause)
-LimitOffsetClauses = Comp('LimitOffsetClauses', LimitClause + Optional(
- OffsetClause) | OffsetClause + Optional(LimitClause))
+LimitOffsetClauses = Comp(
+ "LimitOffsetClauses",
+ LimitClause + Optional(OffsetClause) | OffsetClause + Optional(LimitClause),
+)
# [18] SolutionModifier ::= GroupClause? HavingClause? OrderClause? LimitOffsetClauses?
-SolutionModifier = Optional(Param('groupby', GroupClause)) + Optional(Param('having', HavingClause)) + Optional(
- Param('orderby', OrderClause)) + Optional(Param('limitoffset', LimitOffsetClauses))
+SolutionModifier = (
+ Optional(Param("groupby", GroupClause))
+ + Optional(Param("having", HavingClause))
+ + Optional(Param("orderby", OrderClause))
+ + Optional(Param("limitoffset", LimitOffsetClauses))
+)
# [9] SelectClause ::= 'SELECT' ( 'DISTINCT' | 'REDUCED' )? ( ( Var | ( '(' Expression 'AS' Var ')' ) )+ | '*' )
-SelectClause = Keyword('SELECT') + Optional(Param('modifier', Keyword('DISTINCT') | Keyword('REDUCED'))) + (OneOrMore(ParamList('projection', Comp('vars',
- Param('var', Var) | (Literal('(') + Param('expr', Expression) + Keyword('AS') + Param('evar', Var) + ')')))) | '*')
+SelectClause = (
+ Keyword("SELECT")
+ + Optional(Param("modifier", Keyword("DISTINCT") | Keyword("REDUCED")))
+ + (
+ OneOrMore(
+ ParamList(
+ "projection",
+ Comp(
+ "vars",
+ Param("var", Var)
+ | (
+ Literal("(")
+ + Param("expr", Expression)
+ + Keyword("AS")
+ + Param("evar", Var)
+ + ")"
+ ),
+ ),
+ )
+ )
+ | "*"
+ )
+)
# [17] WhereClause ::= 'WHERE'? GroupGraphPattern
-WhereClause = Optional(Keyword('WHERE')) + Param('where', GroupGraphPattern)
+WhereClause = Optional(Keyword("WHERE")) + Param("where", GroupGraphPattern)
# [8] SubSelect ::= SelectClause WhereClause SolutionModifier ValuesClause
-SubSelect = Comp('SubSelect', SelectClause + WhereClause +
- SolutionModifier + ValuesClause)
+SubSelect = Comp(
+ "SubSelect", SelectClause + WhereClause + SolutionModifier + ValuesClause
+)
# [53] GroupGraphPattern ::= '{' ( SubSelect | GroupGraphPatternSub ) '}'
-GroupGraphPattern <<= (
- Suppress('{') + (SubSelect | GroupGraphPatternSub) + Suppress('}'))
+GroupGraphPattern <<= Suppress("{") + (SubSelect | GroupGraphPatternSub) + Suppress("}")
# [7] SelectQuery ::= SelectClause DatasetClause* WhereClause SolutionModifier
-SelectQuery = Comp('SelectQuery', SelectClause + ZeroOrMore(ParamList(
- 'datasetClause', DatasetClause)) + WhereClause + SolutionModifier + ValuesClause)
+SelectQuery = Comp(
+ "SelectQuery",
+ SelectClause
+ + ZeroOrMore(ParamList("datasetClause", DatasetClause))
+ + WhereClause
+ + SolutionModifier
+ + ValuesClause,
+)
# [10] ConstructQuery ::= 'CONSTRUCT' ( ConstructTemplate DatasetClause* WhereClause SolutionModifier | DatasetClause* 'WHERE' '{' TriplesTemplate? '}' SolutionModifier )
# NOTE: The CONSTRUCT WHERE alternative has unnecessarily many Comp/Param pairs
# to allow it to go through the same algebra translation process
-ConstructQuery = Comp('ConstructQuery', Keyword('CONSTRUCT') + (ConstructTemplate + ZeroOrMore(ParamList('datasetClause', DatasetClause)) + WhereClause + SolutionModifier + ValuesClause | ZeroOrMore(ParamList(
- 'datasetClause', DatasetClause)) + Keyword('WHERE') + '{' + Optional(Param('where', Comp('FakeGroupGraphPatten', ParamList('part', Comp('TriplesBlock', TriplesTemplate))))) + '}' + SolutionModifier + ValuesClause))
+ConstructQuery = Comp(
+ "ConstructQuery",
+ Keyword("CONSTRUCT")
+ + (
+ ConstructTemplate
+ + ZeroOrMore(ParamList("datasetClause", DatasetClause))
+ + WhereClause
+ + SolutionModifier
+ + ValuesClause
+ | ZeroOrMore(ParamList("datasetClause", DatasetClause))
+ + Keyword("WHERE")
+ + "{"
+ + Optional(
+ Param(
+ "where",
+ Comp(
+ "FakeGroupGraphPatten",
+ ParamList("part", Comp("TriplesBlock", TriplesTemplate)),
+ ),
+ )
+ )
+ + "}"
+ + SolutionModifier
+ + ValuesClause
+ ),
+)
# [12] AskQuery ::= 'ASK' DatasetClause* WhereClause SolutionModifier
-AskQuery = Comp('AskQuery', Keyword('ASK') + Param('datasetClause', ZeroOrMore(
- DatasetClause)) + WhereClause + SolutionModifier + ValuesClause)
+AskQuery = Comp(
+ "AskQuery",
+ Keyword("ASK")
+ + Param("datasetClause", ZeroOrMore(DatasetClause))
+ + WhereClause
+ + SolutionModifier
+ + ValuesClause,
+)
# [11] DescribeQuery ::= 'DESCRIBE' ( VarOrIri+ | '*' ) DatasetClause* WhereClause? SolutionModifier
-DescribeQuery = Comp('DescribeQuery', Keyword('DESCRIBE') + (OneOrMore(ParamList('var', VarOrIri)) | '*') + Param(
- 'datasetClause', ZeroOrMore(DatasetClause)) + Optional(WhereClause) + SolutionModifier + ValuesClause)
+DescribeQuery = Comp(
+ "DescribeQuery",
+ Keyword("DESCRIBE")
+ + (OneOrMore(ParamList("var", VarOrIri)) | "*")
+ + Param("datasetClause", ZeroOrMore(DatasetClause))
+ + Optional(WhereClause)
+ + SolutionModifier
+ + ValuesClause,
+)
# [29] Update ::= Prologue ( Update1 ( ';' Update )? )?
Update = Forward()
-Update <<= (ParamList('prologue', Prologue) + Optional(ParamList('request',
- Update1) + Optional(';' + Update)))
+Update <<= ParamList("prologue", Prologue) + Optional(
+ ParamList("request", Update1) + Optional(";" + Update)
+)
# [2] Query ::= Prologue
@@ -1021,17 +1504,16 @@ Update <<= (ParamList('prologue', Prologue) + Optional(ParamList('request',
Query = Prologue + (SelectQuery | ConstructQuery | DescribeQuery | AskQuery)
# [3] UpdateUnit ::= Update
-UpdateUnit = Comp('Update', Update)
+UpdateUnit = Comp("Update", Update)
# [1] QueryUnit ::= Query
QueryUnit = Query
-QueryUnit.ignore('#' + restOfLine)
-UpdateUnit.ignore('#' + restOfLine)
+QueryUnit.ignore("#" + restOfLine)
+UpdateUnit.ignore("#" + restOfLine)
-expandUnicodeEscapes_re = re.compile(
- r'\\u([0-9a-f]{4}(?:[0-9a-f]{4})?)', flags=re.I)
+expandUnicodeEscapes_re = re.compile(r"\\u([0-9a-f]{4}(?:[0-9a-f]{4})?)", flags=re.I)
def expandUnicodeEscapes(q):
@@ -1050,28 +1532,29 @@ def expandUnicodeEscapes(q):
def parseQuery(q):
- if hasattr(q, 'read'):
+ if hasattr(q, "read"):
q = q.read()
if isinstance(q, bytes):
- q = q.decode('utf-8')
+ q = q.decode("utf-8")
q = expandUnicodeEscapes(q)
return Query.parseString(q, parseAll=True)
def parseUpdate(q):
- if hasattr(q, 'read'):
+ if hasattr(q, "read"):
q = q.read()
if isinstance(q, bytes):
- q = q.decode('utf-8')
+ q = q.decode("utf-8")
q = expandUnicodeEscapes(q)
return UpdateUnit.parseString(q, parseAll=True)[0]
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
DEBUG = True
try:
q = Query.parseString(sys.argv[1])
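The parseQuery/parseUpdate entry points reflowed above accept a string, bytes, or a file-like object, and run expandUnicodeEscapes before pyparsing sees the text. A minimal sketch of the query side, assuming rdflib with this SPARQL plugin is importable (the query string is illustrative):

    from rdflib.plugins.sparql.parser import parseQuery

    # parseQuery returns a pyparsing ParseResults: element 0 is the Prologue
    # CompValue, element 1 the query CompValue built by the Comp/Param grammar.
    parsed = parseQuery("SELECT ?s WHERE { ?s ?p ?o }")
    print(parsed[1].name)  # -> SelectQuery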
diff --git a/rdflib/plugins/sparql/parserutils.py b/rdflib/plugins/sparql/parserutils.py
index 29804eea..e67b754b 100644
--- a/rdflib/plugins/sparql/parserutils.py
+++ b/rdflib/plugins/sparql/parserutils.py
@@ -1,4 +1,3 @@
-
from types import MethodType
from collections import OrderedDict
@@ -44,6 +43,7 @@ the resulting CompValue
# Comp('Sum')( Param('x')(Number) + '+' + Param('y')(Number) )
+
def value(ctx, val, variables=False, errors=False):
"""
utility function for evaluating something...
@@ -172,7 +172,7 @@ class CompValue(OrderedDict):
def __getattr__(self, a):
# Hack hack: OrderedDict relies on this
- if a in ('_OrderedDict__root', '_OrderedDict__end'):
+ if a in ("_OrderedDict__root", "_OrderedDict__end"):
raise AttributeError
try:
return self[a]
@@ -224,13 +224,13 @@ class Comp(TokenConverter):
res._evalfn = MethodType(self.evalfn, res)
else:
res = CompValue(self.name)
- if self.name == 'ServiceGraphPattern':
+ if self.name == "ServiceGraphPattern":
# Then this must be a service graph pattern and have
# already matched.
# let's assume there is one, for now, then test for two later.
sgp = originalTextFor(self.expr)
service_string = sgp.searchString(instring)[0][0]
- res['service_string'] = service_string
+ res["service_string"] = service_string
for t in tokenList:
if isinstance(t, ParamValue):
@@ -250,38 +250,38 @@ class Comp(TokenConverter):
return self
-def prettify_parsetree(t, indent='', depth=0):
+def prettify_parsetree(t, indent="", depth=0):
out = []
if isinstance(t, ParseResults):
for e in t.asList():
out.append(prettify_parsetree(e, indent, depth + 1))
for k, v in sorted(t.items()):
- out.append("%s%s- %s:\n" % (indent, ' ' * depth, k))
+ out.append("%s%s- %s:\n" % (indent, " " * depth, k))
out.append(prettify_parsetree(v, indent, depth + 1))
elif isinstance(t, CompValue):
- out.append("%s%s> %s:\n" % (indent, ' ' * depth, t.name))
+ out.append("%s%s> %s:\n" % (indent, " " * depth, t.name))
for k, v in t.items():
- out.append("%s%s- %s:\n" % (indent, ' ' * (depth + 1), k))
+ out.append("%s%s- %s:\n" % (indent, " " * (depth + 1), k))
out.append(prettify_parsetree(v, indent, depth + 2))
elif isinstance(t, dict):
for k, v in t.items():
- out.append("%s%s- %s:\n" % (indent, ' ' * (depth + 1), k))
+ out.append("%s%s- %s:\n" % (indent, " " * (depth + 1), k))
out.append(prettify_parsetree(v, indent, depth + 2))
elif isinstance(t, list):
for e in t:
out.append(prettify_parsetree(e, indent, depth + 1))
else:
- out.append("%s%s- %r\n" % (indent, ' ' * depth, t))
+ out.append("%s%s- %r\n" % (indent, " " * depth, t))
return "".join(out)
-if __name__ == '__main__':
+if __name__ == "__main__":
from pyparsing import Word, nums
import sys
Number = Word(nums)
Number.setParseAction(lambda x: int(x[0]))
- Plus = Comp('plus', Param('a', Number) + '+' + Param('b', Number))
+ Plus = Comp("plus", Param("a", Number) + "+" + Param("b", Number))
Plus.setEvalFn(lambda self, ctx: self.a + self.b)
r = Plus.parseString(sys.argv[1])
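To see the CompValue/ParamValue nesting that Comp and Param produce, the prettify_parsetree helper rewritten above can be pointed at any parse result; a small sketch reusing the parser entry point:

    from rdflib.plugins.sparql.parser import parseQuery
    from rdflib.plugins.sparql.parserutils import prettify_parsetree

    # Prints one line per CompValue ("> name:") and per parameter ("- key:"),
    # indented by depth, exactly as the helper above assembles its output.
    tree = parseQuery("ASK { ?s ?p ?o }")
    print(prettify_parsetree(tree))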
diff --git a/rdflib/plugins/sparql/processor.py b/rdflib/plugins/sparql/processor.py
index 073a387e..84e8c823 100644
--- a/rdflib/plugins/sparql/processor.py
+++ b/rdflib/plugins/sparql/processor.py
@@ -1,4 +1,3 @@
-
"""
Code for tying SPARQL Engine into RDFLib
@@ -7,7 +6,6 @@ These should be automatically registered with RDFLib
"""
-
from rdflib.query import Processor, Result, UpdateProcessor
from rdflib.plugins.sparql.sparql import Query
@@ -33,12 +31,12 @@ def processUpdate(graph, updateString, initBindings={}, initNs={}, base=None):
Process a SPARQL Update Request
returns Nothing on success or raises Exceptions on error
"""
- evalUpdate(graph, translateUpdate(
- parseUpdate(updateString), base, initNs), initBindings)
+ evalUpdate(
+ graph, translateUpdate(parseUpdate(updateString), base, initNs), initBindings
+ )
class SPARQLResult(Result):
-
def __init__(self, res):
Result.__init__(self, res["type_"])
self.vars = res.get("vars_")
@@ -59,13 +57,10 @@ class SPARQLUpdateProcessor(UpdateProcessor):
class SPARQLProcessor(Processor):
-
def __init__(self, graph):
self.graph = graph
- def query(
- self, strOrQuery, initBindings={},
- initNs={}, base=None, DEBUG=False):
+ def query(self, strOrQuery, initBindings={}, initNs={}, base=None, DEBUG=False):
"""
Evaluate a query with the given initial bindings, and initial
namespaces. The given base is used to resolve relative URIs in
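processUpdate, reformatted above, is the convenience wrapper that parses, translates, and evaluates an update in one call; a sketch with an illustrative INSERT DATA payload:

    from rdflib import Graph
    from rdflib.plugins.sparql.processor import processUpdate

    # Returns nothing on success and raises on error, per the docstring above.
    g = Graph()
    processUpdate(g, 'INSERT DATA { <urn:a> <urn:b> "c" }')
    print(len(g))  # -> 1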
diff --git a/rdflib/plugins/sparql/results/csvresults.py b/rdflib/plugins/sparql/results/csvresults.py
index d354ccf5..c87b6ea7 100644
--- a/rdflib/plugins/sparql/results/csvresults.py
+++ b/rdflib/plugins/sparql/results/csvresults.py
@@ -21,11 +21,11 @@ class CSVResultParser(ResultParser):
def parse(self, source, content_type=None):
- r = Result('SELECT')
+ r = Result("SELECT")
if isinstance(source.read(0), bytes):
# if reading from source returns bytes do utf-8 decoding
- source = codecs.getreader('utf-8')(source)
+ source = codecs.getreader("utf-8")(source)
reader = csv.reader(source, delimiter=self.delim)
r.vars = [Variable(x) for x in next(reader)]
@@ -37,9 +37,11 @@ class CSVResultParser(ResultParser):
return r
def parseRow(self, row, v):
- return dict((var, val)
- for var, val in zip(v, [self.convertTerm(t)
- for t in row]) if val is not None)
+ return dict(
+ (var, val)
+ for var, val in zip(v, [self.convertTerm(t) for t in row])
+ if val is not None
+ )
def convertTerm(self, t):
if t == "":
@@ -52,22 +54,21 @@ class CSVResultParser(ResultParser):
class CSVResultSerializer(ResultSerializer):
-
def __init__(self, result):
ResultSerializer.__init__(self, result)
self.delim = ","
if result.type != "SELECT":
- raise Exception(
- "CSVSerializer can only serialize select query results")
+ raise Exception("CSVSerializer can only serialize select query results")
- def serialize(self, stream, encoding='utf-8', **kwargs):
+ def serialize(self, stream, encoding="utf-8", **kwargs):
# the serialiser writes bytes in the given encoding
# in py3 csv.writer is unicode aware and writes STRINGS,
# so we encode afterwards
import codecs
+
stream = codecs.getwriter(encoding)(stream)
out = csv.writer(stream, delimiter=self.delim)
@@ -75,8 +76,9 @@ class CSVResultSerializer(ResultSerializer):
vs = [self.serializeTerm(v, encoding) for v in self.result.vars]
out.writerow(vs)
for row in self.result.bindings:
- out.writerow([self.serializeTerm(
- row.get(v), encoding) for v in self.result.vars])
+ out.writerow(
+ [self.serializeTerm(row.get(v), encoding) for v in self.result.vars]
+ )
def serializeTerm(self, term, encoding):
if term is None:
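The CSV serializer above lets csv.writer produce strings and encodes afterwards by wrapping the stream with codecs.getwriter. A round-trip sketch through the plugin registry, on toy data:

    from io import BytesIO
    from rdflib import Graph

    g = Graph()
    g.parse(data='<urn:a> <urn:b> "c" .', format="nt")
    out = BytesIO()
    # "csv" resolves to CSVResultSerializer; the bytes written to the stream
    # are in the requested encoding (utf-8 by default).
    g.query("SELECT ?s ?o WHERE { ?s ?p ?o }").serialize(out, format="csv")
    print(out.getvalue().decode("utf-8"))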
diff --git a/rdflib/plugins/sparql/results/graph.py b/rdflib/plugins/sparql/results/graph.py
index c47daa72..13e256bb 100644
--- a/rdflib/plugins/sparql/results/graph.py
+++ b/rdflib/plugins/sparql/results/graph.py
@@ -1,18 +1,12 @@
from rdflib import Graph
-from rdflib.query import (
- Result,
- ResultParser,
- ResultSerializer,
- ResultException
-)
+from rdflib.query import Result, ResultParser, ResultSerializer, ResultException
class GraphResultParser(ResultParser):
-
def parse(self, source, content_type):
- res = Result('CONSTRUCT') # hmm - or describe?type_)
+ res = Result("CONSTRUCT") # hmm - or describe?type_)
res.graph = Graph()
res.graph.parse(source, format=content_type)
diff --git a/rdflib/plugins/sparql/results/jsonresults.py b/rdflib/plugins/sparql/results/jsonresults.py
index 6110c324..13a8da5e 100644
--- a/rdflib/plugins/sparql/results/jsonresults.py
+++ b/rdflib/plugins/sparql/results/jsonresults.py
@@ -1,7 +1,6 @@
import json
-from rdflib.query import (
- Result, ResultException, ResultSerializer, ResultParser)
+from rdflib.query import Result, ResultException, ResultSerializer, ResultParser
from rdflib import Literal, URIRef, BNode, Variable
@@ -18,23 +17,21 @@ Authors: Drew Perttula, Gunnar Aastrand Grimnes
class JSONResultParser(ResultParser):
-
def parse(self, source, content_type=None):
inp = source.read()
if isinstance(inp, bytes):
- inp = inp.decode('utf-8')
+ inp = inp.decode("utf-8")
return JSONResult(json.loads(inp))
class JSONResultSerializer(ResultSerializer):
-
def __init__(self, result):
ResultSerializer.__init__(self, result)
def serialize(self, stream, encoding=None):
res = {}
- if self.result.type == 'ASK':
+ if self.result.type == "ASK":
res["head"] = {}
res["boolean"] = self.result.askAnswer
else:
@@ -42,8 +39,9 @@ class JSONResultSerializer(ResultSerializer):
res["results"] = {}
res["head"] = {}
res["head"]["vars"] = self.result.vars
- res["results"]["bindings"] = [self._bindingToJSON(
- x) for x in self.result.bindings]
+ res["results"]["bindings"] = [
+ self._bindingToJSON(x) for x in self.result.bindings
+ ]
r = json.dumps(res, allow_nan=False, ensure_ascii=False)
if encoding is not None:
@@ -61,27 +59,26 @@ class JSONResultSerializer(ResultSerializer):
class JSONResult(Result):
-
def __init__(self, json):
self.json = json
if "boolean" in json:
- type_ = 'ASK'
+ type_ = "ASK"
elif "results" in json:
- type_ = 'SELECT'
+ type_ = "SELECT"
else:
- raise ResultException('No boolean or results in json!')
+ raise ResultException("No boolean or results in json!")
Result.__init__(self, type_)
- if type_ == 'ASK':
- self.askAnswer = bool(json['boolean'])
+ if type_ == "ASK":
+ self.askAnswer = bool(json["boolean"])
else:
self.bindings = self._get_bindings()
self.vars = [Variable(x) for x in json["head"]["vars"]]
def _get_bindings(self):
ret = []
- for row in self.json['results']['bindings']:
+ for row in self.json["results"]["bindings"]:
outRow = {}
for k, v in row.items():
outRow[Variable(k)] = parseJsonTerm(v)
@@ -97,36 +94,34 @@ def parseJsonTerm(d):
{ 'type': 'literal', 'value': 'drewp' }
"""
- t = d['type']
- if t == 'uri':
- return URIRef(d['value'])
- elif t == 'literal':
- return Literal(d['value'], datatype=d.get('datatype'), lang=d.get('xml:lang'))
- elif t == 'typed-literal':
- return Literal(d['value'], datatype=URIRef(d['datatype']))
- elif t == 'bnode':
- return BNode(d['value'])
+ t = d["type"]
+ if t == "uri":
+ return URIRef(d["value"])
+ elif t == "literal":
+ return Literal(d["value"], datatype=d.get("datatype"), lang=d.get("xml:lang"))
+ elif t == "typed-literal":
+ return Literal(d["value"], datatype=URIRef(d["datatype"]))
+ elif t == "bnode":
+ return BNode(d["value"])
else:
raise NotImplementedError("json term type %r" % t)
def termToJSON(self, term):
if isinstance(term, URIRef):
- return {'type': 'uri', 'value': str(term)}
+ return {"type": "uri", "value": str(term)}
elif isinstance(term, Literal):
- r = {'type': 'literal',
- 'value': str(term)}
+ r = {"type": "literal", "value": str(term)}
if term.datatype is not None:
- r['datatype'] = str(term.datatype)
+ r["datatype"] = str(term.datatype)
if term.language is not None:
- r['xml:lang'] = term.language
+ r["xml:lang"] = term.language
return r
elif isinstance(term, BNode):
- return {'type': 'bnode', 'value': str(term)}
+ return {"type": "bnode", "value": str(term)}
elif term is None:
return None
else:
- raise ResultException(
- 'Unknown term type: %s (%s)' % (term, type(term)))
+ raise ResultException("Unknown term type: %s (%s)" % (term, type(term)))
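parseJsonTerm and termToJSON, rewritten above, are inverses over the SPARQL JSON term encoding; a round-trip sketch (termToJSON takes an unused self first argument, so None is passed purely to satisfy the signature):

    from rdflib.plugins.sparql.results.jsonresults import parseJsonTerm, termToJSON

    # A plain literal, with neither datatype nor language tag.
    term = parseJsonTerm({"type": "literal", "value": "drewp"})
    print(repr(term))              # -> rdflib.term.Literal('drewp')
    print(termToJSON(None, term))  # -> {'type': 'literal', 'value': 'drewp'}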
diff --git a/rdflib/plugins/sparql/results/rdfresults.py b/rdflib/plugins/sparql/results/rdfresults.py
index ac71ff1d..7f64bbf4 100644
--- a/rdflib/plugins/sparql/results/rdfresults.py
+++ b/rdflib/plugins/sparql/results/rdfresults.py
@@ -2,7 +2,7 @@ from rdflib import Graph, Namespace, RDF, Variable
from rdflib.query import Result, ResultParser
-RS = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/result-set#')
+RS = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/result-set#")
class RDFResultParser(ResultParser):
@@ -11,7 +11,6 @@ class RDFResultParser(ResultParser):
class RDFResult(Result):
-
def __init__(self, source, **kwargs):
if not isinstance(source, Graph):
@@ -24,7 +23,7 @@ class RDFResult(Result):
# there better be only one :)
if rs is None:
- type_ = 'CONSTRUCT'
+ type_ = "CONSTRUCT"
# use a new graph
g = Graph()
@@ -35,27 +34,27 @@ class RDFResult(Result):
askAnswer = graph.value(rs, RS.boolean)
if askAnswer is not None:
- type_ = 'ASK'
+ type_ = "ASK"
else:
- type_ = 'SELECT'
+ type_ = "SELECT"
Result.__init__(self, type_)
- if type_ == 'SELECT':
- self.vars = [Variable(v) for v in graph.objects(rs,
- RS.resultVariable)]
+ if type_ == "SELECT":
+ self.vars = [Variable(v) for v in graph.objects(rs, RS.resultVariable)]
self.bindings = []
for s in graph.objects(rs, RS.solution):
sol = {}
for b in graph.objects(s, RS.binding):
- sol[Variable(graph.value(
- b, RS.variable))] = graph.value(b, RS.value)
+ sol[Variable(graph.value(b, RS.variable))] = graph.value(
+ b, RS.value
+ )
self.bindings.append(sol)
- elif type_ == 'ASK':
+ elif type_ == "ASK":
self.askAnswer = askAnswer.value
if askAnswer.value is None:
- raise Exception('Malformed boolean in ask answer!')
- elif type_ == 'CONSTRUCT':
+ raise Exception("Malformed boolean in ask answer!")
+ elif type_ == "CONSTRUCT":
self.graph = g
diff --git a/rdflib/plugins/sparql/results/tsvresults.py b/rdflib/plugins/sparql/results/tsvresults.py
index 1395eaff..bdfa2d4a 100644
--- a/rdflib/plugins/sparql/results/tsvresults.py
+++ b/rdflib/plugins/sparql/results/tsvresults.py
@@ -1,4 +1,3 @@
-
"""
This implements the Tab Separated SPARQL Result Format
@@ -8,14 +7,28 @@ It is implemented with pyparsing, reusing the elements from the SPARQL Parser
import codecs
from pyparsing import (
- Optional, ZeroOrMore, Literal, ParserElement, ParseException, Suppress,
- FollowedBy, LineEnd)
+ Optional,
+ ZeroOrMore,
+ Literal,
+ ParserElement,
+ ParseException,
+ Suppress,
+ FollowedBy,
+ LineEnd,
+)
from rdflib.query import Result, ResultParser
from rdflib.plugins.sparql.parser import (
- Var, STRING_LITERAL1, STRING_LITERAL2, IRIREF, BLANK_NODE_LABEL,
- NumericLiteral, BooleanLiteral, LANGTAG)
+ Var,
+ STRING_LITERAL1,
+ STRING_LITERAL2,
+ IRIREF,
+ BLANK_NODE_LABEL,
+ NumericLiteral,
+ BooleanLiteral,
+ LANGTAG,
+)
from rdflib.plugins.sparql.parserutils import Comp, Param, CompValue
from rdflib import Literal as RDFLiteral
@@ -25,10 +38,14 @@ ParserElement.setDefaultWhitespaceChars(" \n")
String = STRING_LITERAL1 | STRING_LITERAL2
-RDFLITERAL = Comp('literal', Param('string', String) + Optional(
- Param('lang', LANGTAG.leaveWhitespace()
- ) | Literal('^^').leaveWhitespace(
- ) + Param('datatype', IRIREF).leaveWhitespace()))
+RDFLITERAL = Comp(
+ "literal",
+ Param("string", String)
+ + Optional(
+ Param("lang", LANGTAG.leaveWhitespace())
+ | Literal("^^").leaveWhitespace() + Param("datatype", IRIREF).leaveWhitespace()
+ ),
+)
NONE_VALUE = object()
@@ -49,10 +66,10 @@ class TSVResultParser(ResultParser):
if isinstance(source.read(0), bytes):
# if reading from source returns bytes do utf-8 decoding
- source = codecs.getreader('utf-8')(source)
+ source = codecs.getreader("utf-8")(source)
try:
- r = Result('SELECT')
+ r = Result("SELECT")
header = source.readline()
@@ -62,13 +79,12 @@ class TSVResultParser(ResultParser):
line = source.readline()
if not line:
break
- line = line.strip('\n')
+ line = line.strip("\n")
if line == "":
continue
row = ROW.parseString(line, parseAll=True)
- r.bindings.append(
- dict(zip(r.vars, (self.convertTerm(x) for x in row))))
+ r.bindings.append(dict(zip(r.vars, (self.convertTerm(x) for x in row))))
return r
@@ -81,7 +97,7 @@ class TSVResultParser(ResultParser):
if t is NONE_VALUE:
return None
if isinstance(t, CompValue):
- if t.name == 'literal':
+ if t.name == "literal":
return RDFLiteral(t.string, lang=t.lang, datatype=t.datatype)
else:
raise Exception("I dont know how to handle this: %s" % (t,))
@@ -89,9 +105,10 @@ class TSVResultParser(ResultParser):
return t
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
- r = Result.parse(file(sys.argv[1]), format='tsv')
+
+    r = Result.parse(open(sys.argv[1]), format="tsv")
print(r.vars)
print(r.bindings)
# print r.serialize(format='json')
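With the Python 2 file() builtin swapped for open() in the __main__ block above, the TSV parser can also be driven directly from any file-like object; an illustrative sketch with a hand-written two-column payload:

    from io import StringIO
    from rdflib.plugins.sparql.results.tsvresults import TSVResultParser

    # Header row of ?-prefixed variables, then one tab-separated binding row.
    data = '?s\t?o\n<urn:a>\t"c"\n'
    res = TSVResultParser().parse(StringIO(data))
    print(res.vars)      # -> [Variable('s'), Variable('o')]
    print(res.bindings)  # -> [{s: URIRef('urn:a'), o: Literal('c')}]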
diff --git a/rdflib/plugins/sparql/results/txtresults.py b/rdflib/plugins/sparql/results/txtresults.py
index c42f24c4..426dd9a1 100644
--- a/rdflib/plugins/sparql/results/txtresults.py
+++ b/rdflib/plugins/sparql/results/txtresults.py
@@ -1,4 +1,3 @@
-
from rdflib import URIRef, BNode, Literal
from rdflib.query import ResultSerializer
@@ -37,7 +36,7 @@ class TXTResultSerializer(ResultSerializer):
h2 += 1
return " " * h1 + s + " " * h2
- if self.result.type != 'SELECT':
+ if self.result.type != "SELECT":
raise Exception("Can only pretty print SELECT results!")
if not self.result:
@@ -46,14 +45,17 @@ class TXTResultSerializer(ResultSerializer):
keys = sorted(self.result.vars)
maxlen = [0] * len(keys)
- b = [[_termString(r[k], namespace_manager) for k in keys] for r in self.result]
+ b = [
+ [_termString(r[k], namespace_manager) for k in keys]
+ for r in self.result
+ ]
for r in b:
for i in range(len(keys)):
maxlen[i] = max(maxlen[i], len(r[i]))
- stream.write(
- "|".join([c(k, maxlen[i]) for i, k in enumerate(keys)]) + "\n")
+ stream.write("|".join([c(k, maxlen[i]) for i, k in enumerate(keys)]) + "\n")
stream.write("-" * (len(maxlen) + sum(maxlen)) + "\n")
for r in sorted(b):
- stream.write("|".join(
- [t + " " * (i - len(t)) for i, t in zip(maxlen, r)]) + "\n")
+ stream.write(
+ "|".join([t + " " * (i - len(t)) for i, t in zip(maxlen, r)]) + "\n"
+ )
diff --git a/rdflib/plugins/sparql/results/xmlresults.py b/rdflib/plugins/sparql/results/xmlresults.py
index cdb81f76..aa4f796f 100644
--- a/rdflib/plugins/sparql/results/xmlresults.py
+++ b/rdflib/plugins/sparql/results/xmlresults.py
@@ -8,17 +8,11 @@ from xml.sax.xmlreader import AttributesNSImpl
from rdflib.compat import etree
from rdflib import Literal, URIRef, BNode, Graph, Variable
-from rdflib.query import (
- Result,
- ResultParser,
- ResultSerializer,
- ResultException
-)
+from rdflib.query import Result, ResultParser, ResultSerializer, ResultException
-
-SPARQL_XML_NAMESPACE = u'http://www.w3.org/2005/sparql-results#'
-RESULTS_NS_ET = '{%s}' % SPARQL_XML_NAMESPACE
+SPARQL_XML_NAMESPACE = u"http://www.w3.org/2005/sparql-results#"
+RESULTS_NS_ET = "{%s}" % SPARQL_XML_NAMESPACE
log = logging.getLogger(__name__)
@@ -35,7 +29,6 @@ Authors: Drew Perttula, Gunnar Aastrand Grimnes
class XMLResultParser(ResultParser):
-
def parse(self, source, content_type=None):
return XMLResult(source)
@@ -49,31 +42,32 @@ class XMLResult(Result):
except TypeError:
tree = etree.parse(source)
- boolean = tree.find(RESULTS_NS_ET + 'boolean')
- results = tree.find(RESULTS_NS_ET + 'results')
+ boolean = tree.find(RESULTS_NS_ET + "boolean")
+ results = tree.find(RESULTS_NS_ET + "results")
if boolean is not None:
- type_ = 'ASK'
+ type_ = "ASK"
elif results is not None:
- type_ = 'SELECT'
+ type_ = "SELECT"
else:
- raise ResultException(
- "No RDF result-bindings or boolean answer found!")
+ raise ResultException("No RDF result-bindings or boolean answer found!")
Result.__init__(self, type_)
- if type_ == 'SELECT':
+ if type_ == "SELECT":
self.bindings = []
for result in results:
r = {}
for binding in result:
- r[Variable(binding.get('name'))] = parseTerm(binding[0])
+ r[Variable(binding.get("name"))] = parseTerm(binding[0])
self.bindings.append(r)
- self.vars = [Variable(x.get("name"))
- for x in tree.findall(
- './%shead/%svariable' % (
- RESULTS_NS_ET, RESULTS_NS_ET))]
+ self.vars = [
+ Variable(x.get("name"))
+ for x in tree.findall(
+ "./%shead/%svariable" % (RESULTS_NS_ET, RESULTS_NS_ET)
+ )
+ ]
else:
self.askAnswer = boolean.text.lower().strip() == "true"
@@ -83,36 +77,35 @@ def parseTerm(element):
"""rdflib object (Literal, URIRef, BNode) for the given
elementtree element"""
tag, text = element.tag, element.text
- if tag == RESULTS_NS_ET + 'literal':
+ if tag == RESULTS_NS_ET + "literal":
if text is None:
- text = ''
+ text = ""
datatype = None
lang = None
- if element.get('datatype', None):
- datatype = URIRef(element.get('datatype'))
+ if element.get("datatype", None):
+ datatype = URIRef(element.get("datatype"))
elif element.get("{%s}lang" % XML_NAMESPACE, None):
lang = element.get("{%s}lang" % XML_NAMESPACE)
ret = Literal(text, datatype=datatype, lang=lang)
return ret
- elif tag == RESULTS_NS_ET + 'uri':
+ elif tag == RESULTS_NS_ET + "uri":
return URIRef(text)
- elif tag == RESULTS_NS_ET + 'bnode':
+ elif tag == RESULTS_NS_ET + "bnode":
return BNode(text)
else:
raise TypeError("unknown binding type %r" % element)
class XMLResultSerializer(ResultSerializer):
-
def __init__(self, result):
ResultSerializer.__init__(self, result)
def serialize(self, stream, encoding="utf-8"):
writer = SPARQLXMLWriter(stream, encoding)
- if self.result.type == 'ASK':
+ if self.result.type == "ASK":
writer.write_header([])
writer.write_ask(self.result.askAnswer)
else:
@@ -134,14 +127,14 @@ class SPARQLXMLWriter:
Python saxutils-based SPARQL XML Writer
"""
- def __init__(self, output, encoding='utf-8'):
+ def __init__(self, output, encoding="utf-8"):
writer = XMLGenerator(output, encoding)
writer.startDocument()
- writer.startPrefixMapping(u'', SPARQL_XML_NAMESPACE)
- writer.startPrefixMapping(u'xml', XML_NAMESPACE)
+ writer.startPrefixMapping(u"", SPARQL_XML_NAMESPACE)
+ writer.startPrefixMapping(u"xml", XML_NAMESPACE)
writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'sparql'),
- u'sparql', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"sparql"), u"sparql", AttributesNSImpl({}, {})
+ )
self.writer = writer
self._output = output
self._encoding = encoding
@@ -149,102 +142,99 @@ class SPARQLXMLWriter:
def write_header(self, allvarsL):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'head'),
- u'head', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"head"), u"head", AttributesNSImpl({}, {})
+ )
for i in range(0, len(allvarsL)):
attr_vals = {
- (None, u'name'): str(allvarsL[i]),
+ (None, u"name"): str(allvarsL[i]),
}
attr_qnames = {
- (None, u'name'): u'name',
+ (None, u"name"): u"name",
}
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'variable'),
- u'variable', AttributesNSImpl(attr_vals, attr_qnames))
- self.writer.endElementNS((SPARQL_XML_NAMESPACE,
- u'variable'), u'variable')
- self.writer.endElementNS((SPARQL_XML_NAMESPACE, u'head'), u'head')
+ (SPARQL_XML_NAMESPACE, u"variable"),
+ u"variable",
+ AttributesNSImpl(attr_vals, attr_qnames),
+ )
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"variable"), u"variable")
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"head"), u"head")
def write_ask(self, val):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'boolean'),
- u'boolean', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"boolean"), u"boolean", AttributesNSImpl({}, {})
+ )
self.writer.characters(str(val).lower())
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'boolean'), u'boolean')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"boolean"), u"boolean")
def write_results_header(self):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'results'),
- u'results', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"results"), u"results", AttributesNSImpl({}, {})
+ )
self._results = True
def write_start_result(self):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'result'),
- u'result', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"result"), u"result", AttributesNSImpl({}, {})
+ )
self._resultStarted = True
def write_end_result(self):
assert self._resultStarted
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'result'), u'result')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"result"), u"result")
self._resultStarted = False
def write_binding(self, name, val):
assert self._resultStarted
attr_vals = {
- (None, u'name'): str(name),
+ (None, u"name"): str(name),
}
attr_qnames = {
- (None, u'name'): u'name',
+ (None, u"name"): u"name",
}
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'binding'),
- u'binding', AttributesNSImpl(attr_vals, attr_qnames))
+ (SPARQL_XML_NAMESPACE, u"binding"),
+ u"binding",
+ AttributesNSImpl(attr_vals, attr_qnames),
+ )
if isinstance(val, URIRef):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'uri'),
- u'uri', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"uri"), u"uri", AttributesNSImpl({}, {})
+ )
self.writer.characters(val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'uri'), u'uri')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"uri"), u"uri")
elif isinstance(val, BNode):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'bnode'),
- u'bnode', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"bnode"), u"bnode", AttributesNSImpl({}, {})
+ )
self.writer.characters(val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'bnode'), u'bnode')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"bnode"), u"bnode")
elif isinstance(val, Literal):
attr_vals = {}
attr_qnames = {}
if val.language:
- attr_vals[(XML_NAMESPACE, u'lang')] = val.language
- attr_qnames[(XML_NAMESPACE, u'lang')] = u"xml:lang"
+ attr_vals[(XML_NAMESPACE, u"lang")] = val.language
+ attr_qnames[(XML_NAMESPACE, u"lang")] = u"xml:lang"
elif val.datatype:
- attr_vals[(None, u'datatype')] = val.datatype
- attr_qnames[(None, u'datatype')] = u'datatype'
+ attr_vals[(None, u"datatype")] = val.datatype
+ attr_qnames[(None, u"datatype")] = u"datatype"
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'literal'),
- u'literal', AttributesNSImpl(attr_vals, attr_qnames))
+ (SPARQL_XML_NAMESPACE, u"literal"),
+ u"literal",
+ AttributesNSImpl(attr_vals, attr_qnames),
+ )
self.writer.characters(val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'literal'), u'literal')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"literal"), u"literal")
else:
raise Exception("Unsupported RDF term: %s" % val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'binding'), u'binding')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"binding"), u"binding")
def close(self):
if self._results:
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'results'), u'results')
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'sparql'), u'sparql')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"results"), u"results")
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"sparql"), u"sparql")
self.writer.endDocument()
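SPARQLXMLWriter, reshaped above, can be driven by hand the same way XMLResultSerializer drives it; a sketch emitting a single URI binding, assuming the usual call order of header, results, result, binding:

    from io import BytesIO
    from rdflib import URIRef
    from rdflib.plugins.sparql.results.xmlresults import SPARQLXMLWriter

    buf = BytesIO()
    w = SPARQLXMLWriter(buf)  # encoding defaults to utf-8
    w.write_header(["s"])     # <head><variable name="s"/></head>
    w.write_results_header()
    w.write_start_result()
    w.write_binding("s", URIRef("urn:a"))
    w.write_end_result()
    w.close()                 # closes <results> and <sparql>
    print(buf.getvalue().decode("utf-8"))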
diff --git a/rdflib/plugins/sparql/sparql.py b/rdflib/plugins/sparql/sparql.py
index d9d781db..5b6eab2e 100644
--- a/rdflib/plugins/sparql/sparql.py
+++ b/rdflib/plugins/sparql/sparql.py
@@ -129,8 +129,7 @@ class FrozenDict(Mapping):
return self._hash
def project(self, vars):
- return FrozenDict(
- (x for x in self.items() if x[0] in vars))
+ return FrozenDict((x for x in self.items() if x[0] in vars))
def disjointDomain(self, other):
return not bool(set(self).intersection(other))
@@ -146,8 +145,7 @@ class FrozenDict(Mapping):
return True
def merge(self, other):
- res = FrozenDict(
- itertools.chain(self.items(), other.items()))
+ res = FrozenDict(itertools.chain(self.items(), other.items()))
return res
@@ -159,7 +157,6 @@ class FrozenDict(Mapping):
class FrozenBindings(FrozenDict):
-
def __init__(self, ctx, *args, **kwargs):
FrozenDict.__init__(self, *args, **kwargs)
self.ctx = ctx
@@ -178,12 +175,10 @@ class FrozenBindings(FrozenDict):
return self._d[key]
def project(self, vars):
- return FrozenBindings(
- self.ctx, (x for x in self.items() if x[0] in vars))
+ return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in vars))
def merge(self, other):
- res = FrozenBindings(
- self.ctx, itertools.chain(self.items(), other.items()))
+ res = FrozenBindings(self.ctx, itertools.chain(self.items(), other.items()))
return res
@@ -210,18 +205,23 @@ class FrozenBindings(FrozenDict):
# bindings from initBindings are never forgotten
return FrozenBindings(
- self.ctx, (
- x for x in self.items() if (
- x[0] in _except or
- x[0] in self.ctx.initBindings or
- before[x[0]] is None)))
+ self.ctx,
+ (
+ x
+ for x in self.items()
+ if (
+ x[0] in _except
+ or x[0] in self.ctx.initBindings
+ or before[x[0]] is None
+ )
+ ),
+ )
def remember(self, these):
"""
return a frozen dict only of bindings in these
"""
- return FrozenBindings(
- self.ctx, (x for x in self.items() if x[0] in these))
+ return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in these))
class QueryContext(object):
@@ -253,7 +253,10 @@ class QueryContext(object):
def clone(self, bindings=None):
r = QueryContext(
- self._dataset if self._dataset is not None else self.graph, bindings or self.bindings, initBindings=self.initBindings)
+ self._dataset if self._dataset is not None else self.graph,
+ bindings or self.bindings,
+ initBindings=self.initBindings,
+ )
r.prologue = self.prologue
r.graph = self.graph
r.bnodes = self.bnodes
@@ -262,30 +265,30 @@ class QueryContext(object):
def _get_dataset(self):
if self._dataset is None:
raise Exception(
- 'You performed a query operation requiring ' +
- 'a dataset (i.e. ConjunctiveGraph), but ' +
- 'operating currently on a single graph.')
+ "You performed a query operation requiring "
+ + "a dataset (i.e. ConjunctiveGraph), but "
+ + "operating currently on a single graph."
+ )
return self._dataset
dataset = property(_get_dataset, doc="current dataset")
def load(self, source, default=False, **kwargs):
-
def _load(graph, source):
try:
return graph.load(source, **kwargs)
except:
pass
try:
- return graph.load(source, format='n3', **kwargs)
+ return graph.load(source, format="n3", **kwargs)
except:
pass
try:
- return graph.load(source, format='nt', **kwargs)
+ return graph.load(source, format="nt", **kwargs)
except:
raise Exception(
- "Could not load %s as either RDF/XML, N3 or NTriples" % (
- source))
+ "Could not load %s as either RDF/XML, N3 or NTriples" % (source)
+ )
if not rdflib.plugins.sparql.SPARQL_LOAD_GRAPHS:
# we are not loading - if we already know the graph
@@ -320,9 +323,8 @@ class QueryContext(object):
"""
if vars:
return FrozenBindings(
- self, ((k, v)
- for k, v in self.bindings.items()
- if k in vars))
+ self, ((k, v) for k, v in self.bindings.items() if k in vars)
+ )
else:
return FrozenBindings(self, self.bindings.items())
@@ -366,13 +368,12 @@ class Prologue(object):
def __init__(self):
self.base = None
- self.namespace_manager = NamespaceManager(
- Graph()) # ns man needs a store
+ self.namespace_manager = NamespaceManager(Graph()) # ns man needs a store
def resolvePName(self, prefix, localname):
ns = self.namespace_manager.store.namespace(prefix or "")
if ns is None:
- raise Exception('Unknown namespace prefix : %s' % prefix)
+ raise Exception("Unknown namespace prefix : %s" % prefix)
return URIRef(ns + (localname or ""))
def bind(self, prefix, uri):
@@ -387,13 +388,13 @@ class Prologue(object):
"""
if isinstance(iri, CompValue):
- if iri.name == 'pname':
+ if iri.name == "pname":
return self.resolvePName(iri.prefix, iri.localname)
- if iri.name == 'literal':
+ if iri.name == "literal":
return Literal(
- iri.string, lang=iri.lang,
- datatype=self.absolutize(iri.datatype))
- elif isinstance(iri, URIRef) and not ':' in iri:
+ iri.string, lang=iri.lang, datatype=self.absolutize(iri.datatype)
+ )
+ elif isinstance(iri, URIRef) and not ":" in iri:
return URIRef(iri, base=self.base)
return iri
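Prologue, tidied above, owns prefix resolution and IRI absolutization for a parsed query; a tiny sketch of resolvePName (the prefix and namespace are illustrative):

    from rdflib.plugins.sparql.sparql import Prologue

    p = Prologue()
    p.bind("ex", "http://example.org/")
    print(p.resolvePName("ex", "thing"))  # -> http://example.org/thing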
diff --git a/rdflib/plugins/sparql/update.py b/rdflib/plugins/sparql/update.py
index 44ea40a3..f979c387 100644
--- a/rdflib/plugins/sparql/update.py
+++ b/rdflib/plugins/sparql/update.py
@@ -11,7 +11,7 @@ from rdflib.plugins.sparql.evaluate import evalBGP, evalPart
def _graphOrDefault(ctx, g):
- if g == 'DEFAULT':
+ if g == "DEFAULT":
return ctx.graph
else:
return ctx.dataset.get_context(g)
@@ -21,12 +21,13 @@ def _graphAll(ctx, g):
"""
return a list of graphs
"""
- if g == 'DEFAULT':
+ if g == "DEFAULT":
return [ctx.graph]
- elif g == 'NAMED':
- return [c for c in ctx.dataset.contexts()
- if c.identifier != ctx.graph.identifier]
- elif g == 'ALL':
+ elif g == "NAMED":
+ return [
+ c for c in ctx.dataset.contexts() if c.identifier != ctx.graph.identifier
+ ]
+ elif g == "ALL":
return list(ctx.dataset.contexts())
else:
return [ctx.dataset.get_context(g)]
@@ -280,30 +281,30 @@ def evalUpdate(graph, update, initBindings={}):
ctx.prologue = u.prologue
try:
- if u.name == 'Load':
+ if u.name == "Load":
evalLoad(ctx, u)
- elif u.name == 'Clear':
+ elif u.name == "Clear":
evalClear(ctx, u)
- elif u.name == 'Drop':
+ elif u.name == "Drop":
evalDrop(ctx, u)
- elif u.name == 'Create':
+ elif u.name == "Create":
evalCreate(ctx, u)
- elif u.name == 'Add':
+ elif u.name == "Add":
evalAdd(ctx, u)
- elif u.name == 'Move':
+ elif u.name == "Move":
evalMove(ctx, u)
- elif u.name == 'Copy':
+ elif u.name == "Copy":
evalCopy(ctx, u)
- elif u.name == 'InsertData':
+ elif u.name == "InsertData":
evalInsertData(ctx, u)
- elif u.name == 'DeleteData':
+ elif u.name == "DeleteData":
evalDeleteData(ctx, u)
- elif u.name == 'DeleteWhere':
+ elif u.name == "DeleteWhere":
evalDeleteWhere(ctx, u)
- elif u.name == 'Modify':
+ elif u.name == "Modify":
evalModify(ctx, u)
else:
- raise Exception('Unknown update operation: %s' % (u,))
+ raise Exception("Unknown update operation: %s" % (u,))
except:
if not u.silent:
raise
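evalUpdate above dispatches on each parsed operation's CompValue name, so several operations separated by ';' run in order against one context; a sketch via the processUpdate wrapper:

    from rdflib import ConjunctiveGraph
    from rdflib.plugins.sparql.processor import processUpdate

    # InsertData runs first, DeleteData second, so the graph ends up empty.
    g = ConjunctiveGraph()
    processUpdate(
        g,
        'INSERT DATA { <urn:a> <urn:b> <urn:c> } ; '
        'DELETE DATA { <urn:a> <urn:b> <urn:c> }',
    )
    print(len(g))  # -> 0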
diff --git a/rdflib/plugins/stores/auditable.py b/rdflib/plugins/stores/auditable.py
index 7a3492b7..ff21716b 100644
--- a/rdflib/plugins/stores/auditable.py
+++ b/rdflib/plugins/stores/auditable.py
@@ -20,8 +20,8 @@ from rdflib import Graph, ConjunctiveGraph
import threading
destructiveOpLocks = {
- 'add': None,
- 'remove': None,
+ "add": None,
+ "remove": None,
}
@@ -50,59 +50,79 @@ class AuditableStore(Store):
def add(self, triple, context, quoted=False):
(s, p, o) = triple
- lock = destructiveOpLocks['add']
+ lock = destructiveOpLocks["add"]
lock = lock if lock else threading.RLock()
with lock:
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
ctxId = context.identifier if context is not None else None
if list(self.store.triples(triple, context)):
return # triple already in store, do nothing
- self.reverseOps.append((s, p, o, ctxId, 'remove'))
+ self.reverseOps.append((s, p, o, ctxId, "remove"))
try:
- self.reverseOps.remove((s, p, o, ctxId, 'add'))
+ self.reverseOps.remove((s, p, o, ctxId, "add"))
except ValueError:
pass
self.store.add((s, p, o), context, quoted)
def remove(self, spo, context=None):
subject, predicate, object_ = spo
- lock = destructiveOpLocks['remove']
+ lock = destructiveOpLocks["remove"]
lock = lock if lock else threading.RLock()
with lock:
# Need to determine which quads will be removed if any term is a
# wildcard
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
ctxId = context.identifier if context is not None else None
if None in [subject, predicate, object_, context]:
if ctxId:
for s, p, o in context.triples((subject, predicate, object_)):
try:
- self.reverseOps.remove((s, p, o, ctxId, 'remove'))
+ self.reverseOps.remove((s, p, o, ctxId, "remove"))
except ValueError:
- self.reverseOps.append((s, p, o, ctxId, 'add'))
+ self.reverseOps.append((s, p, o, ctxId, "add"))
else:
- for s, p, o, ctx in ConjunctiveGraph(self.store).quads((subject, predicate, object_)):
+ for s, p, o, ctx in ConjunctiveGraph(self.store).quads(
+ (subject, predicate, object_)
+ ):
try:
- self.reverseOps.remove((s, p, o, ctx.identifier, 'remove'))
+ self.reverseOps.remove((s, p, o, ctx.identifier, "remove"))
except ValueError:
- self.reverseOps.append((s, p, o, ctx.identifier, 'add'))
+ self.reverseOps.append((s, p, o, ctx.identifier, "add"))
else:
if not list(self.triples((subject, predicate, object_), context)):
return # triple not present in store, do nothing
try:
- self.reverseOps.remove((subject, predicate, object_, ctxId, 'remove'))
+ self.reverseOps.remove(
+ (subject, predicate, object_, ctxId, "remove")
+ )
except ValueError:
- self.reverseOps.append((subject, predicate, object_, ctxId, 'add'))
+ self.reverseOps.append((subject, predicate, object_, ctxId, "add"))
self.store.remove((subject, predicate, object_), context)
def triples(self, triple, context=None):
(su, pr, ob) = triple
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
for (s, p, o), cg in self.store.triples((su, pr, ob), context):
yield (s, p, o), cg
def __len__(self, context=None):
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
return self.store.__len__(context)
def contexts(self, triple=None):
@@ -129,11 +149,13 @@ class AuditableStore(Store):
# order
with self.rollbackLock:
for subject, predicate, obj, context, op in self.reverseOps:
- if op == 'add':
+ if op == "add":
self.store.add(
- (subject, predicate, obj), Graph(self.store, context))
+ (subject, predicate, obj), Graph(self.store, context)
+ )
else:
self.store.remove(
- (subject, predicate, obj), Graph(self.store, context))
+ (subject, predicate, obj), Graph(self.store, context)
+ )
self.reverseOps = []
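AuditableStore, reindented above, mirrors every add and remove with a reverse operation so that rollback() can restore the wrapped store; a sketch over the in-memory store, assuming the usual Graph-over-Store wiring:

    from rdflib import Graph, URIRef
    from rdflib.plugins.memory import IOMemory
    from rdflib.plugins.stores.auditable import AuditableStore

    g = Graph(AuditableStore(IOMemory()))
    g.add((URIRef("urn:a"), URIRef("urn:b"), URIRef("urn:c")))
    print(len(g))       # -> 1
    g.store.rollback()  # replays the queued reverse ops
    print(len(g))       # -> 0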
diff --git a/rdflib/plugins/stores/concurrent.py b/rdflib/plugins/stores/concurrent.py
index 40747fb1..a258e778 100644
--- a/rdflib/plugins/stores/concurrent.py
+++ b/rdflib/plugins/stores/concurrent.py
@@ -4,7 +4,7 @@ from threading import Lock
class ResponsibleGenerator(object):
"""A generator that will help clean up when it is done being used."""
- __slots__ = ['cleanup', 'gen']
+ __slots__ = ["cleanup", "gen"]
def __init__(self, gen, cleanup):
self.cleanup = cleanup
@@ -21,7 +21,6 @@ class ResponsibleGenerator(object):
class ConcurrentStore(object):
-
def __init__(self, store):
self.store = store
@@ -60,9 +59,11 @@ class ConcurrentStore(object):
yield s, p, o
for (s, p, o) in self.__pending_adds:
- if (su is None or su == s) \
- and (pr is None or pr == p) \
- and (ob is None or ob == o):
+ if (
+ (su is None or su == s)
+ and (pr is None or pr == p)
+ and (ob is None or ob == o)
+ ):
yield s, p, o
def __len__(self):
diff --git a/rdflib/plugins/stores/regexmatching.py b/rdflib/plugins/stores/regexmatching.py
index 55c1e671..f890405d 100644
--- a/rdflib/plugins/stores/regexmatching.py
+++ b/rdflib/plugins/stores/regexmatching.py
@@ -12,7 +12,6 @@ from rdflib.store import Store
from rdflib.graph import Graph
-
import re
# Store is capable of doing its own REGEX matching
@@ -32,13 +31,14 @@ class REGEXTerm(str):
self.compiledExpr = re.compile(expr)
def __reduce__(self):
- return (REGEXTerm, (str(''),))
+ return (REGEXTerm, (str(""),))
def regexCompareQuad(quad, regexQuad):
for index in range(4):
- if isinstance(regexQuad[index], REGEXTerm) and not \
- regexQuad[index].compiledExpr.match(quad[index]):
+ if isinstance(regexQuad[index], REGEXTerm) and not regexQuad[
+ index
+ ].compiledExpr.match(quad[index]):
return False
return True
@@ -67,29 +67,36 @@ class REGEXMatching(Store):
def remove(self, triple, context=None):
(subject, predicate, object_) = triple
- if isinstance(subject, REGEXTerm) or \
- isinstance(predicate, REGEXTerm) or \
- isinstance(object_, REGEXTerm) or \
- (context is not None and
- isinstance(context.identifier, REGEXTerm)):
+ if (
+ isinstance(subject, REGEXTerm)
+ or isinstance(predicate, REGEXTerm)
+ or isinstance(object_, REGEXTerm)
+ or (context is not None and isinstance(context.identifier, REGEXTerm))
+ ):
# One or more of the terms is a REGEX expression, so we must
# replace it / them with wildcard(s) and match after we query.
s = not isinstance(subject, REGEXTerm) and subject or None
p = not isinstance(predicate, REGEXTerm) and predicate or None
o = not isinstance(object_, REGEXTerm) and object_ or None
- c = (context is not None and
- not isinstance(context.identifier, REGEXTerm)) \
- and context \
+ c = (
+ (context is not None and not isinstance(context.identifier, REGEXTerm))
+ and context
or None
+ )
removeQuadList = []
for (s1, p1, o1), cg in self.storage.triples((s, p, o), c):
for ctx in cg:
ctx = ctx.identifier
if regexCompareQuad(
- (s1, p1, o1, ctx),
- (subject, predicate, object_, context
- is not None and context.identifier or context)):
+ (s1, p1, o1, ctx),
+ (
+ subject,
+ predicate,
+ object_,
+ context is not None and context.identifier or context,
+ ),
+ ):
removeQuadList.append((s1, p1, o1, ctx))
for s, p, o, c in removeQuadList:
self.storage.remove((s, p, o), c and Graph(self, c) or c)
@@ -98,37 +105,40 @@ class REGEXMatching(Store):
def triples(self, triple, context=None):
(subject, predicate, object_) = triple
- if isinstance(subject, REGEXTerm) or \
- isinstance(predicate, REGEXTerm) or \
- isinstance(object_, REGEXTerm) or \
- (context is not None and
- isinstance(context.identifier, REGEXTerm)):
+ if (
+ isinstance(subject, REGEXTerm)
+ or isinstance(predicate, REGEXTerm)
+ or isinstance(object_, REGEXTerm)
+ or (context is not None and isinstance(context.identifier, REGEXTerm))
+ ):
# One or more of the terms is a REGEX expression, so we must
# replace it / them with wildcard(s) and match after we query.
s = not isinstance(subject, REGEXTerm) and subject or None
p = not isinstance(predicate, REGEXTerm) and predicate or None
o = not isinstance(object_, REGEXTerm) and object_ or None
- c = (context is not None and
- not isinstance(context.identifier, REGEXTerm)) \
- and context \
+ c = (
+ (context is not None and not isinstance(context.identifier, REGEXTerm))
+ and context
or None
+ )
for (s1, p1, o1), cg in self.storage.triples((s, p, o), c):
matchingCtxs = []
for ctx in cg:
if c is None:
- if context is None \
- or context.identifier.compiledExpr.match(
- ctx.identifier):
+ if context is None or context.identifier.compiledExpr.match(
+ ctx.identifier
+ ):
matchingCtxs.append(ctx)
else:
matchingCtxs.append(ctx)
- if matchingCtxs \
- and regexCompareQuad((s1, p1, o1, None),
- (subject, predicate, object_, None)):
+ if matchingCtxs and regexCompareQuad(
+ (s1, p1, o1, None), (subject, predicate, object_, None)
+ ):
yield (s1, p1, o1), (c for c in matchingCtxs)
else:
for (s1, p1, o1), cg in self.storage.triples(
- (subject, predicate, object_), context):
+ (subject, predicate, object_), context
+ ):
yield (s1, p1, o1), cg
def __len__(self, context=None):
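
(Illustration, not part of the diff: regexCompareQuad only tests the positions that hold a REGEXTerm and ignores the rest; the URIs below are hypothetical.)

    from rdflib.plugins.stores.regexmatching import REGEXTerm, regexCompareQuad

    quad = ("http://example.org/a", "http://example.org/knows",
            "http://example.org/b", "ctx")
    # only the predicate position carries a pattern; non-REGEXTerm entries are skipped
    pattern = (None, REGEXTerm(".*knows$"), None, None)
    print(regexCompareQuad(quad, pattern))  # True
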
diff --git a/rdflib/plugins/stores/sparqlconnector.py b/rdflib/plugins/stores/sparqlconnector.py
index abb69a55..abec85a8 100644
--- a/rdflib/plugins/stores/sparqlconnector.py
+++ b/rdflib/plugins/stores/sparqlconnector.py
@@ -14,13 +14,14 @@ log = logging.getLogger(__name__)
class SPARQLConnectorException(Exception):
pass
+
# TODO: Pull in these from the result implementation plugins?
_response_mime_types = {
- 'xml': 'application/sparql-results+xml, application/rdf+xml',
- 'json': 'application/sparql-results+json',
- 'csv': 'text/csv',
- 'tsv': 'text/tab-separated-values',
- 'application/rdf+xml': 'application/rdf+xml',
+ "xml": "application/sparql-results+xml, application/rdf+xml",
+ "json": "application/sparql-results+json",
+ "csv": "text/csv",
+ "tsv": "text/tab-separated-values",
+ "application/rdf+xml": "application/rdf+xml",
}
@@ -30,7 +31,14 @@ class SPARQLConnector(object):
    This class deals with the nitty-gritty details of talking to a SPARQL server
"""
- def __init__(self, query_endpoint=None, update_endpoint=None, returnFormat='xml', method='GET', **kwargs):
+ def __init__(
+ self,
+ query_endpoint=None,
+ update_endpoint=None,
+ returnFormat="xml",
+ method="GET",
+ **kwargs
+ ):
"""
        Any additional keyword arguments will be passed to requests, and can be used to set up timeouts, basic auth, etc.
"""
@@ -48,9 +56,9 @@ class SPARQLConnector(object):
@property
def session(self):
- k = 'session_%d' % os.getpid()
+ k = "session_%d" % os.getpid()
self._session.__dict__.setdefault(k, requests.Session())
- log.debug('Session %s %s', os.getpid(), id(self._session.__dict__[k]))
+ log.debug("Session %s %s", os.getpid(), id(self._session.__dict__[k]))
return self._session.__dict__[k]
@property
@@ -59,7 +67,7 @@ class SPARQLConnector(object):
@method.setter
def method(self, method):
- if method not in ('GET', 'POST'):
+ if method not in ("GET", "POST"):
raise SPARQLConnectorException('Method must be "GET" or "POST"')
self._method = method
@@ -69,26 +77,26 @@ class SPARQLConnector(object):
if not self.query_endpoint:
raise SPARQLConnectorException("Query endpoint not set!")
- params = {'query': query}
+ params = {"query": query}
if default_graph:
params["default-graph-uri"] = default_graph
- headers = {'Accept': _response_mime_types[self.returnFormat]}
+ headers = {"Accept": _response_mime_types[self.returnFormat]}
args = dict(self.kwargs)
args.update(url=self.query_endpoint)
# merge params/headers dicts
- args.setdefault('params', {})
+ args.setdefault("params", {})
- args.setdefault('headers', {})
- args['headers'].update(headers)
+ args.setdefault("headers", {})
+ args["headers"].update(headers)
- if self.method == 'GET':
- args['params'].update(params)
- elif self.method == 'POST':
- args['headers'].update({'Content-Type': 'application/sparql-query'})
- args['data'] = params
+ if self.method == "GET":
+ args["params"].update(params)
+ elif self.method == "POST":
+ args["headers"].update({"Content-Type": "application/sparql-query"})
+ args["data"] = params
else:
raise SPARQLConnectorException("Unknown method %s" % self.method)
@@ -96,7 +104,9 @@ class SPARQLConnector(object):
res.raise_for_status()
- return Result.parse(BytesIO(res.content), content_type=res.headers['Content-type'])
+ return Result.parse(
+ BytesIO(res.content), content_type=res.headers["Content-type"]
+ )
def update(self, update, default_graph=None):
if not self.update_endpoint:
@@ -108,20 +118,19 @@ class SPARQLConnector(object):
params["using-graph-uri"] = default_graph
headers = {
- 'Accept': _response_mime_types[self.returnFormat],
- 'Content-Type': 'application/sparql-update',
+ "Accept": _response_mime_types[self.returnFormat],
+ "Content-Type": "application/sparql-update",
}
args = dict(self.kwargs)
- args.update(url=self.update_endpoint,
- data=update.encode('utf-8'))
+ args.update(url=self.update_endpoint, data=update.encode("utf-8"))
# merge params/headers dicts
- args.setdefault('params', {})
- args['params'].update(params)
- args.setdefault('headers', {})
- args['headers'].update(headers)
+ args.setdefault("params", {})
+ args["params"].update(params)
+ args.setdefault("headers", {})
+ args["headers"].update(headers)
res = self.session.post(**args)
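
(Illustration, not part of the diff: a usage sketch for SPARQLConnector; the endpoint URL is hypothetical, and extra keyword arguments such as timeout are forwarded to requests, per the docstring above.)

    from rdflib.plugins.stores.sparqlconnector import SPARQLConnector

    conn = SPARQLConnector(
        query_endpoint="http://localhost:3030/ds/query",  # hypothetical endpoint
        returnFormat="json",
        timeout=10,  # forwarded to requests
    )
    result = conn.query("SELECT * WHERE { ?s ?p ?o } LIMIT 5")
    for row in result:
        print(row)
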
diff --git a/rdflib/plugins/stores/sparqlstore.py b/rdflib/plugins/stores/sparqlstore.py
index 989b0126..63e92f54 100644
--- a/rdflib/plugins/stores/sparqlstore.py
+++ b/rdflib/plugins/stores/sparqlstore.py
@@ -7,9 +7,9 @@ This was first done in layer-cake, and then ported to RDFLib
"""
# Defines some SPARQL keywords
-LIMIT = 'LIMIT'
-OFFSET = 'OFFSET'
-ORDERBY = 'ORDER BY'
+LIMIT = "LIMIT"
+OFFSET = "OFFSET"
+ORDERBY = "ORDER BY"
import re
import collections
@@ -24,7 +24,7 @@ from rdflib.graph import DATASET_DEFAULT_GRAPH_ID
from rdflib.term import Node
-BNODE_IDENT_PATTERN = re.compile('(?P<label>_\:[^\s]+)')
+BNODE_IDENT_PATTERN = re.compile("(?P<label>_\:[^\s]+)")
def _node_to_sparql(node):
@@ -87,21 +87,26 @@ class SPARQLStore(SPARQLConnector, Store):
will use HTTP basic auth.
"""
+
formula_aware = False
transaction_aware = False
graph_aware = True
regex_matching = NATIVE_REGEX
- def __init__(self,
- endpoint=None,
- sparql11=True, context_aware=True,
- node_to_sparql=_node_to_sparql,
- returnFormat='xml',
- **sparqlconnector_kwargs):
+ def __init__(
+ self,
+ endpoint=None,
+ sparql11=True,
+ context_aware=True,
+ node_to_sparql=_node_to_sparql,
+ returnFormat="xml",
+ **sparqlconnector_kwargs
+ ):
"""
"""
super(SPARQLStore, self).__init__(
- endpoint, returnFormat=returnFormat, **sparqlconnector_kwargs)
+ endpoint, returnFormat=returnFormat, **sparqlconnector_kwargs
+ )
self.node_to_sparql = node_to_sparql
self.nsBindings = {}
@@ -112,7 +117,7 @@ class SPARQLStore(SPARQLConnector, Store):
# Database Management Methods
def create(self, configuration):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def open(self, configuration, create=False):
"""
@@ -125,23 +130,23 @@ class SPARQLStore(SPARQLConnector, Store):
self.query_endpoint = configuration
def destroy(self, configuration):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
# Transactional interfaces
def commit(self):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def rollback(self):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def add(self, _, context=None, quoted=False):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def addN(self, quads):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def remove(self, _, context):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def _query(self, *args, **kwargs):
self._queries += 1
@@ -152,20 +157,18 @@ class SPARQLStore(SPARQLConnector, Store):
bindings = list(self.nsBindings.items()) + list(extra_bindings.items())
if not bindings:
return query
- return '\n'.join([
- '\n'.join(['PREFIX %s: <%s>' % (k, v) for k, v in bindings]),
- '', # separate ns_bindings from query with an empty line
- query
- ])
+ return "\n".join(
+ [
+ "\n".join(["PREFIX %s: <%s>" % (k, v) for k, v in bindings]),
+ "", # separate ns_bindings from query with an empty line
+ query,
+ ]
+ )
def _preprocess_query(self, query):
return self._inject_prefixes(query)
- def query(self, query,
- initNs={},
- initBindings={},
- queryGraph=None,
- DEBUG=False):
+ def query(self, query, initNs={}, initBindings={}, queryGraph=None, DEBUG=False):
self.debug = DEBUG
assert isinstance(query, str)
@@ -173,17 +176,18 @@ class SPARQLStore(SPARQLConnector, Store):
if initBindings:
if not self.sparql11:
- raise Exception(
- "initBindings not supported for SPARQL 1.0 Endpoints.")
+ raise Exception("initBindings not supported for SPARQL 1.0 Endpoints.")
v = list(initBindings)
# VALUES was added to SPARQL 1.1 on 2012/07/24
- query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
- % (" ".join("?" + str(x) for x in v),
- " ".join(self.node_to_sparql(initBindings[x]) for x in v))
+ query += "\nVALUES ( %s )\n{ ( %s ) }\n" % (
+ " ".join("?" + str(x) for x in v),
+ " ".join(self.node_to_sparql(initBindings[x]) for x in v),
+ )
- return self._query(query,
- default_graph=queryGraph if self._is_contextual(queryGraph) else None)
+ return self._query(
+ query, default_graph=queryGraph if self._is_contextual(queryGraph) else None
+ )
def triples(self, spo, context=None):
"""
@@ -225,28 +229,31 @@ class SPARQLStore(SPARQLConnector, Store):
vars = []
if not s:
- s = Variable('s')
+ s = Variable("s")
vars.append(s)
if not p:
- p = Variable('p')
+ p = Variable("p")
vars.append(p)
if not o:
- o = Variable('o')
+ o = Variable("o")
vars.append(o)
if vars:
- v = ' '.join([term.n3() for term in vars])
- verb = 'SELECT %s ' % v
+ v = " ".join([term.n3() for term in vars])
+ verb = "SELECT %s " % v
else:
- verb = 'ASK'
+ verb = "ASK"
nts = self.node_to_sparql
query = "%s { %s %s %s }" % (verb, nts(s), nts(p), nts(o))
# The ORDER BY is necessary
- if hasattr(context, LIMIT) or hasattr(context, OFFSET) \
- or hasattr(context, ORDERBY):
+ if (
+ hasattr(context, LIMIT)
+ or hasattr(context, OFFSET)
+ or hasattr(context, ORDERBY)
+ ):
var = None
if isinstance(s, Variable):
var = s
@@ -254,28 +261,33 @@ class SPARQLStore(SPARQLConnector, Store):
var = p
elif isinstance(o, Variable):
var = o
- elif hasattr(context, ORDERBY) \
- and isinstance(getattr(context, ORDERBY), Variable):
+ elif hasattr(context, ORDERBY) and isinstance(
+ getattr(context, ORDERBY), Variable
+ ):
var = getattr(context, ORDERBY)
- query = query + ' %s %s' % (ORDERBY, var.n3())
+ query = query + " %s %s" % (ORDERBY, var.n3())
try:
- query = query + ' LIMIT %s' % int(getattr(context, LIMIT))
+ query = query + " LIMIT %s" % int(getattr(context, LIMIT))
except (ValueError, TypeError, AttributeError):
pass
try:
- query = query + ' OFFSET %s' % int(getattr(context, OFFSET))
+ query = query + " OFFSET %s" % int(getattr(context, OFFSET))
except (ValueError, TypeError, AttributeError):
pass
- result = self._query(query,
- default_graph=context.identifier if self._is_contextual(context) else None)
+ result = self._query(
+ query,
+ default_graph=context.identifier if self._is_contextual(context) else None,
+ )
if vars:
for row in result:
- yield (row.get(s, s),
- row.get(p, p),
- row.get(o, o)), None # why is the context here not the passed in graph 'context'?
+ yield (
+ row.get(s, s),
+ row.get(p, p),
+ row.get(o, o),
+ ), None # why is the context here not the passed in graph 'context'?
else:
if result.askAnswer:
yield (s, p, o), None
@@ -288,18 +300,23 @@ class SPARQLStore(SPARQLConnector, Store):
which will iterate over each term in the list and dispatch to
triples.
"""
- raise NotImplementedError('Triples choices currently not supported')
+ raise NotImplementedError("Triples choices currently not supported")
def __len__(self, context=None):
if not self.sparql11:
raise NotImplementedError(
- "For performance reasons, this is not" +
- "supported for sparql1.0 endpoints")
+ "For performance reasons, this is not"
+ + "supported for sparql1.0 endpoints"
+ )
else:
q = "SELECT (count(*) as ?c) WHERE {?s ?p ?o .}"
- result = self._query(q,
- default_graph=context.identifier if self._is_contextual(context) else None)
+ result = self._query(
+ q,
+ default_graph=context.identifier
+ if self._is_contextual(context)
+ else None,
+ )
return int(next(iter(result)).c)
@@ -321,12 +338,14 @@ class SPARQLStore(SPARQLConnector, Store):
if triple:
nts = self.node_to_sparql
s, p, o = triple
- params = (nts(s if s else Variable('s')),
- nts(p if p else Variable('p')),
- nts(o if o else Variable('o')))
- q = 'SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params
+ params = (
+ nts(s if s else Variable("s")),
+ nts(p if p else Variable("p")),
+ nts(o if o else Variable("o")),
+ )
+ q = "SELECT ?name WHERE { GRAPH ?name { %s %s %s }}" % params
else:
- q = 'SELECT ?name WHERE { GRAPH ?name {} }'
+ q = "SELECT ?name WHERE { GRAPH ?name {} }"
result = self._query(q)
@@ -338,9 +357,7 @@ class SPARQLStore(SPARQLConnector, Store):
def prefix(self, namespace):
""" """
- return dict(
- [(v, k) for k, v in self.nsBindings.items()]
- ).get(namespace)
+ return dict([(v, k) for k, v in self.nsBindings.items()]).get(namespace)
def namespace(self, prefix):
return self.nsBindings.get(prefix)
@@ -350,10 +367,10 @@ class SPARQLStore(SPARQLConnector, Store):
yield prefix, ns
def add_graph(self, graph):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def remove_graph(self, graph):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def _is_contextual(self, graph):
""" Returns `True` if the "GRAPH" keyword must appear
@@ -362,7 +379,7 @@ class SPARQLStore(SPARQLConnector, Store):
if (not self.context_aware) or (graph is None):
return False
if isinstance(graph, str):
- return graph != '__UNION__'
+ return graph != "__UNION__"
else:
return graph.identifier != DATASET_DEFAULT_GRAPH_ID
@@ -414,19 +431,27 @@ class SPARQLUpdateStore(SPARQLStore):
STRING_LITERAL2 = u'"([^"\\\\]|\\\\.)*"'
STRING_LITERAL_LONG1 = u"'''(('|'')?([^'\\\\]|\\\\.))*'''"
STRING_LITERAL_LONG2 = u'"""(("|"")?([^"\\\\]|\\\\.))*"""'
- String = u'(%s)|(%s)|(%s)|(%s)' % (STRING_LITERAL1, STRING_LITERAL2,
- STRING_LITERAL_LONG1, STRING_LITERAL_LONG2)
+ String = u"(%s)|(%s)|(%s)|(%s)" % (
+ STRING_LITERAL1,
+ STRING_LITERAL2,
+ STRING_LITERAL_LONG1,
+ STRING_LITERAL_LONG2,
+ )
IRIREF = u'<([^<>"{}|^`\\]\\\\\[\\x00-\\x20])*>'
- COMMENT = u'#[^\\x0D\\x0A]*([\\x0D\\x0A]|\\Z)'
+ COMMENT = u"#[^\\x0D\\x0A]*([\\x0D\\x0A]|\\Z)"
# Simplified grammar to find { at beginning and } at end of blocks
- BLOCK_START = u'{'
- BLOCK_END = u'}'
- ESCAPED = u'\\\\.'
+ BLOCK_START = u"{"
+ BLOCK_END = u"}"
+ ESCAPED = u"\\\\."
# Match anything that doesn't start or end a block:
- BlockContent = u'(%s)|(%s)|(%s)|(%s)' % (String, IRIREF, COMMENT, ESCAPED)
- BlockFinding = u'(?P<block_start>%s)|(?P<block_end>%s)|(?P<block_content>%s)' % (BLOCK_START, BLOCK_END, BlockContent)
+ BlockContent = u"(%s)|(%s)|(%s)|(%s)" % (String, IRIREF, COMMENT, ESCAPED)
+ BlockFinding = u"(?P<block_start>%s)|(?P<block_end>%s)|(?P<block_content>%s)" % (
+ BLOCK_START,
+ BLOCK_END,
+ BlockContent,
+ )
BLOCK_FINDING_PATTERN = re.compile(BlockFinding)
# Note that BLOCK_FINDING_PATTERN.finditer() will not cover the whole
@@ -435,15 +460,17 @@ class SPARQLUpdateStore(SPARQLStore):
##################################################################
- def __init__(self,
- queryEndpoint=None, update_endpoint=None,
- sparql11=True,
- context_aware=True,
- postAsEncoded=True,
- autocommit=True,
- dirty_reads=False,
- **kwds
- ):
+ def __init__(
+ self,
+ queryEndpoint=None,
+ update_endpoint=None,
+ sparql11=True,
+ context_aware=True,
+ postAsEncoded=True,
+ autocommit=True,
+ dirty_reads=False,
+ **kwds
+ ):
"""
:param autocommit: if set, the store will commit after every
write operation. If False, we only make queries on the
@@ -521,7 +548,7 @@ class SPARQLUpdateStore(SPARQLStore):
and reads can degenerate to the call-per-triple situation that originally existed.
"""
if self._edits and len(self._edits) > 0:
- self._update('\n;\n'.join(self._edits))
+ self._update("\n;\n".join(self._edits))
self._edits = None
def rollback(self):
@@ -539,8 +566,7 @@ class SPARQLUpdateStore(SPARQLStore):
nts = self.node_to_sparql
triple = "%s %s %s ." % (nts(subject), nts(predicate), nts(obj))
if self._is_contextual(context):
- q = "INSERT DATA { GRAPH %s { %s } }" % (
- nts(context.identifier), triple)
+ q = "INSERT DATA { GRAPH %s { %s } }" % (nts(context.identifier), triple)
else:
q = "INSERT DATA { %s }" % triple
self._transaction().append(q)
@@ -559,12 +585,13 @@ class SPARQLUpdateStore(SPARQLStore):
nts = self.node_to_sparql
for context in contexts:
triples = [
- "%s %s %s ." % (
- nts(subject), nts(predicate), nts(obj)
- ) for subject, predicate, obj in contexts[context]
+ "%s %s %s ." % (nts(subject), nts(predicate), nts(obj))
+ for subject, predicate, obj in contexts[context]
]
- data.append("INSERT DATA { GRAPH %s { %s } }\n" % (
- nts(context.identifier), '\n'.join(triples)))
+ data.append(
+ "INSERT DATA { GRAPH %s { %s } }\n"
+ % (nts(context.identifier), "\n".join(triples))
+ )
self._transaction().extend(data)
if self.autocommit:
self.commit()
@@ -586,7 +613,10 @@ class SPARQLUpdateStore(SPARQLStore):
triple = "%s %s %s ." % (nts(subject), nts(predicate), nts(obj))
if self._is_contextual(context):
cid = nts(context.identifier)
- q = "WITH %(graph)s DELETE { %(triple)s } WHERE { %(triple)s }" % { 'graph': cid, 'triple': triple }
+ q = "WITH %(graph)s DELETE { %(triple)s } WHERE { %(triple)s }" % {
+ "graph": cid,
+ "triple": triple,
+ }
else:
q = "DELETE { %s } WHERE { %s } " % (triple, triple)
self._transaction().append(q)
@@ -602,11 +632,7 @@ class SPARQLUpdateStore(SPARQLStore):
SPARQLConnector.update(self, update)
- def update(self, query,
- initNs={},
- initBindings={},
- queryGraph=None,
- DEBUG=False):
+ def update(self, query, initNs={}, initBindings={}, queryGraph=None, DEBUG=False):
"""
Perform a SPARQL Update query against the endpoint:
INSERT, LOAD, DELETE, etc.
@@ -656,9 +682,10 @@ class SPARQLUpdateStore(SPARQLStore):
# have a WHERE clause. This also works for updates with
# more than one INSERT/DELETE.
v = list(initBindings)
- values = "\nVALUES ( %s )\n{ ( %s ) }\n"\
- % (" ".join("?" + str(x) for x in v),
- " ".join(self.node_to_sparql(initBindings[x]) for x in v))
+ values = "\nVALUES ( %s )\n{ ( %s ) }\n" % (
+ " ".join("?" + str(x) for x in v),
+ " ".join(self.node_to_sparql(initBindings[x]) for x in v),
+ )
query = self.where_pattern.sub("WHERE { " + values, query)
@@ -677,7 +704,7 @@ class SPARQLUpdateStore(SPARQLStore):
if isinstance(query_graph, Node):
query_graph = self.node_to_sparql(query_graph)
else:
- query_graph = '<%s>' % query_graph
+ query_graph = "<%s>" % query_graph
graph_block_open = " GRAPH %s {" % query_graph
graph_block_close = "} "
@@ -696,16 +723,18 @@ class SPARQLUpdateStore(SPARQLStore):
modified_query = []
pos = 0
for match in self.BLOCK_FINDING_PATTERN.finditer(query):
- if match.group('block_start') is not None:
+ if match.group("block_start") is not None:
level += 1
if level == 1:
- modified_query.append(query[pos:match.end()])
+ modified_query.append(query[pos : match.end()])
modified_query.append(graph_block_open)
pos = match.end()
- elif match.group('block_end') is not None:
+ elif match.group("block_end") is not None:
if level == 1:
- since_previous_pos = query[pos:match.start()]
- if modified_query[-1] is graph_block_open and (since_previous_pos == "" or since_previous_pos.isspace()):
+ since_previous_pos = query[pos : match.start()]
+ if modified_query[-1] is graph_block_open and (
+ since_previous_pos == "" or since_previous_pos.isspace()
+ ):
# In this case, adding graph_block_start and
# graph_block_end results in an empty GRAPH block. Some
# endpoints (e.g. TDB) cannot handle this. Therefore
@@ -725,8 +754,7 @@ class SPARQLUpdateStore(SPARQLStore):
if not self.graph_aware:
Store.add_graph(self, graph)
elif graph.identifier != DATASET_DEFAULT_GRAPH_ID:
- self.update(
- "CREATE GRAPH %s" % self.node_to_sparql(graph.identifier))
+ self.update("CREATE GRAPH %s" % self.node_to_sparql(graph.identifier))
def close(self, commit_pending_transaction=False):
@@ -741,5 +769,4 @@ class SPARQLUpdateStore(SPARQLStore):
elif graph.identifier == DATASET_DEFAULT_GRAPH_ID:
self.update("DROP DEFAULT")
else:
- self.update(
- "DROP GRAPH %s" % self.node_to_sparql(graph.identifier))
+ self.update("DROP GRAPH %s" % self.node_to_sparql(graph.identifier))
diff --git a/rdflib/query.py b/rdflib/query.py
index 4ff27598..3e21632f 100644
--- a/rdflib/query.py
+++ b/rdflib/query.py
@@ -13,8 +13,7 @@ from io import BytesIO
from urllib.parse import urlparse
-__all__ = ['Processor', 'Result', 'ResultParser', 'ResultSerializer',
- 'ResultException']
+__all__ = ["Processor", "Result", "ResultParser", "ResultSerializer", "ResultException"]
class Processor(object):
@@ -117,10 +116,8 @@ class ResultRow(tuple):
def __new__(cls, values, labels):
- instance = super(ResultRow, cls).__new__(
- cls, (values.get(v) for v in labels))
- instance.labels = dict((str(x[1]), x[0])
- for x in enumerate(labels))
+ instance = super(ResultRow, cls).__new__(cls, (values.get(v) for v in labels))
+ instance.labels = dict((str(x[1]), x[0]) for x in enumerate(labels))
return instance
def __getattr__(self, name):
@@ -169,8 +166,8 @@ class Result(object):
def __init__(self, type_):
- if type_ not in ('CONSTRUCT', 'DESCRIBE', 'SELECT', 'ASK'):
- raise ResultException('Unknown Result type: %s' % type_)
+ if type_ not in ("CONSTRUCT", "DESCRIBE", "SELECT", "ASK"):
+ raise ResultException("Unknown Result type: %s" % type_)
self.type = type_
self.vars = None
@@ -194,7 +191,8 @@ class Result(object):
self._bindings = b
bindings = property(
- _get_bindings, _set_bindings, doc="a list of variable bindings as dicts")
+ _get_bindings, _set_bindings, doc="a list of variable bindings as dicts"
+ )
@staticmethod
def parse(source=None, format=None, content_type=None, **kwargs):
@@ -205,21 +203,22 @@ class Result(object):
elif content_type:
plugin_key = content_type.split(";", 1)[0]
else:
- plugin_key = 'xml'
+ plugin_key = "xml"
parser = plugin.get(plugin_key, ResultParser)()
return parser.parse(source, content_type=content_type, **kwargs)
- def serialize(
- self, destination=None, encoding="utf-8", format='xml', **args):
+ def serialize(self, destination=None, encoding="utf-8", format="xml", **args):
- if self.type in ('CONSTRUCT', 'DESCRIBE'):
+ if self.type in ("CONSTRUCT", "DESCRIBE"):
return self.graph.serialize(
- destination, encoding=encoding, format=format, **args)
+ destination, encoding=encoding, format=format, **args
+ )
"""stolen wholesale from graph.serialize"""
from rdflib import plugin
+
serializer = plugin.get(format, ResultSerializer)(self)
if destination is None:
stream = BytesIO()
@@ -233,11 +232,12 @@ class Result(object):
location = destination
scheme, netloc, path, params, query, fragment = urlparse(location)
if netloc != "":
- print("WARNING: not saving as location" +
- "is not a local file reference")
+ print(
+ "WARNING: not saving as location" + "is not a local file reference"
+ )
return
fd, name = tempfile.mkstemp()
- stream = os.fdopen(fd, 'wb')
+ stream = os.fdopen(fd, "wb")
serializer.serialize(stream, encoding=encoding, **args)
stream.close()
if hasattr(shutil, "move"):
@@ -247,15 +247,15 @@ class Result(object):
os.remove(name)
def __len__(self):
- if self.type == 'ASK':
+ if self.type == "ASK":
return 1
- elif self.type == 'SELECT':
+ elif self.type == "SELECT":
return len(self.bindings)
else:
return len(self.graph)
def __bool__(self):
- if self.type == 'ASK':
+ if self.type == "ASK":
return self.askAnswer
else:
return len(self) > 0
@@ -264,9 +264,9 @@ class Result(object):
if self.type in ("CONSTRUCT", "DESCRIBE"):
for t in self.graph:
yield t
- elif self.type == 'ASK':
+ elif self.type == "ASK":
yield self.askAnswer
- elif self.type == 'SELECT':
+ elif self.type == "SELECT":
# this iterates over ResultRows of variable bindings
if self._genbindings:
@@ -283,26 +283,26 @@ class Result(object):
def __getattr__(self, name):
if self.type in ("CONSTRUCT", "DESCRIBE") and self.graph is not None:
return self.graph.__getattr__(self, name)
- elif self.type == 'SELECT' and name == 'result':
+ elif self.type == "SELECT" and name == "result":
warnings.warn(
"accessing the 'result' attribute is deprecated."
" Iterate over the object instead.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning,
+ stacklevel=2,
+ )
# copied from __iter__, above
return [(tuple(b[v] for v in self.vars)) for b in self.bindings]
else:
- raise AttributeError(
- "'%s' object has no attribute '%s'" % (self, name))
+ raise AttributeError("'%s' object has no attribute '%s'" % (self, name))
def __eq__(self, other):
try:
if self.type != other.type:
return False
- if self.type == 'ASK':
+ if self.type == "ASK":
return self.askAnswer == other.askAnswer
- elif self.type == 'SELECT':
- return self.vars == other.vars \
- and self.bindings == other.bindings
+ elif self.type == "SELECT":
+ return self.vars == other.vars and self.bindings == other.bindings
else:
return self.graph == other.graph
@@ -311,7 +311,6 @@ class Result(object):
class ResultParser(object):
-
def __init__(self):
pass
@@ -321,7 +320,6 @@ class ResultParser(object):
class ResultSerializer(object):
-
def __init__(self, result):
self.result = result
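
(Illustration, not part of the diff: Result is what Graph.query returns; a sketch of the common access patterns, with illustrative data.)

    from rdflib import Graph

    g = Graph()
    g.parse(
        data="<http://example.org/a> <http://example.org/p> <http://example.org/b> .",
        format="nt",
    )
    res = g.query("SELECT ?s ?o WHERE { ?s ?p ?o }")
    print(res.type)             # "SELECT"
    for row in res:             # rows are ResultRow tuples
        print(row.s, row["o"])  # access by attribute or by variable name
    print(res.serialize(format="json"))  # bytes, via a ResultSerializer plugin
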
diff --git a/rdflib/resource.py b/rdflib/resource.py
index 39bdc32c..691a07f1 100644
--- a/rdflib/resource.py
+++ b/rdflib/resource.py
@@ -318,11 +318,10 @@ from rdflib.term import Node, BNode, URIRef
from rdflib.namespace import RDF
from rdflib.paths import Path
-__all__ = ['Resource']
+__all__ = ["Resource"]
class Resource(object):
-
def __init__(self, graph, subject):
self._graph = graph
self._identifier = subject
@@ -335,11 +334,14 @@ class Resource(object):
return hash(Resource) ^ hash(self._graph) ^ hash(self._identifier)
def __eq__(self, other):
- return (isinstance(other, Resource) and
- self._graph == other._graph and
- self._identifier == other._identifier)
+ return (
+ isinstance(other, Resource)
+ and self._graph == other._graph
+ and self._identifier == other._identifier
+ )
- def __ne__(self, other): return not self == other
+ def __ne__(self, other):
+ return not self == other
def __lt__(self, other):
if isinstance(other, Resource):
@@ -347,11 +349,14 @@ class Resource(object):
else:
return False
- def __gt__(self, other): return not (self < other or self == other)
+ def __gt__(self, other):
+ return not (self < other or self == other)
- def __le__(self, other): return self < other or self == other
+ def __le__(self, other):
+ return self < other or self == other
- def __ge__(self, other): return not self < other
+ def __ge__(self, other):
+ return not self < other
def __unicode__(self):
return str(self._identifier)
@@ -377,38 +382,31 @@ class Resource(object):
self._graph.set((self._identifier, p, o))
def subjects(self, predicate=None): # rev
- return self._resources(
- self._graph.subjects(predicate, self._identifier))
+ return self._resources(self._graph.subjects(predicate, self._identifier))
def predicates(self, o=None):
if isinstance(o, Resource):
o = o._identifier
- return self._resources(
- self._graph.predicates(self._identifier, o))
+ return self._resources(self._graph.predicates(self._identifier, o))
def objects(self, predicate=None):
- return self._resources(
- self._graph.objects(self._identifier, predicate))
+ return self._resources(self._graph.objects(self._identifier, predicate))
def subject_predicates(self):
- return self._resource_pairs(
- self._graph.subject_predicates(self._identifier))
+ return self._resource_pairs(self._graph.subject_predicates(self._identifier))
def subject_objects(self):
- return self._resource_pairs(
- self._graph.subject_objects(self._identifier))
+ return self._resource_pairs(self._graph.subject_objects(self._identifier))
def predicate_objects(self):
- return self._resource_pairs(
- self._graph.predicate_objects(self._identifier))
+ return self._resource_pairs(self._graph.predicate_objects(self._identifier))
def value(self, p=RDF.value, o=None, default=None, any=True):
if isinstance(o, Resource):
o = o._identifier
- return self._cast(
- self._graph.value(self._identifier, p, o, default, any))
+ return self._cast(self._graph.value(self._identifier, p, o, default, any))
def label(self):
return self._graph.label(self._identifier)
@@ -420,12 +418,14 @@ class Resource(object):
return self._resources(self._graph.items(self._identifier))
def transitive_objects(self, predicate, remember=None):
- return self._resources(self._graph.transitive_objects(
- self._identifier, predicate, remember))
+ return self._resources(
+ self._graph.transitive_objects(self._identifier, predicate, remember)
+ )
def transitive_subjects(self, predicate, remember=None):
- return self._resources(self._graph.transitive_subjects(
- predicate, self._identifier, remember))
+ return self._resources(
+ self._graph.transitive_subjects(predicate, self._identifier, remember)
+ )
def seq(self):
return self._resources(self._graph.seq(self._identifier))
@@ -452,12 +452,16 @@ class Resource(object):
return node
def __iter__(self):
- return self._resource_triples(self._graph.triples((self.identifier, None, None)))
+ return self._resource_triples(
+ self._graph.triples((self.identifier, None, None))
+ )
def __getitem__(self, item):
if isinstance(item, slice):
if item.step:
- raise TypeError("Resources fix the subject for slicing, and can only be sliced by predicate/object. ")
+ raise TypeError(
+ "Resources fix the subject for slicing, and can only be sliced by predicate/object. "
+ )
p, o = item.start, item.stop
if isinstance(p, Resource):
p = p._identifier
@@ -474,7 +478,10 @@ class Resource(object):
elif isinstance(item, (Node, Path)):
return self.objects(item)
else:
- raise TypeError("You can only index a resource by a single rdflib term, a slice of rdflib terms, not %s (%s)"%(item, type(item)))
+ raise TypeError(
+ "You can only index a resource by a single rdflib term, a slice of rdflib terms, not %s (%s)"
+ % (item, type(item))
+ )
def __setitem__(self, item, value):
self.set(item, value)
@@ -483,7 +490,7 @@ class Resource(object):
return type(self)(self._graph, subject)
def __str__(self):
- return 'Resource(%s)' % self._identifier
+ return "Resource(%s)" % self._identifier
def __repr__(self):
- return 'Resource(%s,%s)' % (self._graph, self._identifier)
+ return "Resource(%s,%s)" % (self._graph, self._identifier)
diff --git a/rdflib/serializer.py b/rdflib/serializer.py
index e5a31989..ecb8da0a 100644
--- a/rdflib/serializer.py
+++ b/rdflib/serializer.py
@@ -12,11 +12,10 @@ See also rdflib.plugin
from rdflib.term import URIRef
-__all__ = ['Serializer']
+__all__ = ["Serializer"]
class Serializer(object):
-
def __init__(self, store):
self.store = store
self.encoding = "UTF-8"
diff --git a/rdflib/store.py b/rdflib/store.py
index b70f1e97..ead1c2e7 100644
--- a/rdflib/store.py
+++ b/rdflib/store.py
@@ -5,6 +5,7 @@ from __future__ import print_function
from io import BytesIO
import pickle
from rdflib.events import Dispatcher, Event
+
"""
============
rdflib.store
@@ -44,8 +45,13 @@ Pickler = pickle.Pickler
Unpickler = pickle.Unpickler
UnpicklingError = pickle.UnpicklingError
-__all__ = ['StoreCreatedEvent', 'TripleAddedEvent', 'TripleRemovedEvent',
- 'NodePickler', 'Store']
+__all__ = [
+ "StoreCreatedEvent",
+ "TripleAddedEvent",
+ "TripleRemovedEvent",
+ "NodePickler",
+ "Store",
+]
class StoreCreatedEvent(Event):
@@ -113,11 +119,10 @@ class NodePickler(object):
def __getstate__(self):
state = self.__dict__.copy()
- del state['_get_object']
- state.update({
- '_ids': tuple(self._ids.items()),
- '_objects': tuple(self._objects.items())
- })
+ del state["_get_object"]
+ state.update(
+ {"_ids": tuple(self._ids.items()), "_objects": tuple(self._objects.items())}
+ )
return state
def __setstate__(self, state):
@@ -153,6 +158,7 @@ class Store(object):
from rdflib.graph import Graph, QuotedGraph
from rdflib.term import Variable
from rdflib.term import Statement
+
self.__node_pickler = np = NodePickler()
np.register(self, "S")
np.register(URIRef, "U")
@@ -163,12 +169,12 @@ class Store(object):
np.register(Variable, "V")
np.register(Statement, "s")
return self.__node_pickler
+
node_pickler = property(__get_node_pickler)
# Database management methods
def create(self, configuration):
- self.dispatcher.dispatch(
- StoreCreatedEvent(configuration=configuration))
+ self.dispatcher.dispatch(StoreCreatedEvent(configuration=configuration))
def open(self, configuration, create=False):
"""
@@ -211,9 +217,7 @@ class Store(object):
be an error for the quoted argument to be True when the store is not
formula-aware.
"""
- self.dispatcher.dispatch(
- TripleAddedEvent(
- triple=triple, context=context))
+ self.dispatcher.dispatch(TripleAddedEvent(triple=triple, context=context))
def addN(self, quads):
"""
@@ -223,15 +227,16 @@ class Store(object):
is a redirect to add
"""
for s, p, o, c in quads:
- assert c is not None, \
- "Context associated with %s %s %s is None!" % (s, p, o)
+ assert c is not None, "Context associated with %s %s %s is None!" % (
+ s,
+ p,
+ o,
+ )
self.add((s, p, o), c)
def remove(self, triple, context=None):
""" Remove the set of triples matching the pattern from the store """
- self.dispatcher.dispatch(
- TripleRemovedEvent(
- triple=triple, context=context))
+ self.dispatcher.dispatch(TripleRemovedEvent(triple=triple, context=context))
def triples_choices(self, triple, context=None):
"""
@@ -242,44 +247,44 @@ class Store(object):
"""
subject, predicate, object_ = triple
if isinstance(object_, list):
- assert not isinstance(
- subject, list), "object_ / subject are both lists"
- assert not isinstance(
- predicate, list), "object_ / predicate are both lists"
+ assert not isinstance(subject, list), "object_ / subject are both lists"
+ assert not isinstance(predicate, list), "object_ / predicate are both lists"
if object_:
for obj in object_:
for (s1, p1, o1), cg in self.triples(
- (subject, predicate, obj), context):
+ (subject, predicate, obj), context
+ ):
yield (s1, p1, o1), cg
else:
for (s1, p1, o1), cg in self.triples(
- (subject, predicate, None), context):
+ (subject, predicate, None), context
+ ):
yield (s1, p1, o1), cg
elif isinstance(subject, list):
- assert not isinstance(
- predicate, list), "subject / predicate are both lists"
+ assert not isinstance(predicate, list), "subject / predicate are both lists"
if subject:
for subj in subject:
for (s1, p1, o1), cg in self.triples(
- (subj, predicate, object_), context):
+ (subj, predicate, object_), context
+ ):
yield (s1, p1, o1), cg
else:
for (s1, p1, o1), cg in self.triples(
- (None, predicate, object_), context):
+ (None, predicate, object_), context
+ ):
yield (s1, p1, o1), cg
elif isinstance(predicate, list):
- assert not isinstance(
- subject, list), "predicate / subject are both lists"
+ assert not isinstance(subject, list), "predicate / subject are both lists"
if predicate:
for pred in predicate:
for (s1, p1, o1), cg in self.triples(
- (subject, pred, object_), context):
+ (subject, pred, object_), context
+ ):
yield (s1, p1, o1), cg
else:
- for (s1, p1, o1), cg in self.triples(
- (subject, None, object_), context):
+ for (s1, p1, o1), cg in self.triples((subject, None, object_), context):
yield (s1, p1, o1), cg
def triples(self, triple_pattern, context=None):
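
(Illustration, not part of the diff: triples_choices expands a list in exactly one position into repeated triples() calls; data is illustrative.)

    from rdflib import Graph, URIRef

    g = Graph()
    s = URIRef("http://example.org/s")
    p1 = URIRef("http://example.org/p1")
    p2 = URIRef("http://example.org/p2")
    g.add((s, p1, URIRef("http://example.org/o1")))
    g.add((s, p2, URIRef("http://example.org/o2")))

    # one call with a list of predicates, equivalent to two triples() queries
    for triple in g.triples_choices((s, [p1, p2], None)):
        print(triple)
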
diff --git a/rdflib/term.py b/rdflib/term.py
index b6819b5d..c2833fd6 100644
--- a/rdflib/term.py
+++ b/rdflib/term.py
@@ -23,24 +23,23 @@ underlying Graph:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+
# from __future__ import unicode_literals
from fractions import Fraction
__all__ = [
- 'bind',
-
- 'Node',
- 'Identifier',
-
- 'URIRef',
- 'BNode',
- 'Literal',
-
- 'Variable',
- 'Statement',
+ "bind",
+ "Node",
+ "Identifier",
+ "URIRef",
+ "BNode",
+ "Literal",
+ "Variable",
+ "Statement",
]
import logging
+
logger = logging.getLogger(__name__)
import warnings
import math
@@ -52,7 +51,14 @@ from datetime import date, time, datetime, timedelta
from re import sub, compile
from collections import defaultdict
-from isodate import parse_time, parse_date, parse_datetime, Duration, parse_duration, duration_isoformat
+from isodate import (
+ parse_time,
+ parse_date,
+ parse_datetime,
+ Duration,
+ parse_duration,
+ duration_isoformat,
+)
from binascii import hexlify, unhexlify
import rdflib
@@ -74,7 +80,7 @@ def _is_valid_uri(uri):
return all(map(lambda c: ord(c) > 256 or not c in _invalid_uri_chars, uri))
-_lang_tag_regex = compile('^[a-zA-Z]+(?:-[a-zA-Z0-9]+)*$')
+_lang_tag_regex = compile("^[a-zA-Z]+(?:-[a-zA-Z0-9]+)*$")
def _is_valid_langtag(tag):
@@ -87,7 +93,7 @@ def _is_valid_unicode(value):
unicode object.
"""
if isinstance(value, bytes):
- coding_func, param = getattr(value, 'decode'), 'utf-8'
+ coding_func, param = getattr(value, "decode"), "utf-8"
else:
coding_func, param = str, value
@@ -221,13 +227,15 @@ class URIRef(Identifier):
value += "#"
if not _is_valid_uri(value):
- logger.warning('%s does not look like a valid URI, trying to serialize this will break.'%value)
-
+ logger.warning(
+ "%s does not look like a valid URI, trying to serialize this will break."
+ % value
+ )
try:
rt = str.__new__(cls, value)
except UnicodeDecodeError:
- rt = str.__new__(cls, value, 'utf-8')
+ rt = str.__new__(cls, value, "utf-8")
return rt
def toPython(self):
@@ -244,7 +252,10 @@ class URIRef(Identifier):
"""
if not _is_valid_uri(self):
- raise Exception('"%s" does not look like a valid URI, I cannot serialize this as N3/Turtle. Perhaps you wanted to urlencode it?'%self)
+ raise Exception(
+ '"%s" does not look like a valid URI, I cannot serialize this as N3/Turtle. Perhaps you wanted to urlencode it?'
+ % self
+ )
if namespace_manager:
return namespace_manager.normalizeUri(self)
@@ -262,7 +273,7 @@ class URIRef(Identifier):
return (URIRef, (str(self),))
def __getnewargs__(self):
- return (str(self), )
+ return (str(self),)
def __repr__(self):
if self.__class__ is URIRef:
@@ -291,8 +302,7 @@ class URIRef(Identifier):
"""
if isinstance(self, RDFLibGenid):
parsed_uri = urlparse("%s" % self)
- return BNode(
- value=parsed_uri.path[len(rdflib_skolem_genid):])
+ return BNode(value=parsed_uri.path[len(rdflib_skolem_genid) :])
elif isinstance(self, Genid):
bnode_id = "%s" % self
if bnode_id in skolems:
@@ -327,9 +337,11 @@ class RDFLibGenid(Genid):
if not isinstance(uri, str):
uri = str(uri)
parsed_uri = urlparse(uri)
- if parsed_uri.params != "" \
- or parsed_uri.query != "" \
- or parsed_uri.fragment != "":
+ if (
+ parsed_uri.params != ""
+ or parsed_uri.query != ""
+ or parsed_uri.fragment != ""
+ ):
return False
gen_id = parsed_uri.path.rfind(rdflib_skolem_genid)
if gen_id != 0:
@@ -367,10 +379,12 @@ class BNode(Identifier):
Blank Node: http://www.w3.org/TR/rdf-concepts/#section-blank-nodes
"""
+
__slots__ = ()
- def __new__(cls, value=None,
- _sn_gen=_serial_number_generator(), _prefix=_unique_id()):
+ def __new__(
+ cls, value=None, _sn_gen=_serial_number_generator(), _prefix=_unique_id()
+ ):
"""
# only store implementations should pass in a value
"""
@@ -396,7 +410,7 @@ class BNode(Identifier):
return "_:%s" % self
def __getnewargs__(self):
- return (str(self), )
+ return (str(self),)
def __reduce__(self):
return (BNode, (str(self),))
@@ -510,7 +524,7 @@ class Literal(Identifier):
def __new__(cls, lexical_or_value, lang=None, datatype=None, normalize=None):
- if lang == '':
+ if lang == "":
lang = None # no empty lang-tags in RDF
normalize = normalize if normalize is not None else rdflib.NORMALIZE_LITERALS
@@ -518,7 +532,8 @@ class Literal(Identifier):
if lang is not None and datatype is not None:
raise TypeError(
"A Literal can only have one of lang or datatype, "
- "per http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal")
+ "per http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal"
+ )
if lang and not _is_valid_langtag(lang):
raise Exception("'%s' is not a valid language tag!" % lang)
@@ -539,8 +554,8 @@ class Literal(Identifier):
value = lexical_or_value.value
elif isinstance(lexical_or_value, str) or isinstance(lexical_or_value, bytes):
- # passed a string
- # try parsing lexical form of datatyped literal
+ # passed a string
+ # try parsing lexical form of datatyped literal
value = _castLexicalToPython(lexical_or_value, datatype)
if value is not None and normalize:
@@ -560,12 +575,12 @@ class Literal(Identifier):
lang = None
if isinstance(lexical_or_value, bytes):
- lexical_or_value = lexical_or_value.decode('utf-8')
+ lexical_or_value = lexical_or_value.decode("utf-8")
try:
inst = str.__new__(cls, lexical_or_value)
except UnicodeDecodeError:
- inst = str.__new__(cls, lexical_or_value, 'utf-8')
+ inst = str.__new__(cls, lexical_or_value, "utf-8")
inst._language = lang
inst._datatype = datatype
@@ -604,7 +619,10 @@ class Literal(Identifier):
return self._datatype
def __reduce__(self):
- return (Literal, (str(self), self.language, self.datatype),)
+ return (
+ Literal,
+ (str(self), self.language, self.datatype),
+ )
def __getstate__(self):
return (None, dict(language=self.language, datatype=self.datatype))
@@ -632,19 +650,25 @@ class Literal(Identifier):
# if the datatypes are the same, just add the Python values and convert back
if self.datatype == val.datatype:
- return Literal(self.toPython() + val.toPython(), self.language, datatype=self.datatype)
+ return Literal(
+ self.toPython() + val.toPython(), self.language, datatype=self.datatype
+ )
# if the datatypes are not the same but are both numeric, add the Python values and strip off decimal junk
# (i.e. tiny numbers (more than 17 decimal places) and trailing zeros) and return as a decimal
elif (
- self.datatype in _NUMERIC_LITERAL_TYPES
- and
- val.datatype in _NUMERIC_LITERAL_TYPES
+ self.datatype in _NUMERIC_LITERAL_TYPES
+ and val.datatype in _NUMERIC_LITERAL_TYPES
):
return Literal(
Decimal(
- ('%f' % round(Decimal(self.toPython()) + Decimal(val.toPython()), 15)).rstrip('0').rstrip('.')
+ (
+ "%f"
+ % round(Decimal(self.toPython()) + Decimal(val.toPython()), 15)
+ )
+ .rstrip("0")
+ .rstrip(".")
),
- datatype=_XSD_DECIMAL
+ datatype=_XSD_DECIMAL,
)
# in all other cases, perform string concatenation
else:
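
(Illustration, not part of the diff: the three __add__ branches above in practice. Same datatypes add natively, mixed numeric datatypes come back as xsd:decimal with trailing junk stripped, and everything else falls through to string concatenation.)

    from rdflib import Literal

    print((Literal(1) + Literal(2)).toPython())  # 3: same datatype, added natively
    print((Literal(1) + Literal(0.5)).datatype)  # xsd:decimal: mixed numeric types
    print(Literal(1) + Literal("b"))             # 1b: string concatenation fallback
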
@@ -793,8 +817,10 @@ class Literal(Identifier):
return True # Everything is greater than None
if isinstance(other, Literal):
- if self.datatype in _NUMERIC_LITERAL_TYPES and \
- other.datatype in _NUMERIC_LITERAL_TYPES:
+ if (
+ self.datatype in _NUMERIC_LITERAL_TYPES
+ and other.datatype in _NUMERIC_LITERAL_TYPES
+ ):
return self.value > other.value
# plain-literals and xsd:string literals
@@ -889,17 +915,21 @@ class Literal(Identifier):
rich-compare with this literal
"""
if isinstance(other, Literal):
- if (self.datatype and other.datatype):
+ if self.datatype and other.datatype:
# two datatyped literals
- if not self.datatype in XSDToPython or not other.datatype in XSDToPython:
+ if (
+ not self.datatype in XSDToPython
+ or not other.datatype in XSDToPython
+ ):
# non XSD DTs must match
if self.datatype != other.datatype:
return False
else:
# xsd:string may be compared with plain literals
- if not (self.datatype == _XSD_STRING and not other.datatype) or \
- (other.datatype == _XSD_STRING and not self.datatype):
+ if not (self.datatype == _XSD_STRING and not other.datatype) or (
+ other.datatype == _XSD_STRING and not self.datatype
+ ):
return False
# if given lang-tag has to be case insensitive equal
@@ -986,9 +1016,12 @@ class Literal(Identifier):
if other is None:
return False
if isinstance(other, Literal):
- return self.datatype == other.datatype \
- and (self.language.lower() if self.language else None) == (other.language.lower() if other.language else None) \
+ return (
+ self.datatype == other.datatype
+ and (self.language.lower() if self.language else None)
+ == (other.language.lower() if other.language else None)
and str.__eq__(self, other)
+ )
return False
@@ -1016,29 +1049,35 @@ class Literal(Identifier):
"""
if isinstance(other, Literal):
- if self.datatype in _NUMERIC_LITERAL_TYPES \
- and other.datatype in _NUMERIC_LITERAL_TYPES:
+ if (
+ self.datatype in _NUMERIC_LITERAL_TYPES
+ and other.datatype in _NUMERIC_LITERAL_TYPES
+ ):
if self.value is not None and other.value is not None:
return self.value == other.value
else:
if str.__eq__(self, other):
return True
raise TypeError(
- 'I cannot know that these two lexical forms do not map to the same value: %s and %s' % (self, other))
+ "I cannot know that these two lexical forms do not map to the same value: %s and %s"
+ % (self, other)
+ )
if (self.language or "").lower() != (other.language or "").lower():
return False
dtself = self.datatype or _XSD_STRING
dtother = other.datatype or _XSD_STRING
- if (dtself == _XSD_STRING and dtother == _XSD_STRING):
+ if dtself == _XSD_STRING and dtother == _XSD_STRING:
# string/plain literals, compare on lexical form
return str.__eq__(self, other)
if dtself != dtother:
if rdflib.DAWG_LITERAL_COLLATION:
- raise TypeError("I don't know how to compare literals with datatypes %s and %s" % (
- self.datatype, other.datatype))
+ raise TypeError(
+ "I don't know how to compare literals with datatypes %s and %s"
+ % (self.datatype, other.datatype)
+ )
else:
return False
@@ -1062,7 +1101,9 @@ class Literal(Identifier):
# matching DTs, but not matching, we cannot compare!
raise TypeError(
- 'I cannot know that these two lexical forms do not map to the same value: %s and %s' % (self, other))
+ "I cannot know that these two lexical forms do not map to the same value: %s and %s"
+ % (self, other)
+ )
elif isinstance(other, Node):
return False # no non-Literal nodes are equal to a literal
@@ -1074,7 +1115,7 @@ class Literal(Identifier):
if self.language is not None:
return False
- if (self.datatype == _XSD_STRING or self.datatype is None):
+ if self.datatype == _XSD_STRING or self.datatype is None:
return str(self) == other
elif isinstance(other, (int, long_type, float)):
@@ -1084,7 +1125,11 @@ class Literal(Identifier):
if self.datatype in (_XSD_DATETIME, _XSD_DATE, _XSD_TIME):
return self.value == other
elif isinstance(other, (timedelta, Duration)):
- if self.datatype in (_XSD_DURATION, _XSD_DAYTIMEDURATION, _XSD_YEARMONTHDURATION):
+ if self.datatype in (
+ _XSD_DURATION,
+ _XSD_DAYTIMEDURATION,
+ _XSD_YEARMONTHDURATION,
+ ):
return self.value == other
elif isinstance(other, bool):
if self.datatype == _XSD_BOOLEAN:
@@ -1154,7 +1199,7 @@ class Literal(Identifier):
return self._literal_n3()
def _literal_n3(self, use_plain=False, qname_callback=None):
- '''
+ """
Using plain literal (shorthand) output::
>>> from rdflib.namespace import XSD
@@ -1197,7 +1242,7 @@ class Literal(Identifier):
... qname_callback=lambda uri: "xsd:integer")
u'"1"^^xsd:integer'
- '''
+ """
if use_plain and self.datatype in _PLAIN_LITERAL_TYPES:
if self.value is not None:
# If self is inf or NaN, we need a datatype
@@ -1214,17 +1259,17 @@ class Literal(Identifier):
# in py >=2.6 the string.format function makes this easier
# we try to produce "pretty" output
if self.datatype == _XSD_DOUBLE:
- return sub("\\.?0*e", "e", u'%e' % float(self))
+ return sub("\\.?0*e", "e", u"%e" % float(self))
elif self.datatype == _XSD_DECIMAL:
- s = '%s' % self
- if '.' not in s:
- s += '.0'
+ s = "%s" % self
+ if "." not in s:
+ s += ".0"
return s
elif self.datatype == _XSD_BOOLEAN:
- return (u'%s' % self).lower()
+ return (u"%s" % self).lower()
else:
- return u'%s' % self
+ return u"%s" % self
encoded = self._quote_encode()
@@ -1241,10 +1286,11 @@ class Literal(Identifier):
if math.isinf(v):
# py string reps: float: 'inf', Decimal: 'Infinity'
# both need to become "INF" in xsd datatypes
- encoded = encoded.replace('inf', 'INF').replace(
- 'Infinity', 'INF')
+ encoded = encoded.replace("inf", "INF").replace(
+ "Infinity", "INF"
+ )
if math.isnan(v):
- encoded = encoded.replace('nan', 'NaN')
+ encoded = encoded.replace("nan", "NaN")
except ValueError:
# if we can't cast to float something is wrong, but we can
# still serialize. Warn user about it
@@ -1252,11 +1298,11 @@ class Literal(Identifier):
language = self.language
if language:
- return '%s@%s' % (encoded, language)
+ return "%s@%s" % (encoded, language)
elif datatype:
- return '%s^^%s' % (encoded, quoted_dt)
+ return "%s^^%s" % (encoded, quoted_dt)
else:
- return '%s' % encoded
+ return "%s" % encoded
def _quote_encode(self):
# This simpler encoding doesn't work; a newline gets encoded as "\\n",
@@ -1270,20 +1316,18 @@ class Literal(Identifier):
if "\n" in self:
# Triple quote this string.
- encoded = self.replace('\\', '\\\\')
+ encoded = self.replace("\\", "\\\\")
if '"""' in self:
# is this ok?
encoded = encoded.replace('"""', '\\"\\"\\"')
- if encoded[-1] == '"' and encoded[-2] != '\\':
- encoded = encoded[:-1] + '\\' + '"'
+ if encoded[-1] == '"' and encoded[-2] != "\\":
+ encoded = encoded[:-1] + "\\" + '"'
- return '"""%s"""' % encoded.replace('\r', '\\r')
+ return '"""%s"""' % encoded.replace("\r", "\\r")
else:
- return '"%s"' % self.replace(
- '\n', '\\n').replace(
- '\\', '\\\\').replace(
- '"', '\\"').replace(
- '\r', '\\r')
+ return '"%s"' % self.replace("\n", "\\n").replace("\\", "\\\\").replace(
+ '"', '\\"'
+ ).replace("\r", "\\r")
def __repr__(self):
args = [super(Literal, self).__repr__()]
@@ -1309,7 +1353,8 @@ class Literal(Identifier):
def _parseXML(xmlstring):
retval = xml.dom.minidom.parseString(
- "<rdflibtoplevelelement>%s</rdflibtoplevelelement>" % xmlstring)
+ "<rdflibtoplevelelement>%s</rdflibtoplevelelement>" % xmlstring
+ )
retval.normalize()
return retval
@@ -1317,15 +1362,16 @@ def _parseXML(xmlstring):
def _parseHTML(htmltext):
try:
import html5lib
- parser = html5lib.HTMLParser(
- tree=html5lib.treebuilders.getTreeBuilder("dom"))
+
+ parser = html5lib.HTMLParser(tree=html5lib.treebuilders.getTreeBuilder("dom"))
retval = parser.parseFragment(htmltext)
retval.normalize()
return retval
except ImportError:
raise ImportError(
- "HTML5 parser not available. Try installing" +
- " html5lib <http://code.google.com/p/html5lib>")
+ "HTML5 parser not available. Try installing"
+ + " html5lib <http://code.google.com/p/html5lib>"
+ )
def _writeXML(xmlnode):
@@ -1333,16 +1379,16 @@ def _writeXML(xmlnode):
d = xml.dom.minidom.Document()
d.childNodes += xmlnode.childNodes
xmlnode = d
- s = xmlnode.toxml('utf-8')
+ s = xmlnode.toxml("utf-8")
# for clean round-tripping, remove headers -- I have great and
# specific worries that this will blow up later, but this margin
# is too narrow to contain them
if s.startswith('<?xml version="1.0" encoding="utf-8"?>'.encode("latin-1")):
s = s[38:]
- if s.startswith('<rdflibtoplevelelement>'.encode("latin-1")):
+ if s.startswith("<rdflibtoplevelelement>".encode("latin-1")):
s = s[23:-24]
- if s == '<rdflibtoplevelelement/>'.encode("latin-1"):
- s = ''.encode("latin-1")
+ if s == "<rdflibtoplevelelement/>".encode("latin-1"):
+ s = "".encode("latin-1")
return s
@@ -1354,60 +1400,62 @@ def _unhexlify(value):
def _parseBoolean(value):
- true_accepted_values = ['1', 'true']
- false_accepted_values = ['0', 'false']
+ true_accepted_values = ["1", "true"]
+ false_accepted_values = ["0", "false"]
new_value = value.lower()
if new_value in true_accepted_values:
return True
if new_value not in false_accepted_values:
- warnings.warn('Parsing weird boolean, % r does not map to True or False' % value, category = DeprecationWarning)
+ warnings.warn(
+ "Parsing weird boolean, % r does not map to True or False" % value,
+ category=DeprecationWarning,
+ )
return False
+
# Cannot import Namespace/XSD because of circular dependencies
-_XSD_PFX = 'http://www.w3.org/2001/XMLSchema#'
-_RDF_PFX = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
+_XSD_PFX = "http://www.w3.org/2001/XMLSchema#"
+_RDF_PFX = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-_RDF_XMLLITERAL = URIRef(_RDF_PFX + 'XMLLiteral')
-_RDF_HTMLLITERAL = URIRef(_RDF_PFX + 'HTML')
+_RDF_XMLLITERAL = URIRef(_RDF_PFX + "XMLLiteral")
+_RDF_HTMLLITERAL = URIRef(_RDF_PFX + "HTML")
-_XSD_STRING = URIRef(_XSD_PFX + 'string')
+_XSD_STRING = URIRef(_XSD_PFX + "string")
-_XSD_FLOAT = URIRef(_XSD_PFX + 'float')
-_XSD_DOUBLE = URIRef(_XSD_PFX + 'double')
-_XSD_DECIMAL = URIRef(_XSD_PFX + 'decimal')
-_XSD_INTEGER = URIRef(_XSD_PFX + 'integer')
-_XSD_BOOLEAN = URIRef(_XSD_PFX + 'boolean')
+_XSD_FLOAT = URIRef(_XSD_PFX + "float")
+_XSD_DOUBLE = URIRef(_XSD_PFX + "double")
+_XSD_DECIMAL = URIRef(_XSD_PFX + "decimal")
+_XSD_INTEGER = URIRef(_XSD_PFX + "integer")
+_XSD_BOOLEAN = URIRef(_XSD_PFX + "boolean")
-_XSD_DATETIME = URIRef(_XSD_PFX + 'dateTime')
-_XSD_DATE = URIRef(_XSD_PFX + 'date')
-_XSD_TIME = URIRef(_XSD_PFX + 'time')
-_XSD_DURATION = URIRef(_XSD_PFX + 'duration')
-_XSD_DAYTIMEDURATION = URIRef(_XSD_PFX + 'dayTimeDuration')
-_XSD_YEARMONTHDURATION = URIRef(_XSD_PFX + 'yearMonthDuration')
+_XSD_DATETIME = URIRef(_XSD_PFX + "dateTime")
+_XSD_DATE = URIRef(_XSD_PFX + "date")
+_XSD_TIME = URIRef(_XSD_PFX + "time")
+_XSD_DURATION = URIRef(_XSD_PFX + "duration")
+_XSD_DAYTIMEDURATION = URIRef(_XSD_PFX + "dayTimeDuration")
+_XSD_YEARMONTHDURATION = URIRef(_XSD_PFX + "yearMonthDuration")
-_OWL_RATIONAL = URIRef('http://www.w3.org/2002/07/owl#rational')
-_XSD_HEXBINARY = URIRef(_XSD_PFX + 'hexBinary')
+_OWL_RATIONAL = URIRef("http://www.w3.org/2002/07/owl#rational")
+_XSD_HEXBINARY = URIRef(_XSD_PFX + "hexBinary")
# TODO: gYearMonth, gYear, gMonthDay, gDay, gMonth
_NUMERIC_LITERAL_TYPES = (
_XSD_INTEGER,
_XSD_DECIMAL,
_XSD_DOUBLE,
- URIRef(_XSD_PFX + 'float'),
-
- URIRef(_XSD_PFX + 'byte'),
- URIRef(_XSD_PFX + 'int'),
- URIRef(_XSD_PFX + 'long'),
- URIRef(_XSD_PFX + 'negativeInteger'),
- URIRef(_XSD_PFX + 'nonNegativeInteger'),
- URIRef(_XSD_PFX + 'nonPositiveInteger'),
- URIRef(_XSD_PFX + 'positiveInteger'),
- URIRef(_XSD_PFX + 'short'),
- URIRef(_XSD_PFX + 'unsignedByte'),
- URIRef(_XSD_PFX + 'unsignedInt'),
- URIRef(_XSD_PFX + 'unsignedLong'),
- URIRef(_XSD_PFX + 'unsignedShort'),
-
+ URIRef(_XSD_PFX + "float"),
+ URIRef(_XSD_PFX + "byte"),
+ URIRef(_XSD_PFX + "int"),
+ URIRef(_XSD_PFX + "long"),
+ URIRef(_XSD_PFX + "negativeInteger"),
+ URIRef(_XSD_PFX + "nonNegativeInteger"),
+ URIRef(_XSD_PFX + "nonPositiveInteger"),
+ URIRef(_XSD_PFX + "positiveInteger"),
+ URIRef(_XSD_PFX + "short"),
+ URIRef(_XSD_PFX + "unsignedByte"),
+ URIRef(_XSD_PFX + "unsignedInt"),
+ URIRef(_XSD_PFX + "unsignedLong"),
+ URIRef(_XSD_PFX + "unsignedShort"),
)
# these have "native" syntax in N3/SPARQL
@@ -1416,12 +1464,12 @@ _PLAIN_LITERAL_TYPES = (
_XSD_BOOLEAN,
_XSD_DOUBLE,
_XSD_DECIMAL,
- _OWL_RATIONAL
+ _OWL_RATIONAL,
)
# these have special INF and NaN XSD representations
_NUMERIC_INF_NAN_LITERAL_TYPES = (
- URIRef(_XSD_PFX + 'float'),
+ URIRef(_XSD_PFX + "float"),
_XSD_DOUBLE,
_XSD_DECIMAL,
)
@@ -1433,12 +1481,12 @@ _TOTAL_ORDER_CASTERS = {
datetime: lambda value: (
# naive vs. aware
value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None,
- value
+ value,
),
time: lambda value: (
# naive vs. aware
value.tzinfo is not None and value.tzinfo.utcoffset(None) is not None,
- value
+ value,
),
xml.dom.minidom.Document: lambda value: value.toxml(),
}
@@ -1448,8 +1496,8 @@ _STRING_LITERAL_TYPES = (
_XSD_STRING,
_RDF_XMLLITERAL,
_RDF_HTMLLITERAL,
- URIRef(_XSD_PFX + 'normalizedString'),
- URIRef(_XSD_PFX + 'token')
+ URIRef(_XSD_PFX + "normalizedString"),
+ URIRef(_XSD_PFX + "token"),
)
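
(Illustration, not part of the diff: the XSD constants above, together with the conversion tables that follow, drive Literal's lexical-to-Python mapping; values are illustrative.)

    from rdflib import Literal
    from rdflib.namespace import XSD

    print(Literal("13", datatype=XSD.integer).toPython())  # 13, via XSDToPython
    print(Literal("4.2", datatype=XSD.double).toPython())  # 4.2
    print(Literal(True).datatype)  # xsd:boolean, via the Python-to-XSD rules
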
@@ -1493,65 +1541,65 @@ from decimal import Decimal
_GenericPythonToXSDRules = [
(str, (None, None)),
(float, (None, _XSD_DOUBLE)),
- (bool, (lambda i:str(i).lower(), _XSD_BOOLEAN)),
+ (bool, (lambda i: str(i).lower(), _XSD_BOOLEAN)),
(int, (None, _XSD_INTEGER)),
(long_type, (None, _XSD_INTEGER)),
(Decimal, (None, _XSD_DECIMAL)),
- (datetime, (lambda i:i.isoformat(), _XSD_DATETIME)),
- (date, (lambda i:i.isoformat(), _XSD_DATE)),
- (time, (lambda i:i.isoformat(), _XSD_TIME)),
- (Duration, (lambda i:duration_isoformat(i), _XSD_DURATION)),
- (timedelta, (lambda i:duration_isoformat(i), _XSD_DAYTIMEDURATION)),
+ (datetime, (lambda i: i.isoformat(), _XSD_DATETIME)),
+ (date, (lambda i: i.isoformat(), _XSD_DATE)),
+ (time, (lambda i: i.isoformat(), _XSD_TIME)),
+ (Duration, (lambda i: duration_isoformat(i), _XSD_DURATION)),
+ (timedelta, (lambda i: duration_isoformat(i), _XSD_DAYTIMEDURATION)),
(xml.dom.minidom.Document, (_writeXML, _RDF_XMLLITERAL)),
# this is a bit dirty - by accident the html5lib parser produces
# DocumentFragments, and the xml parser Documents, letting this
    # decide what datatype to use makes roundtripping easier, but it is
    # a bit random
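    # (Illustrative consequence, assuming the html5lib-based HTML parser is
    #  installed: an rdf:HTML literal's .value is a DocumentFragment and an
    #  rdf:XMLLiteral's .value is a full Document, so each matches exactly one
    #  of the two rules here when re-serialized.)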
(xml.dom.minidom.DocumentFragment, (_writeXML, _RDF_HTMLLITERAL)),
- (Fraction, (None, _OWL_RATIONAL))
+ (Fraction, (None, _OWL_RATIONAL)),
]
_SpecificPythonToXSDRules = [
((str, _XSD_HEXBINARY), hexlify),
- ((bytes, _XSD_HEXBINARY), hexlify)
+ ((bytes, _XSD_HEXBINARY), hexlify),
]
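# Illustrative datatype-specific lexicalization (hypothetical value):
# hexlify(b"\xde\xad") yields b"dead", so a bytes value bound to xsd:hexBinary
# serializes with that hex string as its lexical form.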
XSDToPython = {
None: None, # plain literals map directly to value space
- URIRef(_XSD_PFX + 'time'): parse_time,
- URIRef(_XSD_PFX + 'date'): parse_date,
- URIRef(_XSD_PFX + 'gYear'): parse_date,
- URIRef(_XSD_PFX + 'gYearMonth'): parse_date,
- URIRef(_XSD_PFX + 'dateTime'): parse_datetime,
- URIRef(_XSD_PFX + 'duration'): parse_duration,
- URIRef(_XSD_PFX + 'dayTimeDuration'): parse_duration,
- URIRef(_XSD_PFX + 'yearMonthDuration'): parse_duration,
- URIRef(_XSD_PFX + 'hexBinary'): _unhexlify,
- URIRef(_XSD_PFX + 'string'): None,
- URIRef(_XSD_PFX + 'normalizedString'): None,
- URIRef(_XSD_PFX + 'token'): None,
- URIRef(_XSD_PFX + 'language'): None,
- URIRef(_XSD_PFX + 'boolean'): _parseBoolean,
- URIRef(_XSD_PFX + 'decimal'): Decimal,
- URIRef(_XSD_PFX + 'integer'): long_type,
- URIRef(_XSD_PFX + 'nonPositiveInteger'): int,
- URIRef(_XSD_PFX + 'long'): long_type,
- URIRef(_XSD_PFX + 'nonNegativeInteger'): int,
- URIRef(_XSD_PFX + 'negativeInteger'): int,
- URIRef(_XSD_PFX + 'int'): long_type,
- URIRef(_XSD_PFX + 'unsignedLong'): long_type,
- URIRef(_XSD_PFX + 'positiveInteger'): int,
- URIRef(_XSD_PFX + 'short'): int,
- URIRef(_XSD_PFX + 'unsignedInt'): long_type,
- URIRef(_XSD_PFX + 'byte'): int,
- URIRef(_XSD_PFX + 'unsignedShort'): int,
- URIRef(_XSD_PFX + 'unsignedByte'): int,
- URIRef(_XSD_PFX + 'float'): float,
- URIRef(_XSD_PFX + 'double'): float,
- URIRef(_XSD_PFX + 'base64Binary'): lambda s: base64.b64decode(s),
- URIRef(_XSD_PFX + 'anyURI'): None,
+ URIRef(_XSD_PFX + "time"): parse_time,
+ URIRef(_XSD_PFX + "date"): parse_date,
+ URIRef(_XSD_PFX + "gYear"): parse_date,
+ URIRef(_XSD_PFX + "gYearMonth"): parse_date,
+ URIRef(_XSD_PFX + "dateTime"): parse_datetime,
+ URIRef(_XSD_PFX + "duration"): parse_duration,
+ URIRef(_XSD_PFX + "dayTimeDuration"): parse_duration,
+ URIRef(_XSD_PFX + "yearMonthDuration"): parse_duration,
+ URIRef(_XSD_PFX + "hexBinary"): _unhexlify,
+ URIRef(_XSD_PFX + "string"): None,
+ URIRef(_XSD_PFX + "normalizedString"): None,
+ URIRef(_XSD_PFX + "token"): None,
+ URIRef(_XSD_PFX + "language"): None,
+ URIRef(_XSD_PFX + "boolean"): _parseBoolean,
+ URIRef(_XSD_PFX + "decimal"): Decimal,
+ URIRef(_XSD_PFX + "integer"): long_type,
+ URIRef(_XSD_PFX + "nonPositiveInteger"): int,
+ URIRef(_XSD_PFX + "long"): long_type,
+ URIRef(_XSD_PFX + "nonNegativeInteger"): int,
+ URIRef(_XSD_PFX + "negativeInteger"): int,
+ URIRef(_XSD_PFX + "int"): long_type,
+ URIRef(_XSD_PFX + "unsignedLong"): long_type,
+ URIRef(_XSD_PFX + "positiveInteger"): int,
+ URIRef(_XSD_PFX + "short"): int,
+ URIRef(_XSD_PFX + "unsignedInt"): long_type,
+ URIRef(_XSD_PFX + "byte"): int,
+ URIRef(_XSD_PFX + "unsignedShort"): int,
+ URIRef(_XSD_PFX + "unsignedByte"): int,
+ URIRef(_XSD_PFX + "float"): float,
+ URIRef(_XSD_PFX + "double"): float,
+ URIRef(_XSD_PFX + "base64Binary"): lambda s: base64.b64decode(s),
+ URIRef(_XSD_PFX + "anyURI"): None,
_RDF_XMLLITERAL: _parseXML,
- _RDF_HTMLLITERAL: _parseHTML
+ _RDF_HTMLLITERAL: _parseHTML,
}
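# Illustrative effect of this mapping (hypothetical literals): a literal
# "13"^^xsd:integer converts to the Python int 13 via long_type, while a
# literal typed with an unregistered datatype has no convFunc and keeps its
# lexical form.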
_toPythonMapping = {}
@@ -1576,13 +1624,15 @@ def _castLexicalToPython(lexical, datatype):
try:
return str(lexical)
except UnicodeDecodeError:
- return str(lexical, 'utf-8')
+ return str(lexical, "utf-8")
else:
# no convFunc - unknown data-type
return None
-def bind(datatype, pythontype, constructor=None, lexicalizer=None, datatype_specific=False):
+def bind(
+ datatype, pythontype, constructor=None, lexicalizer=None, datatype_specific=False
+):
"""
register a new datatype<->pythontype binding
@@ -1601,8 +1651,7 @@ def bind(datatype, pythontype, constructor=None, lexicalizer=None, datatype_spec
raise Exception("No datatype given for a datatype-specific binding")
if datatype in _toPythonMapping:
- logger.warning("datatype '%s' was already bound. Rebinding." %
- datatype)
+ logger.warning("datatype '%s' was already bound. Rebinding." % datatype)
if constructor is None:
constructor = pythontype
@@ -1618,13 +1667,13 @@ class Variable(Identifier):
A Variable - this is used for querying, or in Formula aware
    graphs, where Variables can be stored in the graph
"""
+
__slots__ = ()
def __new__(cls, value):
if len(value) == 0:
- raise Exception(
- "Attempted to create variable with empty string as name!")
- if value[0] == '?':
+ raise Exception("Attempted to create variable with empty string as name!")
+ if value[0] == "?":
value = value[1:]
return str.__new__(cls, value)
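        # e.g. Variable("?x") and Variable("x") are equivalent: the leading
        # "?" is stripped and only the bare name "x" is stored.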
@@ -1647,13 +1696,14 @@ class Variable(Identifier):
class Statement(Node, tuple):
-
def __new__(cls, triple, context):
subject, predicate, object = triple
warnings.warn(
- "Class Statement is deprecated, and will be removed in " +
- "the future. If you use this please let rdflib-dev know!",
- category=DeprecationWarning, stacklevel=2)
+ "Class Statement is deprecated, and will be removed in "
+ + "the future. If you use this please let rdflib-dev know!",
+ category=DeprecationWarning,
+ stacklevel=2,
+ )
return tuple.__new__(cls, ((subject, predicate, object), context))
def __reduce__(self):
@@ -1668,12 +1718,7 @@ class Statement(Node, tuple):
# we leave "space" for more subclasses of Node elsewhere
# default-dict to gracefully fail for new subclasses
_ORDERING = defaultdict(int)
-_ORDERING.update({
- BNode: 10,
- Variable: 20,
- URIRef: 30,
- Literal: 40
-})
+_ORDERING.update({BNode: 10, Variable: 20, URIRef: 30, Literal: 40})
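+# Illustrative consequence: when Nodes of different types are ordered, a BNode
+# sorts before a Variable, then a URIRef, then a Literal, and an unregistered
+# Node subclass defaults to rank 0 and sorts first.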
def _isEqualXMLNode(node, other):
@@ -1687,8 +1732,7 @@ def _isEqualXMLNode(node, other):
# for the length becomes necessary...
if len(node.childNodes) != len(other.childNodes):
return False
- for (nc, oc) in map(
- lambda x, y: (x, y), node.childNodes, other.childNodes):
+ for (nc, oc) in map(lambda x, y: (x, y), node.childNodes, other.childNodes):
if not _isEqualXMLNode(nc, oc):
return False
# if we got here then everything is fine:
@@ -1705,8 +1749,9 @@ def _isEqualXMLNode(node, other):
elif node.nodeType == Node.ELEMENT_NODE:
# Get the basics right
- if not (node.tagName == other.tagName and
- node.namespaceURI == other.namespaceURI):
+ if not (
+ node.tagName == other.tagName and node.namespaceURI == other.namespaceURI
+ ):
return False
# Handle the (namespaced) attributes; the namespace setting key
@@ -1714,17 +1759,22 @@ def _isEqualXMLNode(node, other):
# Note that the minidom orders the keys already, so we do not have
# to worry about that, which is a bonus...
n_keys = [
- k for k in node.attributes.keysNS()
- if k[0] != 'http://www.w3.org/2000/xmlns/']
+ k
+ for k in node.attributes.keysNS()
+ if k[0] != "http://www.w3.org/2000/xmlns/"
+ ]
o_keys = [
- k for k in other.attributes.keysNS()
- if k[0] != 'http://www.w3.org/2000/xmlns/']
+ k
+ for k in other.attributes.keysNS()
+ if k[0] != "http://www.w3.org/2000/xmlns/"
+ ]
if len(n_keys) != len(o_keys):
return False
for k in n_keys:
- if not (k in o_keys
- and node.getAttributeNS(k[0], k[1]) ==
- other.getAttributeNS(k[0], k[1])):
+ if not (
+ k in o_keys
+ and node.getAttributeNS(k[0], k[1]) == other.getAttributeNS(k[0], k[1])
+ ):
return False
# if we got here, the attributes are all right, we can go down
@@ -1732,8 +1782,11 @@ def _isEqualXMLNode(node, other):
return recurse()
elif node.nodeType in [
- Node.TEXT_NODE, Node.COMMENT_NODE, Node.CDATA_SECTION_NODE,
- Node.NOTATION_NODE]:
+ Node.TEXT_NODE,
+ Node.COMMENT_NODE,
+ Node.CDATA_SECTION_NODE,
+ Node.NOTATION_NODE,
+ ]:
return node.data == other.data
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
@@ -1743,15 +1796,14 @@ def _isEqualXMLNode(node, other):
return node.nodeValue == other.nodeValue
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
- return node.publicId == other.publicId \
- and node.systemId == other.system.Id
+        return node.publicId == other.publicId and node.systemId == other.systemId
else:
# should not happen, in fact
- raise Exception(
- 'I dont know how to compare XML Node type: %s' % node.nodeType)
+ raise Exception("I dont know how to compare XML Node type: %s" % node.nodeType)
-if __name__ == '__main__':
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
diff --git a/rdflib/tools/csv2rdf.py b/rdflib/tools/csv2rdf.py
index ef246a73..cec1005c 100644
--- a/rdflib/tools/csv2rdf.py
+++ b/rdflib/tools/csv2rdf.py
@@ -24,7 +24,7 @@ import rdflib
from rdflib import RDF, RDFS
from rdflib.namespace import split_uri
-__all__ = ['CSV2RDF']
+__all__ = ["CSV2RDF"]
HELP = """
csv2rdf.py \
@@ -124,23 +124,21 @@ def index(l, i):
def csv_reader(csv_data, dialect=csv.excel, **kwargs):
- csv_reader = csv.reader(csv_data,
- dialect=dialect, **kwargs)
+ csv_reader = csv.reader(csv_data, dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
- yield [str(cell, 'utf-8', errors='replace') for cell in row]
+ yield [str(cell, "utf-8", errors="replace") for cell in row]
def prefixuri(x, prefix, class_=None):
if prefix:
- r = rdflib.URIRef(
- prefix + quote(
- x.encode("utf8").replace(" ", "_"), safe=""))
+ r = rdflib.URIRef(prefix + quote(x.encode("utf8").replace(" ", "_"), safe=""))
else:
r = rdflib.URIRef(x)
uris[x] = (r, class_)
return r
+
# meta-language for config
@@ -235,8 +233,7 @@ class NodeSplit(NodeMaker):
self.f = rdflib.Literal
if not callable(self.f):
raise Exception("Function passed to split is not callable!")
- return [
- self.f(y.strip()) for y in x.split(self.sep) if y.strip() != ""]
+ return [self.f(y.strip()) for y in x.split(self.sep) if y.strip() != ""]
def range(self):
if self.f and isinstance(self.f, NodeMaker):
@@ -283,16 +280,17 @@ def _config_split(sep=None, f=None):
return NodeSplit(sep, f)
-config_functions = {"ignore": _config_ignore,
- "uri": _config_uri,
- "literal": _config_literal,
- "float": _config_float,
- "int": _config_int,
- "date": _config_date,
- "split": _config_split,
- "replace": _config_replace,
- "bool": _config_bool,
- }
+config_functions = {
+ "ignore": _config_ignore,
+ "uri": _config_uri,
+ "literal": _config_literal,
+ "float": _config_float,
+ "int": _config_int,
+ "date": _config_date,
+ "split": _config_split,
+ "replace": _config_replace,
+ "bool": _config_bool,
+}
def column(v):
@@ -307,7 +305,7 @@ class CSV2RDF(object):
self.CLASS = None
self.BASE = None
self.PROPBASE = None
- self.IDENT = 'auto'
+ self.IDENT = "auto"
self.LABEL = None
self.DEFINECLASS = False
self.SKIP = 0
@@ -317,7 +315,7 @@ class CSV2RDF(object):
self.COLUMNS = {}
self.PROPS = {}
- self.OUT = codecs.getwriter("utf-8")(sys.stdout, errors='replace')
+ self.OUT = codecs.getwriter("utf-8")(sys.stdout, errors="replace")
self.triples = 0
@@ -340,8 +338,7 @@ class CSV2RDF(object):
self.BASE = rdflib.Namespace("http://example.org/instances/")
if not self.PROPBASE:
- warnings.warn(
- "No property base given, using http://example.org/property/")
+ warnings.warn("No property base given, using http://example.org/property/")
self.PROPBASE = rdflib.Namespace("http://example.org/props/")
# skip lines at the start
@@ -350,8 +347,7 @@ class CSV2RDF(object):
# read header line
        header_labels = list(next(csvreader))
- headers = dict(
- enumerate([self.PROPBASE[toProperty(x)] for x in header_labels]))
+ headers = dict(enumerate([self.PROPBASE[toProperty(x)] for x in header_labels]))
# override header properties if some are given
for k, v in self.PROPS.items():
headers[k] = v
@@ -364,27 +360,34 @@ class CSV2RDF(object):
h, l = headers[i], header_labels[i]
if h == "" or l == "":
continue
- if self.COLUMNS.get(i, self.DEFAULT) == 'ignore':
+ if self.COLUMNS.get(i, self.DEFAULT) == "ignore":
continue
self.triple(h, RDF.type, RDF.Property)
self.triple(h, RDFS.label, rdflib.Literal(toPropertyLabel(l)))
self.triple(h, RDFS.domain, self.CLASS)
- self.triple(h, RDFS.range,
- self.COLUMNS.get(i, default_node_make).range())
+ self.triple(
+ h, RDFS.range, self.COLUMNS.get(i, default_node_make).range()
+ )
rows = 0
for l in csvreader:
try:
- if self.IDENT == 'auto':
+ if self.IDENT == "auto":
uri = self.BASE["%d" % rows]
else:
- uri = self.BASE["_".join([quote(x.encode(
- "utf8").replace(" ", "_"), safe="")
- for x in index(l, self.IDENT)])]
+ uri = self.BASE[
+ "_".join(
+ [
+ quote(x.encode("utf8").replace(" ", "_"), safe="")
+ for x in index(l, self.IDENT)
+ ]
+ )
+ ]
if self.LABEL:
- self.triple(uri, RDFS.label, rdflib.Literal(
- " ".join(index(l, self.LABEL))))
+ self.triple(
+ uri, RDFS.label, rdflib.Literal(" ".join(index(l, self.LABEL)))
+ )
if self.CLASS:
# type triple
@@ -392,8 +395,8 @@ class CSV2RDF(object):
for i, x in enumerate(l):
x = x.strip()
- if x != '':
- if self.COLUMNS.get(i, self.DEFAULT) == 'ignore':
+ if x != "":
+ if self.COLUMNS.get(i, self.DEFAULT) == "ignore":
continue
try:
o = self.COLUMNS.get(i, rdflib.Literal)(x)
@@ -405,15 +408,17 @@ class CSV2RDF(object):
except Exception as e:
warnings.warn(
- "Could not process value for column " +
- "%d:%s in row %d, ignoring: %s " % (
- i, headers[i], rows, e.message))
+ "Could not process value for column "
+ + "%d:%s in row %d, ignoring: %s "
+                        % (i, headers[i], rows, e)
+ )
rows += 1
if rows % 100000 == 0:
sys.stderr.write(
- "%d rows, %d triples, elapsed %.2fs.\n" % (
- rows, self.triples, time.time() - start))
+ "%d rows, %d triples, elapsed %.2fs.\n"
+ % (rows, self.triples, time.time() - start)
+ )
except:
sys.stderr.write("Error processing line: %d\n" % rows)
raise
@@ -432,8 +437,7 @@ class CSV2RDF(object):
self.triple(c, RDF.type, RDFS.Class)
self.OUT.close()
- sys.stderr.write(
- "Converted %d rows into %d triples.\n" % (rows, self.triples))
+ sys.stderr.write("Converted %d rows into %d triples.\n" % (rows, self.triples))
sys.stderr.write("Took %.2f seconds.\n" % (time.time() - start))
@@ -443,8 +447,19 @@ def main():
opts, files = getopt.getopt(
sys.argv[1:],
"hc:b:p:i:o:Cf:l:s:d:D:",
- ["out=", "base=", "delim=", "propbase=", "class=", "default="
- "ident=", "label=", "skip=", "defineclass", "help"])
+ [
+ "out=",
+ "base=",
+ "delim=",
+ "propbase=",
+ "class=",
+ "default=" "ident=",
+ "label=",
+ "skip=",
+ "defineclass",
+ "help",
+ ],
+ )
opts = dict(opts)
if "-h" in opts or "--help" in opts:
@@ -534,9 +549,8 @@ def main():
if csv2rdf.CLASS and ("-C" in opts or "--defineclass" in opts):
csv2rdf.DEFINECLASS = True
- csv2rdf.convert(
- csv_reader(fileinput.input(files), delimiter=csv2rdf.DELIM))
+ csv2rdf.convert(csv_reader(fileinput.input(files), delimiter=csv2rdf.DELIM))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/rdflib/tools/graphisomorphism.py b/rdflib/tools/graphisomorphism.py
index abc84fa1..a073d7d9 100644
--- a/rdflib/tools/graphisomorphism.py
+++ b/rdflib/tools/graphisomorphism.py
@@ -69,39 +69,47 @@ class IsomorphicTestableGraph(Graph):
def main():
import sys
from optparse import OptionParser
- usage = '''usage: %prog [options] file1 file2 ... fileN'''
+
+ usage = """usage: %prog [options] file1 file2 ... fileN"""
op = OptionParser(usage=usage)
- op.add_option('-s', '--stdin', action='store_true', default=False,
- help='Load from STDIN as well')
- op.add_option('--format',
- default='xml',
- dest='inputFormat',
- metavar='RDF_FORMAT',
- choices=['xml', 'trix', 'n3', 'nt', 'rdfa'],
- help="The format of the RDF document(s) to compare" +
- "One of 'xml','n3','trix', 'nt', " +
- "or 'rdfa'. The default is %default")
+ op.add_option(
+ "-s",
+ "--stdin",
+ action="store_true",
+ default=False,
+ help="Load from STDIN as well",
+ )
+ op.add_option(
+ "--format",
+ default="xml",
+ dest="inputFormat",
+ metavar="RDF_FORMAT",
+ choices=["xml", "trix", "n3", "nt", "rdfa"],
+ help="The format of the RDF document(s) to compare"
+ + "One of 'xml','n3','trix', 'nt', "
+ + "or 'rdfa'. The default is %default",
+ )
(options, args) = op.parse_args()
graphs = []
graph2FName = {}
if options.stdin:
- graph = IsomorphicTestableGraph().parse(
- sys.stdin, format=options.inputFormat)
+ graph = IsomorphicTestableGraph().parse(sys.stdin, format=options.inputFormat)
graphs.append(graph)
- graph2FName[graph] = '(STDIN)'
+ graph2FName[graph] = "(STDIN)"
for fn in args:
- graph = IsomorphicTestableGraph().parse(
- fn, format=options.inputFormat)
+ graph = IsomorphicTestableGraph().parse(fn, format=options.inputFormat)
graphs.append(graph)
graph2FName[graph] = fn
checked = set()
for graph1, graph2 in combinations(graphs, 2):
if (graph1, graph2) not in checked and (graph2, graph1) not in checked:
assert graph1 == graph2, "%s != %s" % (
- graph2FName[graph1], graph2FName[graph2])
+ graph2FName[graph1],
+ graph2FName[graph2],
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/rdflib/tools/rdf2dot.py b/rdflib/tools/rdf2dot.py
index 84b251a1..9b622b66 100644
--- a/rdflib/tools/rdf2dot.py
+++ b/rdflib/tools/rdf2dot.py
@@ -20,23 +20,60 @@ import collections
from rdflib import XSD
-LABEL_PROPERTIES = [rdflib.RDFS.label,
- rdflib.URIRef("http://purl.org/dc/elements/1.1/title"),
- rdflib.URIRef("http://xmlns.com/foaf/0.1/name"),
- rdflib.URIRef("http://www.w3.org/2006/vcard/ns#fn"),
- rdflib.URIRef("http://www.w3.org/2006/vcard/ns#org")
- ]
+LABEL_PROPERTIES = [
+ rdflib.RDFS.label,
+ rdflib.URIRef("http://purl.org/dc/elements/1.1/title"),
+ rdflib.URIRef("http://xmlns.com/foaf/0.1/name"),
+ rdflib.URIRef("http://www.w3.org/2006/vcard/ns#fn"),
+ rdflib.URIRef("http://www.w3.org/2006/vcard/ns#org"),
+]
XSDTERMS = [
- XSD[x] for x in (
- "anyURI", "base64Binary", "boolean", "byte", "date",
- "dateTime", "decimal", "double", "duration", "float", "gDay", "gMonth",
- "gMonthDay", "gYear", "gYearMonth", "hexBinary", "ID", "IDREF",
- "IDREFS", "int", "integer", "language", "long", "Name", "NCName",
- "negativeInteger", "NMTOKEN", "NMTOKENS", "nonNegativeInteger",
- "nonPositiveInteger", "normalizedString", "positiveInteger", "QName",
- "short", "string", "time", "token", "unsignedByte", "unsignedInt",
- "unsignedLong", "unsignedShort")]
+ XSD[x]
+ for x in (
+ "anyURI",
+ "base64Binary",
+ "boolean",
+ "byte",
+ "date",
+ "dateTime",
+ "decimal",
+ "double",
+ "duration",
+ "float",
+ "gDay",
+ "gMonth",
+ "gMonthDay",
+ "gYear",
+ "gYearMonth",
+ "hexBinary",
+ "ID",
+ "IDREF",
+ "IDREFS",
+ "int",
+ "integer",
+ "language",
+ "long",
+ "Name",
+ "NCName",
+ "negativeInteger",
+ "NMTOKEN",
+ "NMTOKENS",
+ "nonNegativeInteger",
+ "nonPositiveInteger",
+ "normalizedString",
+ "positiveInteger",
+ "QName",
+ "short",
+ "string",
+ "time",
+ "token",
+ "unsignedByte",
+ "unsignedInt",
+ "unsignedLong",
+ "unsignedShort",
+ )
+]
EDGECOLOR = "blue"
NODECOLOR = "black"
@@ -73,10 +110,10 @@ def rdf2dot(g, stream, opts={}):
def formatliteral(l, g):
v = cgi.escape(l)
if l.datatype:
- return u'&quot;%s&quot;^^%s' % (v, qname(l.datatype, g))
+ return u"&quot;%s&quot;^^%s" % (v, qname(l.datatype, g))
elif l.language:
- return u'&quot;%s&quot;@%s' % (v, l.language)
- return u'&quot;%s&quot;' % v
+ return u"&quot;%s&quot;@%s" % (v, l.language)
+ return u"&quot;%s&quot;" % v
def qname(x, g):
try:
@@ -88,7 +125,7 @@ def rdf2dot(g, stream, opts={}):
def color(p):
return "BLACK"
- stream.write(u"digraph { \n node [ fontname=\"DejaVu Sans\" ] ; \n")
+ stream.write(u'digraph { \n node [ fontname="DejaVu Sans" ] ; \n')
for s, p, o in g:
sn = node(s)
@@ -96,40 +133,48 @@ def rdf2dot(g, stream, opts={}):
continue
if isinstance(o, (rdflib.URIRef, rdflib.BNode)):
on = node(o)
- opstr = u"\t%s -> %s [ color=%s, label=< <font point-size='10' " + \
- u"color='#336633'>%s</font> > ] ;\n"
+ opstr = (
+ u"\t%s -> %s [ color=%s, label=< <font point-size='10' "
+ + u"color='#336633'>%s</font> > ] ;\n"
+ )
stream.write(opstr % (sn, on, color(p), qname(p, g)))
else:
fields[sn].add((qname(p, g), formatliteral(o, g)))
for u, n in nodes.items():
stream.write(u"# %s %s\n" % (u, n))
- f = [u"<tr><td align='left'>%s</td><td align='left'>%s</td></tr>" %
- x for x in sorted(fields[n])]
- opstr = u"%s [ shape=none, color=%s label=< <table color='#666666'" + \
- u" cellborder='0' cellspacing='0' border='1'><tr>" + \
- u"<td colspan='2' bgcolor='grey'><B>%s</B></td></tr><tr>" + \
- u"<td href='%s' bgcolor='#eeeeee' colspan='2'>" + \
- u"<font point-size='10' color='#6666ff'>%s</font></td>" + \
- u"</tr>%s</table> > ] \n"
+ f = [
+ u"<tr><td align='left'>%s</td><td align='left'>%s</td></tr>" % x
+ for x in sorted(fields[n])
+ ]
+ opstr = (
+ u"%s [ shape=none, color=%s label=< <table color='#666666'"
+ + u" cellborder='0' cellspacing='0' border='1'><tr>"
+ + u"<td colspan='2' bgcolor='grey'><B>%s</B></td></tr><tr>"
+ + u"<td href='%s' bgcolor='#eeeeee' colspan='2'>"
+ + u"<font point-size='10' color='#6666ff'>%s</font></td>"
+ + u"</tr>%s</table> > ] \n"
+ )
stream.write(opstr % (n, NODECOLOR, label(u, g), u, u, u"".join(f)))
stream.write("}\n")
def _help():
- sys.stderr.write("""
+ sys.stderr.write(
+ """
rdf2dot.py [-f <format>] files...
Reads RDF files given on the command line and writes a graph of the RDF data
in DOT language to stdout
-f specifies parser to use, if not given,
-""")
+"""
+ )
def main():
rdflib.extras.cmdlineutils.main(rdf2dot, _help)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/rdflib/tools/rdfpipe.py b/rdflib/tools/rdfpipe.py
index 88cd99e9..4be352a0 100644
--- a/rdflib/tools/rdfpipe.py
+++ b/rdflib/tools/rdfpipe.py
@@ -22,14 +22,20 @@ from rdflib.serializer import Serializer
from rdflib.util import guess_format
+DEFAULT_INPUT_FORMAT = "xml"
+DEFAULT_OUTPUT_FORMAT = "n3"
-DEFAULT_INPUT_FORMAT = 'xml'
-DEFAULT_OUTPUT_FORMAT = 'n3'
-
-def parse_and_serialize(input_files, input_format, guess,
- outfile, output_format, ns_bindings,
- store_conn="", store_type=None):
+def parse_and_serialize(
+ input_files,
+ input_format,
+ guess,
+ outfile,
+ output_format,
+ ns_bindings,
+ store_conn="",
+ store_type=None,
+):
if store_type:
store = plugin.get(store_type, Store)()
@@ -44,7 +50,7 @@ def parse_and_serialize(input_files, input_format, guess,
for fpath in input_files:
use_format, kws = _format_and_kws(input_format)
- if fpath == '-':
+ if fpath == "-":
fpath = sys.stdin
elif not input_format and guess:
use_format = guess_format(fpath) or DEFAULT_INPUT_FORMAT
@@ -52,7 +58,7 @@ def parse_and_serialize(input_files, input_format, guess,
if outfile:
output_format, kws = _format_and_kws(output_format)
- kws.setdefault('base', None)
+ kws.setdefault("base", None)
graph.serialize(destination=outfile, format=output_format, **kws)
if store:
@@ -75,15 +81,15 @@ def _format_and_kws(fmt):
('fmt', {'a': 'b:c'})
"""
fmt, kws = fmt, {}
- if fmt and ':' in fmt:
- fmt, kwrepr = fmt.split(':', 1)
- for kw in kwrepr.split(','):
- if '=' in kw:
- k, v = kw.split('=')
+ if fmt and ":" in fmt:
+ fmt, kwrepr = fmt.split(":", 1)
+ for kw in kwrepr.split(","):
+ if "=" in kw:
+ k, v = kw.split("=")
kws[k] = v
- elif kw.startswith('-'):
+ elif kw.startswith("-"):
kws[kw[1:]] = False
- elif kw.startswith('+'):
+ elif kw.startswith("+"):
kws[kw[1:]] = True
else: # same as "+"
kws[kw] = True
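    # For illustration, a hypothetical spec exercising every branch above:
    #   _format_and_kws("turtle:+canonical,-spacious,indent=2")
    #   -> ("turtle", {"canonical": True, "spacious": False, "indent": "2"})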
@@ -96,62 +102,78 @@ def make_option_parser():
kw_example = "FORMAT:(+)KW1,-KW2,KW3=VALUE"
oparser = OptionParser(
- "%prog [-h] [-i INPUT_FORMAT] [-o OUTPUT_FORMAT] " +
- "[--ns=PFX=NS ...] [-] [FILE ...]",
- description=__doc__.strip() + (
+ "%prog [-h] [-i INPUT_FORMAT] [-o OUTPUT_FORMAT] "
+ + "[--ns=PFX=NS ...] [-] [FILE ...]",
+ description=__doc__.strip()
+ + (
" Reads file system paths, URLs or from stdin if '-' is given."
- " The result is serialized to stdout."),
- version="%prog " + "(using rdflib %s)" % rdflib.__version__)
+ " The result is serialized to stdout."
+ ),
+ version="%prog " + "(using rdflib %s)" % rdflib.__version__,
+ )
oparser.add_option(
- '-i', '--input-format',
+ "-i",
+ "--input-format",
type=str, # default=DEFAULT_INPUT_FORMAT,
help="Format of the input document(s)."
- " Available input formats are: %s." % parser_names +
- " If no format is given, it will be " +
- "guessed from the file name extension." +
- " Keywords to parser can be given after format like: %s." % kw_example,
- metavar="INPUT_FORMAT")
+ " Available input formats are: %s." % parser_names
+ + " If no format is given, it will be "
+ + "guessed from the file name extension."
+ + " Keywords to parser can be given after format like: %s." % kw_example,
+ metavar="INPUT_FORMAT",
+ )
oparser.add_option(
- '-o', '--output-format',
- type=str, default=DEFAULT_OUTPUT_FORMAT,
+ "-o",
+ "--output-format",
+ type=str,
+ default=DEFAULT_OUTPUT_FORMAT,
help="Format of the graph serialization."
- " Available output formats are: %s."
- % serializer_names +
- " Default format is: '%default'." +
- " Keywords to serializer can be given after format like: %s." %
- kw_example,
- metavar="OUTPUT_FORMAT")
+ " Available output formats are: %s." % serializer_names
+ + " Default format is: '%default'."
+ + " Keywords to serializer can be given after format like: %s." % kw_example,
+ metavar="OUTPUT_FORMAT",
+ )
oparser.add_option(
- '--ns',
- action="append", type=str,
+ "--ns",
+ action="append",
+ type=str,
help="Register a namespace binding (QName prefix to a base URI). "
"This can be used more than once.",
- metavar="PREFIX=NAMESPACE")
+ metavar="PREFIX=NAMESPACE",
+ )
oparser.add_option(
- '--no-guess', dest='guess',
- action='store_false', default=True,
- help="Don't guess format based on file suffix.")
+ "--no-guess",
+ dest="guess",
+ action="store_false",
+ default=True,
+ help="Don't guess format based on file suffix.",
+ )
oparser.add_option(
- '--no-out',
- action='store_true', default=False,
- help="Don't output the resulting graph " +
- "(useful for checking validity of input).")
+ "--no-out",
+ action="store_true",
+ default=False,
+ help="Don't output the resulting graph "
+ + "(useful for checking validity of input).",
+ )
oparser.add_option(
- '-w', '--warn',
- action='store_true', default=False,
- help="Output warnings to stderr (by default only critical errors).")
+ "-w",
+ "--warn",
+ action="store_true",
+ default=False,
+ help="Output warnings to stderr (by default only critical errors).",
+ )
return oparser
-def _get_plugin_names(kind): return ", ".join(
- p.name for p in plugin.plugins(kind=kind))
+def _get_plugin_names(kind):
+ return ", ".join(p.name for p in plugin.plugins(kind=kind))
def main():
@@ -170,7 +192,7 @@ def main():
ns_bindings = {}
if opts.ns:
for ns_kw in opts.ns:
- pfx, uri = ns_kw.split('=')
+ pfx, uri = ns_kw.split("=")
ns_bindings[pfx] = uri
outfile = sys.stdout.buffer
@@ -178,8 +200,9 @@ def main():
if opts.no_out:
outfile = None
- parse_and_serialize(args, opts.input_format, opts.guess,
- outfile, opts.output_format, ns_bindings)
+ parse_and_serialize(
+ args, opts.input_format, opts.guess, outfile, opts.output_format, ns_bindings
+ )
if __name__ == "__main__":
diff --git a/rdflib/tools/rdfs2dot.py b/rdflib/tools/rdfs2dot.py
index 7135fe62..4c31516f 100644
--- a/rdflib/tools/rdfs2dot.py
+++ b/rdflib/tools/rdfs2dot.py
@@ -21,14 +21,52 @@ import collections
from rdflib import XSD, RDF, RDFS
-XSDTERMS = [XSD[x] for x in (
- "anyURI", "base64Binary", "boolean", "byte", "date", "dateTime", "decimal",
- "double", "duration", "float", "gDay", "gMonth", "gMonthDay", "gYear",
- "gYearMonth", "hexBinary", "ID", "IDREF", "IDREFS", "int", "integer",
- "language", "long", "Name", "NCName", "negativeInteger", "NMTOKEN",
- "NMTOKENS", "nonNegativeInteger", "nonPositiveInteger", "normalizedString",
- "positiveInteger", "QName", "short", "string", "time", "token",
- "unsignedByte", "unsignedInt", "unsignedLong", "unsignedShort")]
+XSDTERMS = [
+ XSD[x]
+ for x in (
+ "anyURI",
+ "base64Binary",
+ "boolean",
+ "byte",
+ "date",
+ "dateTime",
+ "decimal",
+ "double",
+ "duration",
+ "float",
+ "gDay",
+ "gMonth",
+ "gMonthDay",
+ "gYear",
+ "gYearMonth",
+ "hexBinary",
+ "ID",
+ "IDREF",
+ "IDREFS",
+ "int",
+ "integer",
+ "language",
+ "long",
+ "Name",
+ "NCName",
+ "negativeInteger",
+ "NMTOKEN",
+ "NMTOKENS",
+ "nonNegativeInteger",
+ "nonPositiveInteger",
+ "normalizedString",
+ "positiveInteger",
+ "QName",
+ "short",
+ "string",
+ "time",
+ "token",
+ "unsignedByte",
+ "unsignedInt",
+ "unsignedLong",
+ "unsignedShort",
+ )
+]
EDGECOLOR = "blue"
NODECOLOR = "black"
@@ -60,7 +98,7 @@ def rdfs2dot(g, stream, opts={}):
pass # bnodes and some weird URIs cannot be split
return l
- stream.write(u"digraph { \n node [ fontname=\"DejaVu Sans\" ] ; \n")
+ stream.write(u'digraph { \n node [ fontname="DejaVu Sans" ] ; \n')
for x in g.subjects(RDF.type, RDFS.Class):
n = node(x)
@@ -72,7 +110,8 @@ def rdfs2dot(g, stream, opts={}):
for x in g.subjects(RDF.type, RDF.Property):
for a, b in itertools.product(
- g.objects(x, RDFS.domain), g.objects(x, RDFS.range)):
+ g.objects(x, RDFS.domain), g.objects(x, RDFS.range)
+ ):
if b in XSDTERMS or b == RDFS.Literal:
l = label(b, g)
if b == RDFS.Literal:
@@ -81,35 +120,42 @@ def rdfs2dot(g, stream, opts={}):
else:
# if a in nodes and b in nodes:
stream.write(
- "\t%s -> %s [ color=%s, label=\"%s\" ];\n" % (
- node(a), node(b), EDGECOLOR, label(x, g)))
+ '\t%s -> %s [ color=%s, label="%s" ];\n'
+ % (node(a), node(b), EDGECOLOR, label(x, g))
+ )
for u, n in nodes.items():
stream.write(u"# %s %s\n" % (u, n))
- f = [u"<tr><td align='left'>%s</td><td>%s</td></tr>" %
- x for x in sorted(fields[n])]
- opstr = u"%s [ shape=none, color=%s label=< <table color='#666666'" + \
- u" cellborder=\"0\" cellspacing='0' border=\"1\"><tr>" + \
- u"<td colspan=\"2\" bgcolor='grey'><B>%s</B></td>" + \
- u"</tr>%s</table> > ] \n"
+ f = [
+ u"<tr><td align='left'>%s</td><td>%s</td></tr>" % x
+ for x in sorted(fields[n])
+ ]
+ opstr = (
+ u"%s [ shape=none, color=%s label=< <table color='#666666'"
+ + u' cellborder="0" cellspacing=\'0\' border="1"><tr>'
+ + u"<td colspan=\"2\" bgcolor='grey'><B>%s</B></td>"
+ + u"</tr>%s</table> > ] \n"
+ )
stream.write(opstr % (n, NODECOLOR, label(u, g), u"".join(f)))
stream.write("}\n")
def _help():
- sys.stderr.write("""
+ sys.stderr.write(
+ """
rdfs2dot.py [-f <format>] files...
Reads RDF files given on the command line and writes a graph of the RDFS
schema in DOT language to stdout
-f specifies parser to use, if not given,
-""")
+"""
+ )
def main():
rdflib.extras.cmdlineutils.main(rdfs2dot, _help)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/rdflib/util.py b/rdflib/util.py
index 1789aa70..38faf06f 100644
--- a/rdflib/util.py
+++ b/rdflib/util.py
@@ -34,6 +34,7 @@ from __future__ import print_function
from calendar import timegm
from time import altzone
+
# from time import daylight
from time import gmtime
from time import localtime
@@ -56,10 +57,24 @@ from rdflib.term import URIRef
from rdflib.compat import sign
__all__ = [
- 'list2set', 'first', 'uniq', 'more_than', 'to_term', 'from_n3',
- 'date_time', 'parse_date_time', 'check_context', 'check_subject',
- 'check_predicate', 'check_object', 'check_statement', 'check_pattern',
- 'guess_format', 'find_roots', 'get_tree']
+ "list2set",
+ "first",
+ "uniq",
+ "more_than",
+ "to_term",
+ "from_n3",
+ "date_time",
+ "parse_date_time",
+ "check_context",
+ "check_subject",
+ "check_predicate",
+ "check_object",
+ "check_statement",
+ "check_pattern",
+ "guess_format",
+ "find_roots",
+ "get_tree",
+]
def list2set(seq):
@@ -155,7 +170,7 @@ def from_n3(s, default=None, backend=None, nsm=None):
'''
if not s:
return default
- if s.startswith('<'):
+ if s.startswith("<"):
# Hack: this should correctly handle strings with either native unicode
# characters, or \u1234 unicode escapes.
return URIRef(s[1:-1].encode("raw-unicode-escape").decode("unicode-escape"))
@@ -165,44 +180,44 @@ def from_n3(s, default=None, backend=None, nsm=None):
else:
quotes = '"'
value, rest = s.rsplit(quotes, 1)
- value = value[len(quotes):] # strip leading quotes
+ value = value[len(quotes) :] # strip leading quotes
datatype = None
language = None
# as a given datatype overrules lang-tag check for it first
- dtoffset = rest.rfind('^^')
+ dtoffset = rest.rfind("^^")
if dtoffset >= 0:
# found a datatype
# datatype has to come after lang-tag so ignore everything before
# see: http://www.w3.org/TR/2011/WD-turtle-20110809/
# #prod-turtle2-RDFLiteral
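        # e.g. for the hypothetical input '"1"^^<http://www.w3.org/2001/XMLSchema#integer>'
        # rest is '^^<...#integer>', so the slice below hands just the datatype
        # URI to a recursive from_n3 call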
- datatype = from_n3(rest[dtoffset + 2:], default, backend, nsm)
+ datatype = from_n3(rest[dtoffset + 2 :], default, backend, nsm)
else:
if rest.startswith("@"):
language = rest[1:] # strip leading at sign
- value = value.replace(r'\"', '"')
+ value = value.replace(r"\"", '"')
# Hack: this should correctly handle strings with either native unicode
# characters, or \u1234 unicode escapes.
value = value.encode("raw-unicode-escape").decode("unicode-escape")
return Literal(value, language, datatype)
- elif s == 'true' or s == 'false':
- return Literal(s == 'true')
+ elif s == "true" or s == "false":
+ return Literal(s == "true")
elif s.isdigit():
return Literal(int(s))
- elif s.startswith('{'):
+ elif s.startswith("{"):
identifier = from_n3(s[1:-1])
return QuotedGraph(backend, identifier)
- elif s.startswith('['):
+ elif s.startswith("["):
identifier = from_n3(s[1:-1])
return Graph(backend, identifier)
elif s.startswith("_:"):
return BNode(s[2:])
- elif ':' in s:
+ elif ":" in s:
if nsm is None:
# instantiate default NamespaceManager and rely on its defaults
nsm = NamespaceManager(Graph())
- prefix, last_part = s.split(':', 1)
+ prefix, last_part = s.split(":", 1)
ns = dict(nsm.namespaces())[prefix]
return Namespace(ns)[last_part]
else:
@@ -210,8 +225,7 @@ def from_n3(s, default=None, backend=None, nsm=None):
def check_context(c):
- if not (isinstance(c, URIRef) or
- isinstance(c, BNode)):
+ if not (isinstance(c, URIRef) or isinstance(c, BNode)):
raise ContextTypeError("%s:%s" % (c, type(c)))
@@ -229,9 +243,7 @@ def check_predicate(p):
def check_object(o):
""" Test that o is a valid object identifier."""
- if not (isinstance(o, URIRef) or
- isinstance(o, Literal) or
- isinstance(o, BNode)):
+ if not (isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)):
raise ObjectTypeError(o)
@@ -243,9 +255,7 @@ def check_statement(triple):
if not isinstance(p, URIRef):
raise PredicateTypeError(p)
- if not (isinstance(o, URIRef) or
- isinstance(o, Literal) or
- isinstance(o, BNode)):
+ if not (isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)):
raise ObjectTypeError(o)
@@ -257,9 +267,9 @@ def check_pattern(triple):
if p and not isinstance(p, URIRef):
raise PredicateTypeError(p)
- if o and not (isinstance(o, URIRef) or
- isinstance(o, Literal) or
- isinstance(o, BNode)):
+ if o and not (
+ isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)
+ ):
raise ObjectTypeError(o)
@@ -294,8 +304,7 @@ def date_time(t=None, local_time_zone=False):
tzd = "Z"
year, month, day, hh, mm, ss, wd, y, z = time_tuple
- s = "%0004d-%02d-%02dT%02d:%02d:%02d%s" % (
- year, month, day, hh, mm, ss, tzd)
+ s = "%0004d-%02d-%02dT%02d:%02d:%02d%s" % (year, month, day, hh, mm, ss, tzd)
return s
@@ -336,25 +345,26 @@ def parse_date_time(val):
year, month, day = ymd.split("-")
hour, minute, second = hms.split(":")
- t = timegm((int(year), int(month), int(day), int(hour),
- int(minute), int(second), 0, 0, 0))
+ t = timegm(
+ (int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, 0)
+ )
t = t + tz_offset
return t
SUFFIX_FORMAT_MAP = {
- 'rdf': 'xml',
- 'rdfs': 'xml',
- 'owl': 'xml',
- 'n3': 'n3',
- 'ttl': 'turtle',
- 'nt': 'nt',
- 'trix': 'trix',
- 'xhtml': 'rdfa',
- 'html': 'rdfa',
- 'svg': 'rdfa',
- 'nq': 'nquads',
- 'trig': 'trig'
+ "rdf": "xml",
+ "rdfs": "xml",
+ "owl": "xml",
+ "n3": "n3",
+ "ttl": "turtle",
+ "nt": "nt",
+ "trix": "trix",
+ "xhtml": "rdfa",
+ "html": "rdfa",
+ "svg": "rdfa",
+ "nq": "nquads",
+ "trig": "trig",
}
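# Illustrative lookups against this map (hypothetical paths):
#   guess_format("data/report.ttl") -> "turtle"
#   guess_format("schema.owl")      -> "xml"
#   guess_format("notes.xyz")       -> None (rdfpipe, for one, then falls back
#                                     to its DEFAULT_INPUT_FORMAT)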
@@ -405,11 +415,11 @@ def _get_ext(fpath, lower=True):
'rdf'
"""
ext = splitext(fpath)[-1]
- if ext == '' and fpath.startswith("."):
+ if ext == "" and fpath.startswith("."):
ext = fpath
if lower:
ext = ext.lower()
- if ext.startswith('.'):
+ if ext.startswith("."):
ext = ext[1:]
return ext
@@ -438,13 +448,9 @@ def find_roots(graph, prop, roots=None):
return roots
-def get_tree(graph,
- root,
- prop,
- mapper=lambda x: x,
- sortkey=None,
- done=None,
- dir='down'):
+def get_tree(
+ graph, root, prop, mapper=lambda x: x, sortkey=None, done=None, dir="down"
+):
"""
Return a nested list/tuple structure representing the tree
built by the transitive property given, starting from the root given
@@ -470,7 +476,7 @@ def get_tree(graph,
done.add(root)
tree = []
- if dir == 'down':
+ if dir == "down":
branches = graph.subjects(prop, root)
else:
branches = graph.objects(root, prop)
@@ -485,6 +491,7 @@ def get_tree(graph,
def test():
import doctest
+
doctest.testmod()
@@ -496,7 +503,7 @@ if __name__ == "__main__":
# time.tzset()
# except AttributeError, e:
# print e
- # pass
- # tzset missing! see
- # http://mail.python.org/pipermail/python-dev/2003-April/034480.html
+ # pass
+ # tzset missing! see
+ # http://mail.python.org/pipermail/python-dev/2003-April/034480.html
test() # pragma: no cover
diff --git a/rdflib/void.py b/rdflib/void.py
index db9bcc32..92a0e0be 100644
--- a/rdflib/void.py
+++ b/rdflib/void.py
@@ -93,10 +93,8 @@ def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
res.add((part, VOID.distinctSubjects, Literal(len(classes[c]))))
if distinctForPartitions:
- res.add(
- (part, VOID.properties, Literal(len(classProps[c]))))
- res.add((part, VOID.distinctObjects,
- Literal(len(classObjects[c]))))
+ res.add((part, VOID.properties, Literal(len(classProps[c]))))
+ res.add((part, VOID.distinctObjects, Literal(len(classObjects[c]))))
for i, p in enumerate(properties):
part = URIRef(dataset + "_property%d" % i)
@@ -121,9 +119,7 @@ def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
res.add((part, VOID.entities, Literal(entities)))
res.add((part, VOID.classes, Literal(len(propClasses))))
- res.add((part, VOID.distinctSubjects,
- Literal(len(propSubjects[p]))))
- res.add((part, VOID.distinctObjects,
- Literal(len(propObjects[p]))))
+ res.add((part, VOID.distinctSubjects, Literal(len(propSubjects[p]))))
+ res.add((part, VOID.distinctObjects, Literal(len(propObjects[p]))))
return res, dataset
diff --git a/run_tests.py b/run_tests.py
index 77bc9fec..ab68f792 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -42,56 +42,58 @@ from __future__ import print_function
NOSE_ARGS = [
- '--with-doctest',
- '--doctest-extension=.doctest',
- '--doctest-tests',
+ "--with-doctest",
+ "--doctest-extension=.doctest",
+ "--doctest-tests",
# '--with-EARL',
]
COVERAGE_EXTRA_ARGS = [
- '--cover-package=rdflib',
- '--cover-inclusive',
+ "--cover-package=rdflib",
+ "--cover-inclusive",
]
-DEFAULT_LOCATION = '--where=./'
+DEFAULT_LOCATION = "--where=./"
DEFAULT_ATTRS = [] # ['!known_issue', '!sparql']
-DEFAULT_DIRS = ['test', 'rdflib']
+DEFAULT_DIRS = ["test", "rdflib"]
-if __name__ == '__main__':
+if __name__ == "__main__":
from sys import argv, exit, stderr
+
try:
import nose
except ImportError:
- print("""\
+ print(
+ """\
Requires Nose. Try:
$ sudo easy_install nose
- Exiting. """, file=stderr)
+ Exiting. """,
+ file=stderr,
+ )
exit(1)
-
- if '--with-coverage' in argv:
+ if "--with-coverage" in argv:
try:
import coverage
except ImportError:
print("No coverage module found, skipping code coverage.", file=stderr)
- argv.remove('--with-coverage')
+ argv.remove("--with-coverage")
else:
NOSE_ARGS += COVERAGE_EXTRA_ARGS
+ if True not in [a.startswith("-a") or a.startswith("--attr=") for a in argv]:
+ argv.append("--attr=" + ",".join(DEFAULT_ATTRS))
- if True not in [a.startswith('-a') or a.startswith('--attr=') for a in argv]:
- argv.append('--attr=' + ','.join(DEFAULT_ATTRS))
-
- if not [a for a in argv[1:] if not a.startswith('-')]:
+ if not [a for a in argv[1:] if not a.startswith("-")]:
argv += DEFAULT_DIRS # since nose doesn't look here by default..
- if not [a for a in argv if a.startswith('--where=')]:
+ if not [a for a in argv if a.startswith("--where=")]:
argv += [DEFAULT_LOCATION]
finalArgs = argv + NOSE_ARGS
diff --git a/setup.py b/setup.py
index bc8c03f2..7e4d4e21 100644
--- a/setup.py
+++ b/setup.py
@@ -5,15 +5,21 @@ import re
from setuptools import setup, find_packages
kwargs = {}
-kwargs['install_requires'] = ['isodate', 'pyparsing']
-kwargs['tests_require'] = ['html5lib', 'networkx', 'nose', 'doctest-ignore-unicode', 'requests']
-kwargs['test_suite'] = "nose.collector"
-kwargs['extras_require'] = {
- 'html': ['html5lib'],
- 'sparql': ['requests'],
- 'tests': kwargs['tests_require'],
- 'docs': ['sphinx < 4', 'sphinxcontrib-apidoc']
- }
+kwargs["install_requires"] = ["isodate", "pyparsing"]
+kwargs["tests_require"] = [
+ "html5lib",
+ "networkx",
+ "nose",
+ "doctest-ignore-unicode",
+ "requests",
+]
+kwargs["test_suite"] = "nose.collector"
+kwargs["extras_require"] = {
+ "html": ["html5lib"],
+ "sparql": ["requests"],
+ "tests": kwargs["tests_require"],
+ "docs": ["sphinx < 4", "sphinxcontrib-apidoc"],
+}
def find_version(filename):
@@ -24,20 +30,20 @@ def find_version(filename):
return version_match.group(1)
-version = find_version('rdflib/__init__.py')
+version = find_version("rdflib/__init__.py")
-packages = find_packages(exclude=('examples*', 'test*'))
+packages = find_packages(exclude=("examples*", "test*"))
-if os.environ.get('READTHEDOCS', None):
+if os.environ.get("READTHEDOCS", None):
# if building docs for RTD
# install examples, to get docstrings
packages.append("examples")
setup(
- name='rdflib',
+ name="rdflib",
version=version,
description="RDFLib is a Python library for working with RDF, a "
- "simple yet powerful language for representing information.",
+ "simple yet powerful language for representing information.",
author="Daniel 'eikeon' Krech",
author_email="eikeon@eikeon.com",
maintainer="RDFLib Team",
@@ -45,19 +51,19 @@ setup(
url="https://github.com/RDFLib/rdflib",
license="BSD-3-Clause",
platforms=["any"],
- python_requires='>=3.5',
+ python_requires=">=3.5",
classifiers=[
- "Programming Language :: Python",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- "License :: OSI Approved :: BSD License",
- "Topic :: Software Development :: Libraries :: Python Modules",
- "Operating System :: OS Independent",
- "Natural Language :: English",
- ],
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "License :: OSI Approved :: BSD License",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Operating System :: OS Independent",
+ "Natural Language :: English",
+ ],
long_description="""\
RDFLib is a Python library for working with
RDF, a simple yet powerful language for representing information.
@@ -82,16 +88,15 @@ Read the docs at:
http://rdflib.readthedocs.io
""",
- packages = packages,
- entry_points = {
- 'console_scripts': [
- 'rdfpipe = rdflib.tools.rdfpipe:main',
- 'csv2rdf = rdflib.tools.csv2rdf:main',
- 'rdf2dot = rdflib.tools.rdf2dot:main',
- 'rdfs2dot = rdflib.tools.rdfs2dot:main',
- 'rdfgraphisomorphism = rdflib.tools.graphisomorphism:main',
- ],
- },
-
+ packages=packages,
+ entry_points={
+ "console_scripts": [
+ "rdfpipe = rdflib.tools.rdfpipe:main",
+ "csv2rdf = rdflib.tools.csv2rdf:main",
+ "rdf2dot = rdflib.tools.rdf2dot:main",
+ "rdfs2dot = rdflib.tools.rdfs2dot:main",
+ "rdfgraphisomorphism = rdflib.tools.graphisomorphism:main",
+ ],
+ },
**kwargs
- )
+)
diff --git a/test/earl.py b/test/earl.py
index 9e4d0413..54df7d3e 100644
--- a/test/earl.py
+++ b/test/earl.py
@@ -9,17 +9,17 @@ EARL = Namespace("http://www.w3.org/ns/earl#")
report = Graph()
-report.bind('foaf', FOAF)
-report.bind('earl', EARL)
-report.bind('doap', DOAP)
-report.bind('dc', DC)
+report.bind("foaf", FOAF)
+report.bind("earl", EARL)
+report.bind("doap", DOAP)
+report.bind("dc", DC)
-me = URIRef('http://gromgull.net/me')
+me = URIRef("http://gromgull.net/me")
report.add((me, RDF.type, FOAF.Person))
report.add((me, FOAF.homepage, URIRef("http://gromgull.net")))
report.add((me, FOAF.name, Literal("Gunnar Aastrand Grimnes")))
-rdflib = URIRef('https://github.com/RDFLib/rdflib')
+rdflib = URIRef("https://github.com/RDFLib/rdflib")
report.add((rdflib, DOAP.homepage, rdflib))
report.add((rdflib, DOAP.name, Literal("rdflib")))
diff --git a/test/manifest.py b/test/manifest.py
index 1a8d774d..107b9422 100644
--- a/test/manifest.py
+++ b/test/manifest.py
@@ -5,26 +5,27 @@ from nose.tools import nottest
from rdflib import Graph, RDF, RDFS, Namespace
-MF = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#')
-QT = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-query#')
-UP = Namespace('http://www.w3.org/2009/sparql/tests/test-update#')
-RDFT = Namespace('http://www.w3.org/ns/rdftest#')
+MF = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#")
+QT = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-query#")
+UP = Namespace("http://www.w3.org/2009/sparql/tests/test-update#")
+RDFT = Namespace("http://www.w3.org/ns/rdftest#")
-DAWG = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-dawg#')
+DAWG = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-dawg#")
-RDFTest = namedtuple('RDFTest', ['uri', 'name', 'comment', 'data',
- 'graphdata', 'action', 'result', 'syntax'])
+RDFTest = namedtuple(
+ "RDFTest",
+ ["uri", "name", "comment", "data", "graphdata", "action", "result", "syntax"],
+)
def read_manifest(f, base=None, legacy=False):
-
def _str(x):
if x is not None:
return str(x)
return None
g = Graph()
- g.load(f, publicID=base, format='turtle')
+ g.load(f, publicID=base, format="turtle")
for m in g.subjects(RDF.type, MF.Manifest):
@@ -36,17 +37,22 @@ def read_manifest(f, base=None, legacy=False):
for col in g.objects(m, MF.entries):
for e in g.items(col):
- approved = ((e, DAWG.approval, DAWG.Approved) in g or
- (e, DAWG.approval, DAWG.NotClassified) in g or
- (e, RDFT.approval, RDFT.Approved) in g)
+ approved = (
+ (e, DAWG.approval, DAWG.Approved) in g
+ or (e, DAWG.approval, DAWG.NotClassified) in g
+ or (e, RDFT.approval, RDFT.Approved) in g
+ )
# run proposed tests
# approved |= (e, RDFT.approval, RDFT.Proposed) in g
# run legacy tests with no approval set
if legacy:
- approved |= ((e, DAWG.approval, None) not in g and
- (e, RDFT.approval, None) not in g)
+ approved |= (e, DAWG.approval, None) not in g and (
+ e,
+ RDFT.approval,
+ None,
+ ) not in g
if not approved:
continue
@@ -75,15 +81,17 @@ def read_manifest(f, base=None, legacy=False):
data = g.value(a, UP.data)
graphdata = []
for gd in g.objects(a, UP.graphData):
- graphdata.append((g.value(gd, UP.graph),
- g.value(gd, RDFS.label)))
+ graphdata.append(
+ (g.value(gd, UP.graph), g.value(gd, RDFS.label))
+ )
r = g.value(e, MF.result)
resdata = g.value(r, UP.data)
resgraphdata = []
for gd in g.objects(r, UP.graphData):
- resgraphdata.append((g.value(gd, UP.graph),
- g.value(gd, RDFS.label)))
+ resgraphdata.append(
+ (g.value(gd, UP.graph), g.value(gd, RDFS.label))
+ )
res = resdata, resgraphdata
@@ -91,28 +99,37 @@ def read_manifest(f, base=None, legacy=False):
query = g.value(e, MF.action)
syntax = _type == MF.PositiveSyntaxTest11
- elif _type in (MF.PositiveUpdateSyntaxTest11,
- MF.NegativeUpdateSyntaxTest11):
+ elif _type in (
+ MF.PositiveUpdateSyntaxTest11,
+ MF.NegativeUpdateSyntaxTest11,
+ ):
query = g.value(e, MF.action)
syntax = _type == MF.PositiveUpdateSyntaxTest11
- elif _type in (RDFT.TestNQuadsPositiveSyntax,
- RDFT.TestNQuadsNegativeSyntax,
- RDFT.TestTrigPositiveSyntax,
- RDFT.TestTrigNegativeSyntax,
- RDFT.TestNTriplesPositiveSyntax,
- RDFT.TestNTriplesNegativeSyntax,
- RDFT.TestTurtlePositiveSyntax,
- RDFT.TestTurtleNegativeSyntax,
- ):
+ elif _type in (
+ RDFT.TestNQuadsPositiveSyntax,
+ RDFT.TestNQuadsNegativeSyntax,
+ RDFT.TestTrigPositiveSyntax,
+ RDFT.TestTrigNegativeSyntax,
+ RDFT.TestNTriplesPositiveSyntax,
+ RDFT.TestNTriplesNegativeSyntax,
+ RDFT.TestTurtlePositiveSyntax,
+ RDFT.TestTurtleNegativeSyntax,
+ ):
query = g.value(e, MF.action)
- syntax = _type in (RDFT.TestNQuadsPositiveSyntax,
- RDFT.TestNTriplesPositiveSyntax,
- RDFT.TestTrigPositiveSyntax,
- RDFT.TestTurtlePositiveSyntax)
-
- elif _type in (RDFT.TestTurtleEval, RDFT.TestTurtleNegativeEval,
- RDFT.TestTrigEval, RDFT.TestTrigNegativeEval):
+ syntax = _type in (
+ RDFT.TestNQuadsPositiveSyntax,
+ RDFT.TestNTriplesPositiveSyntax,
+ RDFT.TestTrigPositiveSyntax,
+ RDFT.TestTurtlePositiveSyntax,
+ )
+
+ elif _type in (
+ RDFT.TestTurtleEval,
+ RDFT.TestTurtleNegativeEval,
+ RDFT.TestTrigEval,
+ RDFT.TestTrigNegativeEval,
+ ):
query = g.value(e, MF.action)
res = g.value(e, MF.result)
syntax = _type in (RDFT.TestTurtleEval, RDFT.TestTrigEval)
@@ -122,9 +139,16 @@ def read_manifest(f, base=None, legacy=False):
print("I dont know DAWG Test Type %s" % _type)
continue
- yield _type, RDFTest(e, _str(name), _str(comment),
- _str(data), graphdata, _str(query),
- res, syntax)
+ yield _type, RDFTest(
+ e,
+ _str(name),
+ _str(comment),
+ _str(data),
+ graphdata,
+ _str(query),
+ res,
+ syntax,
+ )
@nottest
diff --git a/test/store_performance.py b/test/store_performance.py
index 578a51e5..9e55d654 100644
--- a/test/store_performance.py
+++ b/test/store_performance.py
@@ -24,7 +24,8 @@ class StoreTestCase(unittest.TestCase):
something other than a unit test... but for now we'll add it as a
unit test.
"""
- store = 'default'
+
+ store = "default"
tmppath = None
configString = os.environ.get("DBURI", "dburi")
@@ -36,6 +37,7 @@ class StoreTestCase(unittest.TestCase):
if self.store == "MySQL":
# from test.mysql import configString
from rdflib.store.MySQL import MySQL
+
path = self.configString
MySQL().destroy(path)
else:
@@ -54,10 +56,10 @@ class StoreTestCase(unittest.TestCase):
def testTime(self):
number = 1
print(self.store)
- print("input:", end=' ')
+ print("input:", end=" ")
for i in itertools.repeat(None, number):
self._testInput()
- print("random:", end=' ')
+ print("random:", end=" ")
for i in itertools.repeat(None, number):
self._testRandom()
print(".")
@@ -77,7 +79,7 @@ class StoreTestCase(unittest.TestCase):
for _i in it:
add_random()
t1 = time()
- print("%.3g" % (t1 - t0), end=' ')
+ print("%.3g" % (t1 - t0), end=" ")
def _testInput(self):
number = 1
@@ -92,12 +94,12 @@ class StoreTestCase(unittest.TestCase):
for _i in it:
add_from_input()
t1 = time()
- print("%.3g" % (t1 - t0), end=' ')
+ print("%.3g" % (t1 - t0), end=" ")
class MemoryStoreTestCase(StoreTestCase):
store = "IOMemory"
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_aggregate_graphs.py b/test/test_aggregate_graphs.py
index e8e40e81..5d58f4d3 100644
--- a/test/test_aggregate_graphs.py
+++ b/test/test_aggregate_graphs.py
@@ -36,8 +36,7 @@ testGraph3N3 = """
<> a log:N3Document.
"""
-sparqlQ = \
- """
+sparqlQ = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT *
FROM NAMED <http://example.com/graph1>
@@ -47,14 +46,12 @@ FROM <http://www.w3.org/2000/01/rdf-schema#>
WHERE {?sub ?pred rdfs:Class }"""
-sparqlQ2 =\
- """
+sparqlQ2 = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?class
WHERE { GRAPH ?graph { ?member a ?class } }"""
-sparqlQ3 =\
- """
+sparqlQ3 = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX log: <http://www.w3.org/2000/10/swap/log#>
SELECT ?n3Doc
@@ -63,15 +60,17 @@ WHERE {?n3Doc a log:N3Document }"""
class GraphAggregates1(unittest.TestCase):
def setUp(self):
- memStore = plugin.get('IOMemory', Store)()
+ memStore = plugin.get("IOMemory", Store)()
self.graph1 = Graph(memStore)
self.graph2 = Graph(memStore)
self.graph3 = Graph(memStore)
- for n3Str, graph in [(testGraph1N3, self.graph1),
- (testGraph2N3, self.graph2),
- (testGraph3N3, self.graph3)]:
- graph.parse(StringIO(n3Str), format='n3')
+ for n3Str, graph in [
+ (testGraph1N3, self.graph1),
+ (testGraph2N3, self.graph2),
+ (testGraph3N3, self.graph3),
+ ]:
+ graph.parse(StringIO(n3Str), format="n3")
self.G = ReadOnlyGraphAggregate([self.graph1, self.graph2, self.graph3])
@@ -92,7 +91,16 @@ class GraphAggregates1(unittest.TestCase):
assert (URIRef("http://test/foo"), RDF.type, RDFS.Resource) in self.G
barPredicates = [URIRef("http://test/d"), RDFS.isDefinedBy]
- assert len(list(self.G.triples_choices((URIRef("http://test/bar"), barPredicates, None)))) == 2
+ assert (
+ len(
+ list(
+ self.G.triples_choices(
+ (URIRef("http://test/bar"), barPredicates, None)
+ )
+ )
+ )
+ == 2
+ )
class GraphAggregates2(unittest.TestCase):
@@ -101,20 +109,22 @@ class GraphAggregates2(unittest.TestCase):
sparql = True
def setUp(self):
- memStore = plugin.get('IOMemory', Store)()
+ memStore = plugin.get("IOMemory", Store)()
self.graph1 = Graph(memStore, URIRef("http://example.com/graph1"))
self.graph2 = Graph(memStore, URIRef("http://example.com/graph2"))
self.graph3 = Graph(memStore, URIRef("http://example.com/graph3"))
- for n3Str, graph in [(testGraph1N3, self.graph1),
- (testGraph2N3, self.graph2),
- (testGraph3N3, self.graph3)]:
- graph.parse(StringIO(n3Str), format='n3')
+ for n3Str, graph in [
+ (testGraph1N3, self.graph1),
+ (testGraph2N3, self.graph2),
+ (testGraph3N3, self.graph3),
+ ]:
+ graph.parse(StringIO(n3Str), format="n3")
self.graph4 = Graph(memStore, RDFS)
self.graph4.parse(RDFS.uri)
self.G = ConjunctiveGraph(memStore)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_auditable.py b/test/test_auditable.py
index 63c7c5e5..e5aff715 100644
--- a/test/test_auditable.py
+++ b/test/test_auditable.py
@@ -9,7 +9,6 @@ EX = Namespace("http://example.org/")
class BaseTestAuditableStore(unittest.TestCase):
-
def assert_graph_equal(self, g1, g2):
try:
return self.assertSetEqual(set(g1), set(g2))
@@ -19,192 +18,157 @@ class BaseTestAuditableStore(unittest.TestCase):
class TestAuditableStore(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
self.g.add((EX.s0, EX.p0, EX.o0))
self.g.add((EX.s0, EX.p0, EX.o0bis))
- self.t = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t = Graph(AuditableStore(self.g.store), self.g.identifier)
def test_add_commit(self):
self.t.add((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.t,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.g,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
def test_remove_commit(self):
self.t.remove((EX.s0, EX.p0, EX.o0))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(self.t, [(EX.s0, EX.p0, EX.o0bis),])
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0bis),])
def test_multiple_remove_commit(self):
self.t.remove((EX.s0, EX.p0, None))
- self.assert_graph_equal(self.t, [
- ])
+ self.assert_graph_equal(self.t, [])
self.t.commit()
- self.assert_graph_equal(self.g, [
- ])
+ self.assert_graph_equal(self.g, [])
def test_noop_add_commit(self):
self.t.add((EX.s0, EX.p0, EX.o0))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.t, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_noop_remove_commit(self):
self.t.add((EX.s0, EX.p0, EX.o0))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.t, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_add_remove_commit(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.remove((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.t, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_remove_add_commit(self):
self.t.remove((EX.s1, EX.p1, EX.o1))
self.t.add((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.t,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.g,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
def test_add_rollback(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_remove_rollback(self):
self.t.remove((EX.s0, EX.p0, EX.o0))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_multiple_remove_rollback(self):
self.t.remove((EX.s0, EX.p0, None))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_noop_add_rollback(self):
self.t.add((EX.s0, EX.p0, EX.o0))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_noop_remove_rollback(self):
self.t.add((EX.s0, EX.p0, EX.o0))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_add_remove_rollback(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.remove((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_remove_add_rollback(self):
self.t.remove((EX.s1, EX.p1, EX.o1))
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
class TestAuditableStoreEmptyGraph(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
- self.t = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t = Graph(AuditableStore(self.g.store), self.g.identifier)
def test_add_commit(self):
self.t.add((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(self.t, [(EX.s1, EX.p1, EX.o1),])
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(self.g, [(EX.s1, EX.p1, EX.o1),])
def test_add_rollback(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- ])
+ self.assert_graph_equal(self.g, [])
class TestAuditableStoreConccurent(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
self.g.add((EX.s0, EX.p0, EX.o0))
self.g.add((EX.s0, EX.p0, EX.o0bis))
- self.t1 = Graph(AuditableStore(self.g.store),
- self.g.identifier)
- self.t2 = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t1 = Graph(AuditableStore(self.g.store), self.g.identifier)
+ self.t2 = Graph(AuditableStore(self.g.store), self.g.identifier)
self.t1.add((EX.s1, EX.p1, EX.o1))
self.t2.add((EX.s2, EX.p2, EX.o2))
self.t1.remove((EX.s0, EX.p0, EX.o0))
@@ -213,93 +177,71 @@ class TestAuditableStoreConccurent(BaseTestAuditableStore):
def test_commit_commit(self):
self.t1.commit()
self.t2.commit()
- self.assert_graph_equal(self.g, [
- (EX.s1, EX.p1, EX.o1),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(self.g, [(EX.s1, EX.p1, EX.o1), (EX.s2, EX.p2, EX.o2),])
def test_commit_rollback(self):
self.t1.commit()
self.t2.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s1, EX.p1, EX.o1),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s1, EX.p1, EX.o1), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_rollback_commit(self):
self.t1.rollback()
self.t2.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),])
def test_rollback_rollback(self):
self.t1.rollback()
self.t2.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
class TestAuditableStoreEmbeded(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
self.g.add((EX.s0, EX.p0, EX.o0))
self.g.add((EX.s0, EX.p0, EX.o0bis))
- self.t1 = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t1 = Graph(AuditableStore(self.g.store), self.g.identifier)
self.t1.add((EX.s1, EX.p1, EX.o1))
self.t1.remove((EX.s0, EX.p0, EX.o0bis))
- self.t2 = Graph(AuditableStore(self.t1.store),
- self.t1.identifier)
+ self.t2 = Graph(AuditableStore(self.t1.store), self.t1.identifier)
self.t2.add((EX.s2, EX.p2, EX.o2))
self.t2.remove((EX.s1, EX.p1, EX.o1))
def test_commit_commit(self):
- self.assert_graph_equal(self.t2, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(
+ self.t2, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),]
+ )
self.t2.commit()
- self.assert_graph_equal(self.t1, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(
+ self.t1, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),]
+ )
self.t1.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),])
def test_commit_rollback(self):
self.t2.commit()
self.t1.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_rollback_commit(self):
self.t2.rollback()
- self.assert_graph_equal(self.t1, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.t1, [(EX.s0, EX.p0, EX.o0), (EX.s1, EX.p1, EX.o1),]
+ )
self.t1.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0), (EX.s1, EX.p1, EX.o1),])
def test_rollback_rollback(self):
self.t2.rollback()
self.t1.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
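The behaviour these tests pin down: AuditableStore applies changes to the wrapped store immediately while recording reverse operations, so rollback() undoes everything done through the wrapper and commit() discards the undo log. A minimal standalone sketch (assuming rdflib 5.x and the auditable module this patch touches):

from rdflib import Graph, Namespace
from rdflib.plugins.stores.auditable import AuditableStore

EX = Namespace("http://example.org/")

g = Graph()
g.add((EX.s0, EX.p0, EX.o0))

t = Graph(AuditableStore(g.store), g.identifier)
t.add((EX.s1, EX.p1, EX.o1))   # lands in g's store right away
t.rollback()                   # replay the undo log
assert set(g) == {(EX.s0, EX.p0, EX.o0)}

t2 = Graph(AuditableStore(g.store), g.identifier)
t2.add((EX.s1, EX.p1, EX.o1))
t2.commit()                    # keep the change, forget the undo log
assert (EX.s1, EX.p1, EX.o1) in g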
diff --git a/test/test_batch_add.py b/test/test_batch_add.py
index 1747100c..43457e5e 100644
--- a/test/test_batch_add.py
+++ b/test/test_batch_add.py
@@ -21,15 +21,14 @@ class TestBatchAddGraph(unittest.TestCase):
BatchAddGraph(Graph(), batch_size=-12)
def test_exit_submits_partial_batch(self):
- trip = (URIRef('a'), URIRef('b'), URIRef('c'))
+ trip = (URIRef("a"), URIRef("b"), URIRef("c"))
g = Graph()
with BatchAddGraph(g, batch_size=10) as cut:
cut.add(trip)
self.assertIn(trip, g)
def test_add_more_than_batch_size(self):
- trips = [(URIRef('a'), URIRef('b%d' % i), URIRef('c%d' % i))
- for i in range(12)]
+ trips = [(URIRef("a"), URIRef("b%d" % i), URIRef("c%d" % i)) for i in range(12)]
g = Graph()
with BatchAddGraph(g, batch_size=10) as cut:
for trip in trips:
@@ -37,38 +36,37 @@ class TestBatchAddGraph(unittest.TestCase):
self.assertEqual(12, len(g))
def test_add_quad_for_non_conjunctive_empty(self):
- '''
+ """
Graph drops quads that don't match our graph. Make sure we do the same
- '''
- g = Graph(identifier='http://example.org/g')
- badg = Graph(identifier='http://example.org/badness')
+ """
+ g = Graph(identifier="http://example.org/g")
+ badg = Graph(identifier="http://example.org/badness")
with BatchAddGraph(g) as cut:
- cut.add((URIRef('a'), URIRef('b'), URIRef('c'), badg))
+ cut.add((URIRef("a"), URIRef("b"), URIRef("c"), badg))
self.assertEqual(0, len(g))
def test_add_quad_for_non_conjunctive_pass_on_context_matches(self):
g = Graph()
with BatchAddGraph(g) as cut:
- cut.add((URIRef('a'), URIRef('b'), URIRef('c'), g))
+ cut.add((URIRef("a"), URIRef("b"), URIRef("c"), g))
self.assertEqual(1, len(g))
def test_no_addN_on_exception(self):
- '''
+ """
Even if we've added triples so far, it may be that attempting to add the last
batch is the cause of our exception, so we don't want to attempt again
- '''
+ """
g = Graph()
- trips = [(URIRef('a'), URIRef('b%d' % i), URIRef('c%d' % i))
- for i in range(12)]
+ trips = [(URIRef("a"), URIRef("b%d" % i), URIRef("c%d" % i)) for i in range(12)]
try:
with BatchAddGraph(g, batch_size=10) as cut:
for i, trip in enumerate(trips):
cut.add(trip)
if i == 11:
- raise Exception('myexc')
+ raise Exception("myexc")
except Exception as e:
- if str(e) != 'myexc':
+ if str(e) != "myexc":
pass
self.assertEqual(10, len(g))
@@ -81,8 +79,9 @@ class TestBatchAddGraph(unittest.TestCase):
self.counts.append(sum(1 for _ in quads))
g = MockGraph()
- quads = [(URIRef('a'), URIRef('b%d' % i), URIRef('c%d' % i), g)
- for i in range(12)]
+ quads = [
+ (URIRef("a"), URIRef("b%d" % i), URIRef("c%d" % i), g) for i in range(12)
+ ]
with BatchAddGraph(g, batch_size=10, batch_addn=True) as cut:
cut.addN(quads)
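For context, BatchAddGraph buffers triples and forwards them to the wrapped graph in addN() chunks of batch_size; leaving the with-block flushes the final partial batch, and a batch that raises is not resubmitted (which is what test_no_addN_on_exception checks). A minimal sketch:

from rdflib import Graph, URIRef
from rdflib.graph import BatchAddGraph

g = Graph()
with BatchAddGraph(g, batch_size=10) as cut:
    for i in range(25):
        # buffered, flushed to g via addN() every 10 triples
        cut.add((URIRef("urn:s"), URIRef("urn:p%d" % i), URIRef("urn:o%d" % i)))
# exiting the block submitted the trailing partial batch of 5
assert len(g) == 25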
diff --git a/test/test_bnode_ncname.py b/test/test_bnode_ncname.py
index 7017ef09..3e621579 100644
--- a/test/test_bnode_ncname.py
+++ b/test/test_bnode_ncname.py
@@ -6,6 +6,7 @@ from hashlib import md5
try:
from uuid import uuid4
except ImportError:
+
def uuid4():
"""
Generates a uuid on behalf of Python 2.4
@@ -14,12 +15,13 @@ except ImportError:
import os
import time
import socket
+
try:
preseed = os.urandom(16)
except NotImplementedError:
- preseed = ''
+ preseed = ""
# Have doubts about this. random.seed will just hash the string
- random.seed('%s%s%s' % (preseed, os.getpid(), time.time()))
+ random.seed("%s%s%s" % (preseed, os.getpid(), time.time()))
del preseed
t = int(time.time() * 1000.0)
r = int(random.random() * 100000000000000000)
@@ -28,10 +30,11 @@ except ImportError:
except:
# if we can't get a network address, just imagine one
a = random.random() * 100000000000000000
- strdata = str(t) + ' ' + str(r) + ' ' + str(a)
- data = md5(strdata.encode('ascii')).hexdigest()
+ strdata = str(t) + " " + str(r) + " " + str(a)
+ data = md5(strdata.encode("ascii")).hexdigest()
yield data
+
# Adapted from http://icodesnip.com/snippet/python/simple-universally-unique-id-uuid-or-guid
@@ -69,13 +72,14 @@ def is_ncname(value):
>>> from rdflib import BNode
>>> assert is_ncname(BNode(_sn_gen=bnode_uuid, _prefix="urn:uuid:")) == True
"""
- ncnameexp = re.compile('[A-Za-z][A-Za-z0-9]*')
+ ncnameexp = re.compile("[A-Za-z][A-Za-z0-9]*")
if ncnameexp.match(value):
return True
else:
return False
-if __name__ == '__main__':
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
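One caveat worth noting about is_ncname() above: re.match() anchors only at the start of the string, so the check accepts any value that merely begins with a letter-and-digit run. A sketch contrasting it with an end-anchored pattern (itself still a simplification; the full XML NCName grammar also permits underscores, hyphens and dots, but never colons):

import re

loose = re.compile("[A-Za-z][A-Za-z0-9]*")
assert loose.match("urn:uuid:1234") is not None   # matches the "urn" prefix only

strict = re.compile(r"[A-Za-z_][A-Za-z0-9_.\-]*\Z")
assert strict.match("urn:uuid:1234") is None      # the colon is rejected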
diff --git a/test/test_canonicalization.py b/test/test_canonicalization.py
index df010432..12dd657f 100644
--- a/test/test_canonicalization.py
+++ b/test/test_canonicalization.py
@@ -19,39 +19,56 @@ def get_digest_value(rdf, mimetype):
def negative_graph_match_test():
- '''Test of FRIR identifiers against tricky RDF graphs with blank nodes.'''
+ """Test of FRIR identifiers against tricky RDF graphs with blank nodes."""
testInputs = [
- [str('''@prefix : <http://example.org/ns#> .
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
[ :label "Same" ].
- '''),
- str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
[ :label "Same" ],
[ :label "Same" ].
- '''),
- False
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ False,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
<http://example.org/a>.
- '''),
- str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
<http://example.org/a>,
<http://example.org/a>.
- '''),
- True
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ True,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
:linear_two_step_symmetry_start :related [ :related [ :related :linear_two_step_symmatry_end]],
- [ :related [ :related :linear_two_step_symmatry_end]].'''),
- str('''@prefix : <http://example.org/ns#> .
+ [ :related [ :related :linear_two_step_symmatry_end]]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
:linear_two_step_symmetry_start :related [ :related [ :related :linear_two_step_symmatry_end]],
- [ :related [ :related :linear_two_step_symmatry_end]].'''),
- True
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ [ :related [ :related :linear_two_step_symmatry_end]]."""
+ ),
+ True,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -60,8 +77,10 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -72,11 +91,14 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- False
- ],
+ ]."""
+ ),
+ False,
+ ],
# This test fails because the algorithm purposefully breaks the symmetry of symmetric graphs
- [str('''@prefix : <http://example.org/ns#> .
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -85,8 +107,10 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -95,10 +119,13 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- True
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ True,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:label "foo";
@@ -108,8 +135,10 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -118,10 +147,13 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- False
- ],
- [str('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ False,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:0001 :rel _:0003, _:0004.
_:0002 :rel _:0005, _:0006.
_:0003 :rel _:0001, _:0007, _:0010.
@@ -132,8 +164,10 @@ def negative_graph_match_test():
_:0008 :rel _:0004, _:0006, _:0010.
_:0009 :rel _:0004, _:0005, _:0007.
_:0010 :rel _:0003, _:0006, _:0008.
- '''),
- str('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:0001 :rel _:0003, _:0004.
_:0002 :rel _:0005, _:0006.
_:0003 :rel _:0001, _:0007, _:0010.
@@ -144,9 +178,10 @@ def negative_graph_match_test():
_:0005 :rel _:0002, _:0007, _:0009.
_:0006 :rel _:0002, _:0008, _:0010.
_:0007 :rel _:0003, _:0005, _:0009.
- '''),
- True
- ],
+ """
+ ),
+ True,
+ ],
]
def fn(rdf1, rdf2, identical):
@@ -157,6 +192,7 @@ def negative_graph_match_test():
print(rdf2)
print(digest2)
assert (digest1 == digest2) == identical
+
for inputs in testInputs:
yield fn, inputs[0], inputs[1], inputs[2]
@@ -165,66 +201,30 @@ def test_issue494_collapsing_bnodes():
"""Test for https://github.com/RDFLib/rdflib/issues/494 collapsing BNodes"""
g = Graph()
g += [
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['object'],
- URIRef(u'source')),
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['predicate'],
- BNode('vcb3')),
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['subject'],
- BNode('vcb2')),
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['object'],
- URIRef(u'target')),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['predicate'],
- BNode('vcb0')),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['subject'],
- URIRef(u'source')),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['object'],
- BNode('vr0KcS4')),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['predicate'],
- BNode('vrby3JV')),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['subject'],
- URIRef(u'source')),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['object'],
- URIRef(u'source')),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['predicate'],
- BNode('vcb5')),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['subject'],
- URIRef(u'target')),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['object'],
- URIRef(u'source')),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['predicate'],
- BNode('vcb4')),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['subject'],
- URIRef(u'source')),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['type'],
- RDF['Statement']),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["object"], URIRef(u"source")),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["predicate"], BNode("vcb3")),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["subject"], BNode("vcb2")),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["type"], RDF["Statement"]),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["object"], URIRef(u"target")),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["predicate"], BNode("vcb0")),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["subject"], URIRef(u"source")),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["type"], RDF["Statement"]),
+ (BNode("Ndb804ba690a64b3dbb9063c68d5e3550"), RDF["object"], BNode("vr0KcS4")),
+ (
+ BNode("Ndb804ba690a64b3dbb9063c68d5e3550"),
+ RDF["predicate"],
+ BNode("vrby3JV"),
+ ),
+ (BNode("Ndb804ba690a64b3dbb9063c68d5e3550"), RDF["subject"], URIRef(u"source")),
+ (BNode("Ndb804ba690a64b3dbb9063c68d5e3550"), RDF["type"], RDF["Statement"]),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["object"], URIRef(u"source")),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["predicate"], BNode("vcb5")),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["subject"], URIRef(u"target")),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["type"], RDF["Statement"]),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["object"], URIRef(u"source")),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["predicate"], BNode("vcb4")),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["subject"], URIRef(u"source")),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["type"], RDF["Statement"]),
]
# print('graph length: %d, nodes: %d' % (len(g), len(g.all_nodes())))
@@ -232,10 +232,10 @@ def test_issue494_collapsing_bnodes():
# for triple_bnode in g.subjects(RDF['type'], RDF['Statement']):
# print(len(list(g.triples([triple_bnode, None, None]))))
# print('all node degrees:')
- g_node_degs = sorted([
- len(list(g.triples([node, None, None])))
- for node in g.all_nodes()
- ], reverse=True)
+ g_node_degs = sorted(
+ [len(list(g.triples([node, None, None]))) for node in g.all_nodes()],
+ reverse=True,
+ )
# print(g_node_degs)
cg = to_canonical_graph(g)
@@ -244,21 +244,20 @@ def test_issue494_collapsing_bnodes():
# for triple_bnode in cg.subjects(RDF['type'], RDF['Statement']):
# print(len(list(cg.triples([triple_bnode, None, None]))))
# print('all node degrees:')
- cg_node_degs = sorted([
- len(list(cg.triples([node, None, None])))
- for node in cg.all_nodes()
- ], reverse=True)
+ cg_node_degs = sorted(
+ [len(list(cg.triples([node, None, None]))) for node in cg.all_nodes()],
+ reverse=True,
+ )
# print(cg_node_degs)
- assert len(g) == len(cg), \
- 'canonicalization changed number of triples in graph'
- assert len(g.all_nodes()) == len(cg.all_nodes()), \
- 'canonicalization changed number of nodes in graph'
- assert len(list(g.subjects(RDF['type'], RDF['Statement']))) == \
- len(list(cg.subjects(RDF['type'], RDF['Statement']))), \
- 'canonicalization changed number of statements'
- assert g_node_degs == cg_node_degs, \
- 'canonicalization changed node degrees'
+ assert len(g) == len(cg), "canonicalization changed number of triples in graph"
+ assert len(g.all_nodes()) == len(
+ cg.all_nodes()
+ ), "canonicalization changed number of nodes in graph"
+ assert len(list(g.subjects(RDF["type"], RDF["Statement"]))) == len(
+ list(cg.subjects(RDF["type"], RDF["Statement"]))
+ ), "canonicalization changed number of statements"
+ assert g_node_degs == cg_node_degs, "canonicalization changed node degrees"
# counter for subject, predicate and object nodes
g_pos_counts = Counter(), Counter(), Counter()
@@ -274,8 +273,9 @@ def test_issue494_collapsing_bnodes():
cg_pos_counts[i][t] += 1
cg_count_signature = [sorted(c.values()) for c in cg_pos_counts]
- assert g_count_signature == cg_count_signature, \
- 'canonicalization changed node position counts'
+ assert (
+ g_count_signature == cg_count_signature
+ ), "canonicalization changed node position counts"
def test_issue682_signing_named_graphs():
@@ -294,11 +294,11 @@ def test_issue682_signing_named_graphs():
gmary = Graph(store=store, identifier=cmary)
- gmary.add((mary, ns['hasName'], Literal("Mary")))
- gmary.add((mary, ns['loves'], john))
+ gmary.add((mary, ns["hasName"], Literal("Mary")))
+ gmary.add((mary, ns["loves"], john))
gjohn = Graph(store=store, identifier=cjohn)
- gjohn.add((john, ns['hasName'], Literal("John")))
+ gjohn.add((john, ns["hasName"], Literal("John")))
ig = to_isomorphic(g)
igmary = to_isomorphic(gmary)
@@ -312,69 +312,109 @@ def test_issue682_signing_named_graphs():
def test_issue725_collapsing_bnodes_2():
g = Graph()
g += [
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v2')),
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v0')),
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v1')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v0')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v5')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v4')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- URIRef(u'urn:gp_learner:fixed_var:source')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v0')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v1')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v3')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement'))
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v2"),
+ ),
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v0"),
+ ),
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v1"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v0"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v5"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v4"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ URIRef(u"urn:gp_learner:fixed_var:source"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v0"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v1"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v3"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
]
- turtle = '''
+ turtle = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@@ -403,7 +443,7 @@ def test_issue725_collapsing_bnodes_2():
[] a rdf:Statement ;
rdf:object _:v1 ;
rdf:predicate [ ] ;
- rdf:subject <urn:gp_learner:fixed_var:target> .'''
+ rdf:subject <urn:gp_learner:fixed_var:target> ."""
# g = Graph()
# g.parse(data=turtle, format='turtle')
@@ -436,16 +476,16 @@ def test_issue725_collapsing_bnodes_2():
# [len(list(cg.triples([None, None, node]))) for node in cg.all_nodes()]))
# print(cg.serialize(format='n3'))
- assert (len(g.all_nodes()) == len(cg.all_nodes()))
+ assert len(g.all_nodes()) == len(cg.all_nodes())
cg = to_canonical_graph(g)
- assert len(g) == len(cg), \
- 'canonicalization changed number of triples in graph'
- assert len(g.all_nodes()) == len(cg.all_nodes()), \
- 'canonicalization changed number of nodes in graph'
- assert len(list(g.subjects(RDF['type'], RDF['Statement']))) == \
- len(list(cg.subjects(RDF['type'], RDF['Statement']))), \
- 'canonicalization changed number of statements'
+ assert len(g) == len(cg), "canonicalization changed number of triples in graph"
+ assert len(g.all_nodes()) == len(
+ cg.all_nodes()
+ ), "canonicalization changed number of nodes in graph"
+ assert len(list(g.subjects(RDF["type"], RDF["Statement"]))) == len(
+ list(cg.subjects(RDF["type"], RDF["Statement"]))
+ ), "canonicalization changed number of statements"
# counter for subject, predicate and object nodes
g_pos_counts = Counter(), Counter(), Counter()
@@ -460,5 +500,6 @@ def test_issue725_collapsing_bnodes_2():
cg_pos_counts[i][t] += 1
cg_count_signature = [sorted(c.values()) for c in cg_pos_counts]
- assert g_count_signature == cg_count_signature, \
- 'canonicalization changed node position counts'
+ assert (
+ g_count_signature == cg_count_signature
+ ), "canonicalization changed node position counts"
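The invariants asserted here are the heart of both regression tests: to_canonical_graph() relabels blank nodes deterministically but must not merge distinct ones, so triple count, node count and node degrees survive canonicalization. A compact sketch of the property (an assumed small example, not the issue's original graph):

from rdflib import Graph, BNode, URIRef
from rdflib.compare import to_canonical_graph

g = Graph()
p = URIRef("urn:p")
g.add((BNode(), p, BNode()))
g.add((BNode(), p, BNode()))   # structurally identical second triple

cg = to_canonical_graph(g)
assert len(g) == len(cg)                           # no triples collapsed
assert len(g.all_nodes()) == len(cg.all_nodes())   # no bnodes collapsed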
diff --git a/test/test_comparison.py b/test/test_comparison.py
index 3c8e50d4..8455598c 100644
--- a/test/test_comparison.py
+++ b/test/test_comparison.py
@@ -33,7 +33,6 @@ Ah... it's coming back to me...
class IdentifierEquality(unittest.TestCase):
-
def setUp(self):
self.uriref = URIRef("http://example.org/")
self.bnode = BNode()
@@ -66,7 +65,11 @@ class IdentifierEquality(unittest.TestCase):
self.assertEqual("foo" in CORE_SYNTAX_TERMS, False)
def testH(self):
- self.assertEqual(URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF") in CORE_SYNTAX_TERMS, True)
+ self.assertEqual(
+ URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF")
+ in CORE_SYNTAX_TERMS,
+ True,
+ )
def testI(self):
g = Graph()
diff --git a/test/test_conjunctive_graph.py b/test/test_conjunctive_graph.py
index 5c686027..41bf432f 100644
--- a/test/test_conjunctive_graph.py
+++ b/test/test_conjunctive_graph.py
@@ -19,9 +19,9 @@ def test_bnode_publicid():
g = ConjunctiveGraph()
b = BNode()
- data = '<d:d> <e:e> <f:f> .'
+ data = "<d:d> <e:e> <f:f> ."
print("Parsing %r into %r" % (data, b))
- g.parse(data=data, format='turtle', publicID=b)
+ g.parse(data=data, format="turtle", publicID=b)
triples = list(g.get_context(b).triples((None, None, None)))
if not triples:
@@ -36,8 +36,8 @@ def test_bnode_publicid():
def test_quad_contexts():
g = ConjunctiveGraph()
- a = URIRef('urn:a')
- b = URIRef('urn:b')
+ a = URIRef("urn:a")
+ b = URIRef("urn:b")
g.get_context(a).add((a, a, a))
g.addN([(b, b, b, b)])
@@ -57,11 +57,12 @@ def test_graph_ids():
yield check, dict(data=DATA, publicID=PUBLIC_ID, format="turtle")
- source = StringInputSource(DATA.encode('utf8'))
+ source = StringInputSource(DATA.encode("utf8"))
source.setPublicId(PUBLIC_ID)
- yield check, dict(source=source, format='turtle')
+ yield check, dict(source=source, format="turtle")
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
+
nose.main(defaultTest=__name__)
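test_quad_contexts above relies on ConjunctiveGraph routing quads by their fourth element: a context identifier selects (or creates) the named graph the triple lands in. Roughly:

from rdflib import ConjunctiveGraph, URIRef

cg = ConjunctiveGraph()
a, b = URIRef("urn:a"), URIRef("urn:b")
cg.get_context(a).add((a, a, a))   # triple stored in context <urn:a>
cg.addN([(b, b, b, b)])            # quad form: triple (b, b, b) in <urn:b>

assert len(list(cg.get_context(a))) == 1
assert len(list(cg.get_context(b))) == 1
assert len(cg) == 2                # the union view sees both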
diff --git a/test/test_conneg.py b/test/test_conneg.py
index 04b63d53..b8eee3bc 100644
--- a/test/test_conneg.py
+++ b/test/test_conneg.py
@@ -56,16 +56,15 @@ class TestHTTPHandler(BaseHTTPRequestHandler):
self.send_header("Content-type", rct)
self.end_headers()
- self.wfile.write(content.encode('utf-8'))
+ self.wfile.write(content.encode("utf-8"))
def log_message(self, *args):
pass
-def runHttpServer(server_class=HTTPServer,
- handler_class=TestHTTPHandler):
+def runHttpServer(server_class=HTTPServer, handler_class=TestHTTPHandler):
"""Start a server than can handle 3 requests :)"""
- server_address = ('localhost', 12345)
+ server_address = ("localhost", 12345)
httpd = server_class(server_address, handler_class)
httpd.handle_request()
@@ -87,5 +86,6 @@ if __name__ == "__main__":
import sys
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_conventions.py b/test/test_conventions.py
index 268047d7..11d7636a 100644
--- a/test/test_conventions.py
+++ b/test/test_conventions.py
@@ -12,10 +12,9 @@ modules should all be lower-case initial
class A(unittest.TestCase):
-
def module_names(self, path=None, names=None):
- skip_as_ignorably_private = ['embeddedRDF', 'OpenID', 'DublinCore']
+ skip_as_ignorably_private = ["embeddedRDF", "OpenID", "DublinCore"]
if path is None:
path = rdflib.__path__
@@ -23,13 +22,14 @@ class A(unittest.TestCase):
names = set()
# TODO: handle cases where len(path) is not 1
- assert len(path) == 1, "We're assuming the path has exactly one item in it for now"
+ assert (
+ len(path) == 1
+ ), "We're assuming the path has exactly one item in it for now"
path = path[0]
for importer, name, ispkg in pkgutil.iter_modules([path]):
if ispkg:
- result = self.module_names(path=os.path.join(path, name),
- names=names)
+ result = self.module_names(path=os.path.join(path, name), names=names)
names.union(result)
else:
if name != name.lower() and name not in skip_as_ignorably_private:
@@ -38,8 +38,7 @@ class A(unittest.TestCase):
def test_module_names(self):
names = self.module_names()
- self.assertTrue(
- names == set(), "module names '%s' are not lower case" % names)
+ self.assertTrue(names == set(), "module names '%s' are not lower case" % names)
if __name__ == "__main__":
diff --git a/test/test_core_sparqlstore.py b/test/test_core_sparqlstore.py
index 26c7554d..622e4a24 100644
--- a/test/test_core_sparqlstore.py
+++ b/test/test_core_sparqlstore.py
@@ -1,9 +1,10 @@
import unittest
from rdflib.graph import Graph
+
class TestSPARQLStoreGraphCore(unittest.TestCase):
- store_name = 'SPARQLStore'
+ store_name = "SPARQLStore"
path = "http://dbpedia.org/sparql"
storetest = True
create = False
@@ -21,5 +22,5 @@ class TestSPARQLStoreGraphCore(unittest.TestCase):
print("Done")
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
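The smoke test above boils down to: a SPARQLStore-backed Graph is a read-only view over a remote endpoint, with query() calls translated into HTTP requests. A hedged sketch, runnable only while the endpoint is reachable:

from rdflib import Graph
from rdflib.plugins.stores.sparqlstore import SPARQLStore

store = SPARQLStore("http://dbpedia.org/sparql")   # query endpoint URL
g = Graph(store)
for row in g.query("SELECT ?s WHERE { ?s ?p ?o } LIMIT 3"):
    print(row.s)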
diff --git a/test/test_dataset.py b/test/test_dataset.py
index 9fcf424a..ef7eda76 100644
--- a/test/test_dataset.py
+++ b/test/test_dataset.py
@@ -22,12 +22,12 @@ from nose.exc import SkipTest
# THIS WILL DELETE ALL DATA IN THE /db dataset
-HOST = 'http://localhost:3030'
-DB = '/db/'
+HOST = "http://localhost:3030"
+DB = "/db/"
class DatasetTestCase(unittest.TestCase):
- store = 'default'
+ store = "default"
slow = True
tmppath = None
@@ -35,11 +35,9 @@ class DatasetTestCase(unittest.TestCase):
try:
self.graph = Dataset(store=self.store)
except ImportError:
- raise SkipTest(
- "Dependencies for store '%s' not available!" % self.store)
+ raise SkipTest("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
- _, self.tmppath = mkstemp(
- prefix='test', dir='/tmp', suffix='.sqlite')
+ _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
elif self.store == "SPARQLUpdateStore":
root = HOST + DB
self.graph.open((root + "sparql", root + "update"))
@@ -48,17 +46,17 @@ class DatasetTestCase(unittest.TestCase):
if self.store != "SPARQLUpdateStore":
self.graph.open(self.tmppath, create=True)
- self.michel = URIRef(u'urn:michel')
- self.tarek = URIRef(u'urn:tarek')
- self.bob = URIRef(u'urn:bob')
- self.likes = URIRef(u'urn:likes')
- self.hates = URIRef(u'urn:hates')
- self.pizza = URIRef(u'urn:pizza')
- self.cheese = URIRef(u'urn:cheese')
+ self.michel = URIRef(u"urn:michel")
+ self.tarek = URIRef(u"urn:tarek")
+ self.bob = URIRef(u"urn:bob")
+ self.likes = URIRef(u"urn:likes")
+ self.hates = URIRef(u"urn:hates")
+ self.pizza = URIRef(u"urn:pizza")
+ self.cheese = URIRef(u"urn:cheese")
# Use regular URIs because SPARQL endpoints like Fuseki alter short names
- self.c1 = URIRef(u'urn:context-1')
- self.c2 = URIRef(u'urn:context-2')
+ self.c1 = URIRef(u"urn:context-1")
+ self.c2 = URIRef(u"urn:context-2")
# delete the graph for each test!
self.graph.remove((None, None, None))
@@ -89,8 +87,10 @@ class DatasetTestCase(unittest.TestCase):
# empty named graphs
if self.store != "SPARQLUpdateStore":
# added graph exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([self.c1, DATASET_DEFAULT_GRAPH_ID]),
+ )
# added graph is empty
self.assertEqual(len(g1), 0)
@@ -98,8 +98,10 @@ class DatasetTestCase(unittest.TestCase):
g1.add((self.tarek, self.likes, self.pizza))
# added graph still exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([self.c1, DATASET_DEFAULT_GRAPH_ID]),
+ )
# added graph contains one triple
self.assertEqual(len(g1), 1)
@@ -113,60 +115,70 @@ class DatasetTestCase(unittest.TestCase):
# empty named graphs
if self.store != "SPARQLUpdateStore":
# graph still exists, although empty
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([self.c1, DATASET_DEFAULT_GRAPH_ID]),
+ )
g.remove_graph(self.c1)
# graph is gone
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([DATASET_DEFAULT_GRAPH_ID]),
+ )
def testDefaultGraph(self):
# Sometimes the default graph is read-only (e.g. TDB in union mode)
if self.store == "SPARQLUpdateStore":
- print("Please make sure updating the default graph "
- "is supported by your SPARQL endpoint")
+ print(
+ "Please make sure updating the default graph "
+ "is supported by your SPARQL endpoint"
+ )
self.graph.add((self.tarek, self.likes, self.pizza))
self.assertEqual(len(self.graph), 1)
# only default exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([DATASET_DEFAULT_GRAPH_ID]),
+ )
# removing default graph removes triples but not actual graph
self.graph.remove_graph(DATASET_DEFAULT_GRAPH_ID)
self.assertEqual(len(self.graph), 0)
# default still exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([DATASET_DEFAULT_GRAPH_ID]),
+ )
def testNotUnion(self):
# Union depends on the SPARQL endpoint configuration
if self.store == "SPARQLUpdateStore":
- print("Please make sure your SPARQL endpoint has not configured "
- "its default graph as the union of the named graphs")
+ print(
+ "Please make sure your SPARQL endpoint has not configured "
+ "its default graph as the union of the named graphs"
+ )
g1 = self.graph.graph(self.c1)
g1.add((self.tarek, self.likes, self.pizza))
- self.assertEqual(list(self.graph.objects(self.tarek, None)),
- [])
+ self.assertEqual(list(self.graph.objects(self.tarek, None)), [])
self.assertEqual(list(g1.objects(self.tarek, None)), [self.pizza])
# dynamically create classes for each registered Store
pluginname = None
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ('default', 'IOMemory', 'Auditable',
- 'Concurrent', 'SPARQLStore'):
+ if s.name in ("default", "IOMemory", "Auditable", "Concurrent", "SPARQLStore"):
continue # these are tested by default
if not s.getClass().graph_aware:
@@ -174,16 +186,18 @@ for s in plugin.plugins(pluginname, plugin.Store):
if s.name == "SPARQLUpdateStore":
from urllib.request import urlopen
+
try:
assert len(urlopen(HOST).read()) > 0
except:
sys.stderr.write("No SPARQL endpoint for %s (tests skipped)\n" % s.name)
continue
- locals()["t%d" % tests] = type("%sContextTestCase" % s.name, (
- DatasetTestCase,), {"store": s.name})
+ locals()["t%d" % tests] = type(
+ "%sContextTestCase" % s.name, (DatasetTestCase,), {"store": s.name}
+ )
tests += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
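The recurring pattern in these Dataset tests: named graphs are managed explicitly with graph() and remove_graph(), while the default graph always exists under DATASET_DEFAULT_GRAPH_ID. Distilled:

from rdflib import Dataset, URIRef
from rdflib.graph import DATASET_DEFAULT_GRAPH_ID

ds = Dataset()
c1 = URIRef("urn:context-1")

g1 = ds.graph(c1)   # create (or fetch) the named graph
g1.add((URIRef("urn:tarek"), URIRef("urn:likes"), URIRef("urn:pizza")))
assert set(x.identifier for x in ds.contexts()) == {c1, DATASET_DEFAULT_GRAPH_ID}

ds.remove_graph(c1)  # drops the graph itself, not just its triples
assert set(x.identifier for x in ds.contexts()) == {DATASET_DEFAULT_GRAPH_ID}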
diff --git a/test/test_datetime.py b/test/test_datetime.py
index a8b68995..d71fc392 100644
--- a/test/test_datetime.py
+++ b/test/test_datetime.py
@@ -13,8 +13,10 @@ from rdflib.namespace import XSD
class TestRelativeBase(unittest.TestCase):
def test_equality(self):
- x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
self.assertEqual(x == x, True)
def test_microseconds(self):
@@ -23,7 +25,7 @@ class TestRelativeBase(unittest.TestCase):
# datetime with microseconds should be cast as a literal using
# XML Schema dateTime as the literal datatype
- self.assertEqual(str(l), '2009-06-15T23:37:06.522630')
+ self.assertEqual(str(l), "2009-06-15T23:37:06.522630")
self.assertEqual(l.datatype, XSD.dateTime)
dt2 = l.toPython()
@@ -31,45 +33,41 @@ class TestRelativeBase(unittest.TestCase):
def test_to_python(self):
dt = "2008-12-01T18:02:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
self.assertEqual(l.toPython().isoformat(), dt)
def test_timezone_z(self):
dt = "2008-12-01T18:02:00.522630Z"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
- self.assertEqual(datetime_isoformat(l.toPython(),
- DATE_EXT_COMPLETE + 'T' + '%H:%M:%S.%f' + TZ_EXT),
- dt)
- self.assertEqual(l.toPython().isoformat(),
- "2008-12-01T18:02:00.522630+00:00")
+ self.assertEqual(
+ datetime_isoformat(
+ l.toPython(), DATE_EXT_COMPLETE + "T" + "%H:%M:%S.%f" + TZ_EXT
+ ),
+ dt,
+ )
+ self.assertEqual(l.toPython().isoformat(), "2008-12-01T18:02:00.522630+00:00")
def test_timezone_offset(self):
dt = "2010-02-10T12:36:00+03:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
self.assertEqual(l.toPython().isoformat(), dt)
def test_timezone_offset_to_utc(self):
dt = "2010-02-10T12:36:00+03:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
utc_dt = l.toPython().astimezone(UTC)
- self.assertEqual(datetime_isoformat(utc_dt),
- "2010-02-10T09:36:00Z")
+ self.assertEqual(datetime_isoformat(utc_dt), "2010-02-10T09:36:00Z")
def test_timezone_offset_millisecond(self):
dt = "2011-01-16T19:39:18.239743+01:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
self.assertEqual(l.toPython().isoformat(), dt)
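All of these assertions reduce to Literal round-tripping: an xsd:dateTime literal yields a datetime (timezone-aware when an offset is present) via toPython(), and a Python datetime passed to Literal() is typed as xsd:dateTime automatically. For instance:

from datetime import datetime
from rdflib import Literal
from rdflib.namespace import XSD

l = Literal("2010-02-10T12:36:00+03:00", datatype=XSD.dateTime)
assert l.toPython().isoformat() == "2010-02-10T12:36:00+03:00"

l2 = Literal(datetime(2009, 6, 15, 23, 37, 6, 522630))
assert l2.datatype == XSD.dateTime   # inferred from the Python type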
diff --git a/test/test_dawg.py b/test/test_dawg.py
index b7343426..80289738 100644
--- a/test/test_dawg.py
+++ b/test/test_dawg.py
@@ -21,23 +21,21 @@ except:
defaultdict.__init__(self, int)
def most_common(self, N):
- return [x[0] for x in sorted(self.items(),
- key=itemgetter(1),
- reverse=True)[:10]]
+ return [
+ x[0] for x in sorted(self.items(), key=itemgetter(1), reverse=True)[:10]
+ ]
import datetime
import isodate
-from rdflib import (
- Dataset, Graph, URIRef, BNode)
+from rdflib import Dataset, Graph, URIRef, BNode
from rdflib.query import Result
from rdflib.compare import isomorphic
from rdflib.plugins import sparql as rdflib_sparql_module
-from rdflib.plugins.sparql.algebra import (
- pprintAlgebra, translateQuery, translateUpdate)
+from rdflib.plugins.sparql.algebra import pprintAlgebra, translateQuery, translateUpdate
from rdflib.plugins.sparql.parser import parseQuery, parseUpdate
from rdflib.plugins.sparql.results.rdfresults import RDFResultParser
from rdflib.plugins.sparql.update import evalUpdate
@@ -52,12 +50,15 @@ from nose import SkipTest
from .manifest import nose_tests, MF, UP
from .earl import report, add_test
+
+
def eq(a, b, msg):
- return eq_(a, b, msg + ': (%r!=%r)' % (a, b))
+ return eq_(a, b, msg + ": (%r!=%r)" % (a, b))
def setFlags():
import rdflib
+
# Several tests rely on lexical form of literals being kept!
rdflib.NORMALIZE_LITERALS = False
@@ -70,6 +71,7 @@ def setFlags():
def resetFlags():
import rdflib
+
# Several tests rely on lexical form of literals being kept!
rdflib.NORMALIZE_LITERALS = True
@@ -114,8 +116,12 @@ def bopen_read_close(fn):
try:
with open("skiptests.list") as skip_tests_f:
- skiptests = dict([(URIRef(x.strip().split(
- "\t")[0]), x.strip().split("\t")[1]) for x in skip_tests_f])
+ skiptests = dict(
+ [
+ (URIRef(x.strip().split("\t")[0]), x.strip().split("\t")[1])
+ for x in skip_tests_f
+ ]
+ )
except IOError:
skiptests = set()
@@ -163,8 +169,8 @@ def bindingsCompatible(a, b):
else:
m[b1] = y[v1]
else:
- # if y[v1]!=b1:
- # return False
+ # if y[v1]!=b1:
+ # return False
try:
if y[v1].neq(b1):
return False
@@ -191,9 +197,14 @@ def pp_binding(solutions):
"""
Pretty print a single binding - for less eye-strain when debugging
"""
- return "\n[" + ",\n\t".join("{" + ", ".join("%s:%s" % (
- x[0], x[1].n3()) for x in bindings.items()) + "}"
- for bindings in solutions) + "]\n"
+ return (
+ "\n["
+ + ",\n\t".join(
+ "{" + ", ".join("%s:%s" % (x[0], x[1].n3()) for x in bindings.items()) + "}"
+ for bindings in solutions
+ )
+ + "]\n"
+ )
@nottest
@@ -246,17 +257,21 @@ def update_test(t):
for x, l in resgraphdata:
resg.load(x, publicID=URIRef(l), format=_fmt(x))
- eq(set(x.identifier for x in g.contexts() if x != g.default_context),
- set(x.identifier for x in resg.contexts()
- if x != resg.default_context), 'named graphs in datasets do not match')
- assert isomorphic(g.default_context, resg.default_context), \
- 'Default graphs are not isomorphic'
+ eq(
+ set(x.identifier for x in g.contexts() if x != g.default_context),
+ set(x.identifier for x in resg.contexts() if x != resg.default_context),
+ "named graphs in datasets do not match",
+ )
+ assert isomorphic(
+ g.default_context, resg.default_context
+ ), "Default graphs are not isomorphic"
for x in g.contexts():
if x == g.default_context:
continue
- assert isomorphic(x, resg.get_context(x.identifier)), \
+ assert isomorphic(x, resg.get_context(x.identifier)), (
"Graphs with ID %s are not isomorphic" % x.identifier
+ )
except Exception as e:
@@ -305,7 +320,7 @@ def update_test(t):
print(bopen_read_close(x[7:]))
print("------------- MY RESULT ----------")
- print(g.serialize(format='trig'))
+ print(g.serialize(format="trig"))
try:
pq = translateUpdate(parseUpdate(bopen_read_close(query[7:])))
@@ -318,6 +333,7 @@ def update_test(t):
print(decodeStringEscape(str(e)))
import pdb
+
pdb.post_mortem(sys.exc_info()[2])
raise
@@ -332,7 +348,7 @@ def query_test(t):
if uri in skiptests:
raise SkipTest()
- def skip(reason='(none)'):
+ def skip(reason="(none)"):
print("Skipping %s from now on." % uri)
with bopen("skiptests.list", "a") as f:
f.write("%s\t%s\n" % (uri, reason))
@@ -350,91 +366,102 @@ def query_test(t):
# no result - syntax test
if syntax:
- translateQuery(parseQuery(
- bopen_read_close(query[7:])), base=urljoin(query, '.'))
+ translateQuery(
+ parseQuery(bopen_read_close(query[7:])), base=urljoin(query, ".")
+ )
else:
# negative syntax test
try:
- translateQuery(parseQuery(
- bopen_read_close(query[7:])), base=urljoin(query, '.'))
+ translateQuery(
+ parseQuery(bopen_read_close(query[7:])),
+ base=urljoin(query, "."),
+ )
- assert False, 'Query should not have parsed!'
+ assert False, "Query should not have parsed!"
except:
pass # it's fine - the query should not parse
return
# eval test - carry out query
- res2 = g.query(bopen_read_close(query[7:]), base=urljoin(query, '.'))
+ res2 = g.query(bopen_read_close(query[7:]), base=urljoin(query, "."))
- if resfile.endswith('ttl'):
+ if resfile.endswith("ttl"):
resg = Graph()
- resg.load(resfile, format='turtle', publicID=resfile)
+ resg.load(resfile, format="turtle", publicID=resfile)
res = RDFResultParser().parse(resg)
- elif resfile.endswith('rdf'):
+ elif resfile.endswith("rdf"):
resg = Graph()
resg.load(resfile, publicID=resfile)
res = RDFResultParser().parse(resg)
else:
with bopen(resfile[7:]) as f:
- if resfile.endswith('srj'):
- res = Result.parse(f, format='json')
- elif resfile.endswith('tsv'):
- res = Result.parse(f, format='tsv')
+ if resfile.endswith("srj"):
+ res = Result.parse(f, format="json")
+ elif resfile.endswith("tsv"):
+ res = Result.parse(f, format="tsv")
- elif resfile.endswith('csv'):
- res = Result.parse(f, format='csv')
+ elif resfile.endswith("csv"):
+ res = Result.parse(f, format="csv")
# CSV is lossy, round-trip our own resultset to
# lose the same info :)
# write bytes, read strings...
s = BytesIO()
- res2.serialize(s, format='csv')
+ res2.serialize(s, format="csv")
s.seek(0)
- res2 = Result.parse(s, format='csv')
+ res2 = Result.parse(s, format="csv")
s.close()
else:
- res = Result.parse(f, format='xml')
+ res = Result.parse(f, format="xml")
if not DETAILEDASSERT:
- eq(res.type, res2.type, 'Types do not match')
- if res.type == 'SELECT':
- eq(set(res.vars), set(res2.vars), 'Vars do not match')
- comp = bindingsCompatible(
- set(res),
- set(res2)
- )
- assert comp, 'Bindings do not match'
- elif res.type == 'ASK':
- eq(res.askAnswer, res2.askAnswer, 'Ask answer does not match')
- elif res.type in ('DESCRIBE', 'CONSTRUCT'):
- assert isomorphic(
- res.graph, res2.graph), 'graphs are not isomorphic!'
+ eq(res.type, res2.type, "Types do not match")
+ if res.type == "SELECT":
+ eq(set(res.vars), set(res2.vars), "Vars do not match")
+ comp = bindingsCompatible(set(res), set(res2))
+ assert comp, "Bindings do not match"
+ elif res.type == "ASK":
+ eq(res.askAnswer, res2.askAnswer, "Ask answer does not match")
+ elif res.type in ("DESCRIBE", "CONSTRUCT"):
+ assert isomorphic(res.graph, res2.graph), "graphs are not isomorphic!"
else:
- raise Exception('Unknown result type: %s' % res.type)
+ raise Exception("Unknown result type: %s" % res.type)
else:
- eq(res.type, res2.type,
- 'Types do not match: %r != %r' % (res.type, res2.type))
- if res.type == 'SELECT':
- eq(set(res.vars),
- set(res2.vars), 'Vars do not match: %r != %r' % (
- set(res.vars), set(res2.vars)))
- assert bindingsCompatible(
- set(res),
- set(res2)
- ), 'Bindings do not match: \nexpected:\n%s\n!=\ngot:\n%s' % (
- res.serialize(format='txt', namespace_manager=g.namespace_manager),
- res2.serialize(format='txt', namespace_manager=g.namespace_manager))
- elif res.type == 'ASK':
- eq(res.askAnswer,
- res2.askAnswer, "Ask answer does not match: %r != %r" % (
- res.askAnswer, res2.askAnswer))
- elif res.type in ('DESCRIBE', 'CONSTRUCT'):
- assert isomorphic(
- res.graph, res2.graph), 'graphs are not isomorphic!'
+ eq(
+ res.type,
+ res2.type,
+ "Types do not match: %r != %r" % (res.type, res2.type),
+ )
+ if res.type == "SELECT":
+ eq(
+ set(res.vars),
+ set(res2.vars),
+ "Vars do not match: %r != %r" % (set(res.vars), set(res2.vars)),
+ )
+ assert bindingsCompatible(set(res), set(res2)), (
+ "Bindings do not match: \nexpected:\n%s\n!=\ngot:\n%s"
+ % (
+ res.serialize(
+ format="txt", namespace_manager=g.namespace_manager
+ ),
+ res2.serialize(
+ format="txt", namespace_manager=g.namespace_manager
+ ),
+ )
+ )
+ elif res.type == "ASK":
+ eq(
+ res.askAnswer,
+ res2.askAnswer,
+ "Ask answer does not match: %r != %r"
+ % (res.askAnswer, res2.askAnswer),
+ )
+ elif res.type in ("DESCRIBE", "CONSTRUCT"):
+ assert isomorphic(res.graph, res2.graph), "graphs are not isomorphic!"
else:
- raise Exception('Unknown result type: %s' % res.type)
+ raise Exception("Unknown result type: %s" % res.type)
except Exception as e:
@@ -478,13 +505,14 @@ def query_test(t):
try:
pq = parseQuery(bopen_read_close(query[7:]))
print("----------------- Parsed ------------------")
- pprintAlgebra(translateQuery(pq, base=urljoin(query, '.')))
+ pprintAlgebra(translateQuery(pq, base=urljoin(query, ".")))
except:
print("(parser error)")
print(decodeStringEscape(str(e)))
import pdb
+
pdb.post_mortem(sys.exc_info()[2])
# pdb.set_trace()
# nose.tools.set_trace()
@@ -496,7 +524,6 @@ testers = {
MF.UpdateEvaluationTest: update_test,
MF.PositiveUpdateSyntaxTest11: update_test,
MF.NegativeUpdateSyntaxTest11: update_test,
-
MF.QueryEvaluationTest: query_test,
MF.NegativeSyntaxTest11: query_test,
MF.PositiveSyntaxTest11: query_test,
@@ -523,10 +550,11 @@ def test_dawg():
resetFlags()
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
import time
+
start = time.time()
if len(sys.argv) > 1:
NAME = sys.argv[1]
@@ -561,6 +589,7 @@ if __name__ == '__main__':
except:
add_test(t[0], "failed", "error")
import traceback
+
traceback.print_exc()
sys.stderr.write("%s\n" % t[0])
@@ -594,12 +623,13 @@ if __name__ == '__main__':
e_sum = sum(errors.values())
if success + f_sum + e_sum + skip != i:
- print("(Something is wrong, %d!=%d)" % (
- success + f_sum + e_sum + skip, i))
+ print("(Something is wrong, %d!=%d)" % (success + f_sum + e_sum + skip, i))
- print("\n%d tests, %d passed, %d failed, %d errors, \
- %d skipped (%.2f%% success)" % (
- i, success, f_sum, e_sum, skip, 100. * success / i))
+ print(
+ "\n%d tests, %d passed, %d failed, %d errors, \
+ %d skipped (%.2f%% success)"
+ % (i, success, f_sum, e_sum, skip, 100.0 * success / i)
+ )
print("Took %.2fs" % (time.time() - start))
if not NAME:
@@ -609,12 +639,12 @@ if __name__ == '__main__':
with open("testruns.txt", "a") as tf:
tf.write(
"%s\n%d tests, %d passed, %d failed, %d errors, %d "
- "skipped (%.2f%% success)\n\n" % (
- now, i, success, f_sum, e_sum, skip, 100. * success / i)
+ "skipped (%.2f%% success)\n\n"
+ % (now, i, success, f_sum, e_sum, skip, 100.0 * success / i)
)
- earl_report = 'test_reports/rdflib_sparql-%s.ttl' % now.replace(":", "")
+ earl_report = "test_reports/rdflib_sparql-%s.ttl" % now.replace(":", "")
- report.serialize(earl_report, format='n3')
- report.serialize('test_reports/rdflib_sparql-latest.ttl', format='n3')
+ report.serialize(earl_report, format="n3")
+ report.serialize("test_reports/rdflib_sparql-latest.ttl", format="n3")
print("Wrote EARL-report to '%s'" % earl_report)
diff --git a/test/test_diff.py b/test/test_diff.py
index bf49dd9d..7e4db728 100644
--- a/test/test_diff.py
+++ b/test/test_diff.py
@@ -13,7 +13,7 @@ class TestDiff(unittest.TestCase):
def testA(self):
"""with bnode"""
g = rdflib.Graph()
- g.add((rdflib.BNode(), rdflib.URIRef("urn:p"), rdflib.Literal(u'\xe9')))
+ g.add((rdflib.BNode(), rdflib.URIRef("urn:p"), rdflib.Literal(u"\xe9")))
diff = graph_diff(g, g)
@@ -21,7 +21,7 @@ class TestDiff(unittest.TestCase):
"""Curiously, this one passes, even before the fix in issue 151"""
g = rdflib.Graph()
- g.add((rdflib.URIRef("urn:a"), rdflib.URIRef("urn:p"), rdflib.Literal(u'\xe9')))
+ g.add((rdflib.URIRef("urn:a"), rdflib.URIRef("urn:p"), rdflib.Literal(u"\xe9")))
diff = graph_diff(g, g)
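graph_diff(), called here with the same graph on both sides, canonicalizes blank nodes before comparing and returns three graphs: the triples in both, those only in the first, and those only in the second. Sketch:

from rdflib import Graph, URIRef, Literal
from rdflib.compare import graph_diff

g1, g2 = Graph(), Graph()
g1.add((URIRef("urn:a"), URIRef("urn:p"), Literal("x")))
g2.add((URIRef("urn:a"), URIRef("urn:p"), Literal("x")))
g2.add((URIRef("urn:a"), URIRef("urn:p"), Literal("y")))

in_both, in_first, in_second = graph_diff(g1, g2)
assert len(in_both) == 1 and len(in_first) == 0 and len(in_second) == 1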
diff --git a/test/test_duration.py b/test/test_duration.py
index 07542a45..cdea7ab7 100644
--- a/test/test_duration.py
+++ b/test/test_duration.py
@@ -30,13 +30,15 @@ class TestDuration(unittest.TestCase):
def test_duration_le(self):
self.assertTrue(
- Literal("P4DT5H6M7S", datatype=XSD.duration) < Literal("P8DT10H12M14S", datatype=XSD.duration)
+ Literal("P4DT5H6M7S", datatype=XSD.duration)
+ < Literal("P8DT10H12M14S", datatype=XSD.duration)
)
def test_duration_sum(self):
self.assertEqual(
- Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration) + Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration).toPython(),
- Literal("P2Y4M8DT10H12M14S", datatype=XSD.duration)
+ Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration)
+ + Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration).toPython(),
+ Literal("P2Y4M8DT10H12M14S", datatype=XSD.duration),
)
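
Context for the duration tests: rdflib maps `xsd:duration` literals to parsed duration values (via the `isodate` dependency), so ordering and addition are delegated to those values rather than the lexical strings. A small sketch under that assumption:

    from rdflib import Literal
    from rdflib.namespace import XSD

    shorter = Literal("P4DT5H6M7S", datatype=XSD.duration)
    longer = Literal("P8DT10H12M14S", datatype=XSD.duration)

    # Comparison operates on the parsed duration values, not the strings.
    assert shorter < longer
    # toPython() exposes the underlying duration/timedelta value.
    print(repr(shorter.toPython()))
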
diff --git a/test/test_empty_xml_base.py b/test/test_empty_xml_base.py
index 75e564a7..2f3364b8 100644
--- a/test/test_empty_xml_base.py
+++ b/test/test_empty_xml_base.py
@@ -13,7 +13,7 @@ from io import StringIO
import unittest
-FOAF = Namespace('http://xmlns.com/foaf/0.1/')
+FOAF = Namespace("http://xmlns.com/foaf/0.1/")
test_data = """
<rdf:RDF
@@ -36,8 +36,8 @@ test_data2 = """
</rdf:RDF>"""
-baseUri = URIRef('http://example.com/')
-baseUri2 = URIRef('http://example.com/foo/bar')
+baseUri = URIRef("http://example.com/")
+baseUri2 = URIRef("http://example.com/foo/bar")
class TestEmptyBase(unittest.TestCase):
@@ -46,10 +46,13 @@ class TestEmptyBase(unittest.TestCase):
self.graph.parse(StringIO(test_data), publicID=baseUri)
def test_base_ref(self):
- self.assertTrue(len(self.graph) == 1,
- "There should be at least one statement in the graph")
- self.assertTrue((baseUri, RDF.type, FOAF.Document) in self.graph,
- "There should be a triple with %s as the subject" % baseUri)
+ self.assertTrue(
+            len(self.graph) == 1, "There should be exactly one statement in the graph"
+ )
+ self.assertTrue(
+ (baseUri, RDF.type, FOAF.Document) in self.graph,
+ "There should be a triple with %s as the subject" % baseUri,
+ )
class TestRelativeBase(unittest.TestCase):
@@ -58,11 +61,14 @@ class TestRelativeBase(unittest.TestCase):
self.graph.parse(StringIO(test_data2), publicID=baseUri2)
def test_base_ref(self):
- self.assertTrue(len(self.graph) == 1,
- "There should be at least one statement in the graph")
- resolvedBase = URIRef('http://example.com/baz')
- self.assertTrue((resolvedBase, RDF.type, FOAF.Document) in self.graph,
- "There should be a triple with %s as the subject" % resolvedBase)
+ self.assertTrue(
+            len(self.graph) == 1, "There should be exactly one statement in the graph"
+ )
+ resolvedBase = URIRef("http://example.com/baz")
+ self.assertTrue(
+ (resolvedBase, RDF.type, FOAF.Document) in self.graph,
+ "There should be a triple with %s as the subject" % resolvedBase,
+ )
if __name__ == "__main__":
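
The two cases above boil down to standard RFC 3986 reference resolution against the `publicID`: an empty `rdf:about=""` resolves to the base itself, while a relative name replaces the last path segment. The same resolution, sketched with the standard library rather than rdflib internals:

    from urllib.parse import urljoin

    # rdf:about="" resolves to the base URI itself ...
    assert urljoin("http://example.com/", "") == "http://example.com/"
    # ... while rdf:about="baz" replaces the last segment of the base path.
    assert urljoin("http://example.com/foo/bar", "baz") == "http://example.com/foo/baz"
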
diff --git a/test/test_evaluate_bind.py b/test/test_evaluate_bind.py
index bd4ea440..382b4ed5 100644
--- a/test/test_evaluate_bind.py
+++ b/test/test_evaluate_bind.py
@@ -8,19 +8,29 @@ from rdflib import Graph, URIRef, Literal, Variable
def test_bind():
base = "http://example.org/"
g = Graph()
- g.add((URIRef(
- base + "thing"), URIRef(base + "ns#comment"), Literal("anything")))
+ g.add((URIRef(base + "thing"), URIRef(base + "ns#comment"), Literal("anything")))
def check(expr, var, obj):
- r = g.query("""
+ r = g.query(
+ """
prefix : <http://example.org/ns#>
- select * where { ?s ?p ?o . %s } """ % expr)
+ select * where { ?s ?p ?o . %s } """
+ % expr
+ )
assert r.bindings[0][Variable(var)] == obj
- yield (check, 'bind("thing" as ?name)', 'name', Literal("thing"))
+ yield (check, 'bind("thing" as ?name)', "name", Literal("thing"))
- yield (check, 'bind(<http://example.org/other> as ?other)', 'other',
- URIRef("http://example.org/other"))
+ yield (
+ check,
+ "bind(<http://example.org/other> as ?other)",
+ "other",
+ URIRef("http://example.org/other"),
+ )
- yield (check, "bind(:Thing as ?type)", 'type',
- URIRef("http://example.org/ns#Thing"))
+ yield (
+ check,
+ "bind(:Thing as ?type)",
+ "type",
+ URIRef("http://example.org/ns#Thing"),
+ )
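
These checks all share one shape: run a query containing a BIND clause and read the binding back from `result.bindings`. A self-contained sketch (here BIND alone supplies the row, so even an empty graph should do):

    from rdflib import Graph, Literal, Variable

    g = Graph()
    res = g.query('SELECT ?name WHERE { BIND("thing" AS ?name) }')
    # bindings is a list of solution mappings keyed by Variable.
    assert res.bindings[0][Variable("name")] == Literal("thing")
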
diff --git a/test/test_events.py b/test/test_events.py
index f7f706a9..6b413781 100644
--- a/test/test_events.py
+++ b/test/test_events.py
@@ -1,4 +1,3 @@
-
import unittest
from rdflib import events
@@ -24,7 +23,6 @@ def subscribe_all(caches):
class Cache(events.Dispatcher):
-
def __init__(self, data=None):
if data is None:
data = {}
@@ -54,18 +52,17 @@ class Cache(events.Dispatcher):
class EventTestCase(unittest.TestCase):
-
def testEvents(self):
c1 = Cache()
c2 = Cache()
c3 = Cache()
subscribe_all([c1, c2, c3])
- c1['bob'] = 'uncle'
- assert c2['bob'] == 'uncle'
- assert c3['bob'] == 'uncle'
- del c3['bob']
- assert ('bob' in c1) == False
- assert ('bob' in c2) == False
+ c1["bob"] = "uncle"
+ assert c2["bob"] == "uncle"
+ assert c3["bob"] == "uncle"
+ del c3["bob"]
+ assert ("bob" in c1) == False
+ assert ("bob" in c2) == False
if __name__ == "__main__":
diff --git a/test/test_expressions.py b/test/test_expressions.py
index d88d7766..1323e4fc 100644
--- a/test/test_expressions.py
+++ b/test/test_expressions.py
@@ -24,88 +24,89 @@ def _eval(e, ctx=None):
def _translate(e):
- return simplify(traverse(
- e, visitPost=partial(translatePName, prologue=Prologue())))
+ return simplify(traverse(e, visitPost=partial(translatePName, prologue=Prologue())))
def testRegex():
- assert _eval(
- _translate((p.Expression.parseString('REGEX("zxcabczxc","abc")')[0])))
+ assert _eval(_translate((p.Expression.parseString('REGEX("zxcabczxc","abc")')[0])))
- eq(bool(_eval(_translate(
- (p.Expression.parseString('REGEX("zxczxc","abc")')[0])))), False)
+ eq(
+ bool(_eval(_translate((p.Expression.parseString('REGEX("zxczxc","abc")')[0])))),
+ False,
+ )
- assert _eval(_translate(
- (p.Expression.parseString('REGEX("bbbaaaaabbb","ba*b")')[0])))
+ assert _eval(
+ _translate((p.Expression.parseString('REGEX("bbbaaaaabbb","ba*b")')[0]))
+ )
def test_arithmetic():
- eq(_eval(_translate((p.Expression.parseString('2+3')[0]))).value, 5)
- eq(_eval(_translate((p.Expression.parseString('3-2')[0]))).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("2+3")[0]))).value, 5)
+ eq(_eval(_translate((p.Expression.parseString("3-2")[0]))).value, 1)
- eq(_eval(_translate((p.Expression.parseString('2*3')[0]))).value, 6)
- eq(_eval(_translate((p.Expression.parseString('4/2')[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("2*3")[0]))).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("4/2")[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('2+2+2')[0]))).value, 6)
- eq(_eval(_translate((p.Expression.parseString('2-2+2')[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('(2-2)+2')[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('2-(2+2)')[0]))).value, -2)
+ eq(_eval(_translate((p.Expression.parseString("2+2+2")[0]))).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("2-2+2")[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("(2-2)+2")[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("2-(2+2)")[0]))).value, -2)
- eq(_eval(_translate((p.Expression.parseString('2*2*2')[0]))).value, 8)
- eq(_eval(_translate((p.Expression.parseString('4/2*2')[0]))).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/4*2')[0]))).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/(4*2)')[0]))).value, 1)
- eq(_eval(_translate((p.Expression.parseString('(2/2)*2')[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('4/(2*2)')[0]))).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("2*2*2")[0]))).value, 8)
+ eq(_eval(_translate((p.Expression.parseString("4/2*2")[0]))).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/4*2")[0]))).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/(4*2)")[0]))).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("(2/2)*2")[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("4/(2*2)")[0]))).value, 1)
- eq(_eval(_translate((p.Expression.parseString('2+3*2')[0]))).value, 8)
- eq(_eval(_translate((p.Expression.parseString('(2+3)*2')[0]))).value, 10)
- eq(_eval(_translate((p.Expression.parseString('2+4/2')[0]))).value, 4)
- eq(_eval(_translate((p.Expression.parseString('(2+4)/2')[0]))).value, 3)
+ eq(_eval(_translate((p.Expression.parseString("2+3*2")[0]))).value, 8)
+ eq(_eval(_translate((p.Expression.parseString("(2+3)*2")[0]))).value, 10)
+ eq(_eval(_translate((p.Expression.parseString("2+4/2")[0]))).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("(2+4)/2")[0]))).value, 3)
def test_arithmetic_var():
ctx = QueryContext()
- ctx[Variable('x')] = Literal(2)
+ ctx[Variable("x")] = Literal(2)
- eq(_eval(_translate((p.Expression.parseString('2+?x')[0])), ctx).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("2+?x")[0])), ctx).value, 4)
- eq(_eval(_translate((p.Expression.parseString('?x+3')[0])), ctx).value, 5)
- eq(_eval(_translate((p.Expression.parseString('3-?x')[0])), ctx).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("?x+3")[0])), ctx).value, 5)
+ eq(_eval(_translate((p.Expression.parseString("3-?x")[0])), ctx).value, 1)
- eq(_eval(_translate((p.Expression.parseString('?x*3')[0])), ctx).value, 6)
- eq(_eval(_translate((p.Expression.parseString('4/?x')[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("?x*3")[0])), ctx).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("4/?x")[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('?x+?x+?x')[0])), ctx).value, 6)
- eq(_eval(_translate((p.Expression.parseString('?x-?x+?x')[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('(?x-?x)+?x')[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('?x-(?x+?x)')[0])), ctx).value, -2)
+ eq(_eval(_translate((p.Expression.parseString("?x+?x+?x")[0])), ctx).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("?x-?x+?x")[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("(?x-?x)+?x")[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("?x-(?x+?x)")[0])), ctx).value, -2)
- eq(_eval(_translate((p.Expression.parseString('?x*?x*?x')[0])), ctx).value, 8)
- eq(_eval(_translate((p.Expression.parseString('4/?x*?x')[0])), ctx).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/4*?x')[0])), ctx).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/(4*?x)')[0])), ctx).value, 1)
- eq(_eval(_translate((p.Expression.parseString('(?x/?x)*?x')[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('4/(?x*?x)')[0])), ctx).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("?x*?x*?x")[0])), ctx).value, 8)
+ eq(_eval(_translate((p.Expression.parseString("4/?x*?x")[0])), ctx).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/4*?x")[0])), ctx).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/(4*?x)")[0])), ctx).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("(?x/?x)*?x")[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("4/(?x*?x)")[0])), ctx).value, 1)
def test_comparisons():
- eq(bool(_eval(_translate((p.Expression.parseString('2<3')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2<3.0')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2<3e0')[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<3")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<3.0")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<3e0")[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('4<3')[0])))), False)
- eq(bool(_eval(_translate((p.Expression.parseString('4<3.0')[0])))), False)
- eq(bool(_eval(_translate((p.Expression.parseString('4<3e0')[0])))), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("4<3")[0])))), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("4<3.0")[0])))), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("4<3e0")[0])))), False)
- eq(bool(_eval(_translate((p.Expression.parseString('2<2.1')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2<21e-1')[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<2.1")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<21e-1")[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2=2.0')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2=2e0')[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2=2.0")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2=2e0")[0])))), True)
eq(bool(_eval(_translate((p.Expression.parseString('2="cake"')[0])))), False)
@@ -113,39 +114,46 @@ def test_comparisons():
def test_comparisons_var():
ctx = QueryContext()
- ctx[Variable('x')] = Literal(2)
+ ctx[Variable("x")] = Literal(2)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3.0')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3e0')[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3.0")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3e0")[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<2.1')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<21e-1')[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<2.1")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<21e-1")[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x=2.0')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x=2e0')[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x=2.0")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x=2e0")[0])), ctx)), True)
eq(bool(_eval(_translate((p.Expression.parseString('?x="cake"')[0])), ctx)), False)
ctx = QueryContext()
- ctx[Variable('x')] = Literal(4)
+ ctx[Variable("x")] = Literal(4)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3')[0])), ctx)), False)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3.0')[0])), ctx)), False)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3e0')[0])), ctx)), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3")[0])), ctx)), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3.0")[0])), ctx)), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3e0")[0])), ctx)), False)
def test_and_or():
- eq(bool(_eval(_translate((p.Expression.parseString('3>2 && 3>1')[0])))), True)
- eq(bool(_eval(
- _translate((p.Expression.parseString('3>2 && 3>4 || 2>1')[0])))), True)
- eq(bool(_eval(
- _translate((p.Expression.parseString('2>1 || 3>2 && 3>4')[0])))), True)
- eq(bool(_eval(_translate(
- (p.Expression.parseString('(2>1 || 3>2) && 3>4')[0])))), False)
-
-
-if __name__ == '__main__':
+ eq(bool(_eval(_translate((p.Expression.parseString("3>2 && 3>1")[0])))), True)
+ eq(
+ bool(_eval(_translate((p.Expression.parseString("3>2 && 3>4 || 2>1")[0])))),
+ True,
+ )
+ eq(
+ bool(_eval(_translate((p.Expression.parseString("2>1 || 3>2 && 3>4")[0])))),
+ True,
+ )
+ eq(
+ bool(_eval(_translate((p.Expression.parseString("(2>1 || 3>2) && 3>4")[0])))),
+ False,
+ )
+
+
+if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
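
The same operator-precedence rules can be spot-checked through the public query API instead of the internal parse/translate/eval pipeline these tests drive; a sketch:

    from rdflib import Graph, Variable

    g = Graph()
    # Multiplication binds tighter than addition: 2+3*2 is 8, not 10.
    res = g.query("SELECT (2+3*2 AS ?r) WHERE { }")
    assert res.bindings[0][Variable("r")].value == 8
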
diff --git a/test/test_extras_external_graph_libs.py b/test/test_extras_external_graph_libs.py
index d3dc45c8..25b69298 100644
--- a/test/test_extras_external_graph_libs.py
+++ b/test/test_extras_external_graph_libs.py
@@ -10,9 +10,10 @@ def test_rdflib_to_networkx():
from rdflib.extras.external_graph_libs import rdflib_to_networkx_multidigraph
from rdflib.extras.external_graph_libs import rdflib_to_networkx_digraph
from rdflib.extras.external_graph_libs import rdflib_to_networkx_graph
+
g = Graph()
- a, b, l = URIRef('a'), URIRef('b'), Literal('l')
- p, q = URIRef('p'), URIRef('q')
+ a, b, l = URIRef("a"), URIRef("b"), Literal("l")
+ p, q = URIRef("p"), URIRef("q")
edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
for t in edges:
g.add(t)
@@ -28,26 +29,26 @@ def test_rdflib_to_networkx():
assert mdg.has_edge(a, b, key=1)
dg = rdflib_to_networkx_digraph(g)
- assert dg[a][b]['weight'] == 2
- assert sorted(dg[a][b]['triples']) == [(a, p, b), (a, q, b)]
+ assert dg[a][b]["weight"] == 2
+ assert sorted(dg[a][b]["triples"]) == [(a, p, b), (a, q, b)]
assert len(dg.edges()) == 3
assert dg.size() == 3
- assert dg.size(weight='weight') == 4.0
+ assert dg.size(weight="weight") == 4.0
dg = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s, p, o: {})
- assert 'weight' not in dg[a][b]
- assert 'triples' not in dg[a][b]
+ assert "weight" not in dg[a][b]
+ assert "triples" not in dg[a][b]
ug = rdflib_to_networkx_graph(g)
- assert ug[a][b]['weight'] == 3
- assert sorted(ug[a][b]['triples']) == [(a, p, b), (a, q, b), (b, p, a)]
+ assert ug[a][b]["weight"] == 3
+ assert sorted(ug[a][b]["triples"]) == [(a, p, b), (a, q, b), (b, p, a)]
assert len(ug.edges()) == 2
assert ug.size() == 2
- assert ug.size(weight='weight') == 4.0
+ assert ug.size(weight="weight") == 4.0
ug = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s, p, o: {})
- assert 'weight' not in ug[a][b]
- assert 'triples' not in ug[a][b]
+ assert "weight" not in ug[a][b]
+ assert "triples" not in ug[a][b]
def test_rdflib_to_graphtool():
@@ -56,9 +57,10 @@ def test_rdflib_to_graphtool():
except ImportError:
raise SkipTest("couldn't find graph_tool")
from rdflib.extras.external_graph_libs import rdflib_to_graphtool
+
g = Graph()
- a, b, l = URIRef('a'), URIRef('b'), Literal('l')
- p, q = URIRef('p'), URIRef('q')
+ a, b, l = URIRef("a"), URIRef("b"), Literal("l")
+ p, q = URIRef("p"), URIRef("q")
edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
for t in edges:
g.add(t)
@@ -66,21 +68,20 @@ def test_rdflib_to_graphtool():
mdg = rdflib_to_graphtool(g)
assert len(list(mdg.edges())) == 4
- vpterm = mdg.vertex_properties['term']
+ vpterm = mdg.vertex_properties["term"]
va = gt_util.find_vertex(mdg, vpterm, a)[0]
vb = gt_util.find_vertex(mdg, vpterm, b)[0]
vl = gt_util.find_vertex(mdg, vpterm, l)[0]
assert (va, vb) in [(e.source(), e.target()) for e in list(mdg.edges())]
- epterm = mdg.edge_properties['term']
+ epterm = mdg.edge_properties["term"]
assert len(list(gt_util.find_edge(mdg, epterm, p))) == 3
assert len(list(gt_util.find_edge(mdg, epterm, q))) == 1
mdg = rdflib_to_graphtool(
- g,
- e_prop_names=[str('name')],
- transform_p=lambda s, p, o: {str('name'): str(p)})
- epterm = mdg.edge_properties['name']
+ g, e_prop_names=[str("name")], transform_p=lambda s, p, o: {str("name"): str(p)}
+ )
+ epterm = mdg.edge_properties["name"]
assert len(list(gt_util.find_edge(mdg, epterm, str(p)))) == 3
assert len(list(gt_util.find_edge(mdg, epterm, str(q)))) == 1
@@ -88,4 +89,5 @@ def test_rdflib_to_graphtool():
if __name__ == "__main__":
import sys
import nose
+
nose.main(defaultTest=sys.argv[0])
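
For orientation: `rdflib_to_networkx_multidigraph` keeps one edge per triple, while the digraph/graph converters collapse parallel predicates into a single edge carrying `triples` and `weight` attributes. A reduced sketch (assuming `networkx` is installed):

    from rdflib import Graph, URIRef
    from rdflib.extras.external_graph_libs import rdflib_to_networkx_digraph

    g = Graph()
    a, b, p, q = URIRef("a"), URIRef("b"), URIRef("p"), URIRef("q")
    g.add((a, p, b))
    g.add((a, q, b))

    dg = rdflib_to_networkx_digraph(g)
    # Two parallel triples collapse into one edge ...
    assert len(dg.edges()) == 1
    # ... whose attributes record both source triples and their count.
    assert dg[a][b]["weight"] == 2
    assert sorted(dg[a][b]["triples"]) == [(a, p, b), (a, q, b)]
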
diff --git a/test/test_finalnewline.py b/test/test_finalnewline.py
index 8cfdcab3..c78ac247 100644
--- a/test/test_finalnewline.py
+++ b/test/test_finalnewline.py
@@ -1,4 +1,3 @@
-
from rdflib import ConjunctiveGraph, URIRef
import rdflib.plugin
@@ -10,15 +9,19 @@ def testFinalNewline():
import sys
graph = ConjunctiveGraph()
- graph.add((URIRef("http://ex.org/a"),
- URIRef("http://ex.org/b"),
- URIRef("http://ex.org/c")))
+ graph.add(
+ (
+ URIRef("http://ex.org/a"),
+ URIRef("http://ex.org/b"),
+ URIRef("http://ex.org/c"),
+ )
+ )
failed = set()
for p in rdflib.plugin.plugins(None, rdflib.plugin.Serializer):
v = graph.serialize(format=p.name)
lines = v.split("\n".encode("latin-1"))
- if "\n".encode("latin-1") not in v or (lines[-1] != ''.encode("latin-1")):
+ if "\n".encode("latin-1") not in v or (lines[-1] != "".encode("latin-1")):
failed.add(p.name)
assert len(failed) == 0, "No final newline for formats: '%s'" % failed
@@ -27,5 +30,6 @@ if __name__ == "__main__":
import sys
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_graph.py b/test/test_graph.py
index 228550ec..0032213e 100644
--- a/test/test_graph.py
+++ b/test/test_graph.py
@@ -11,29 +11,27 @@ from nose.exc import SkipTest
class GraphTestCase(unittest.TestCase):
- store = 'default'
+ store = "default"
tmppath = None
def setUp(self):
try:
self.graph = Graph(store=self.store)
except ImportError:
- raise SkipTest(
- "Dependencies for store '%s' not available!" % self.store)
+ raise SkipTest("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
- _, self.tmppath = mkstemp(
- prefix='test', dir='/tmp', suffix='.sqlite')
+ _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
else:
self.tmppath = mkdtemp()
self.graph.open(self.tmppath, create=True)
- self.michel = URIRef(u'michel')
- self.tarek = URIRef(u'tarek')
- self.bob = URIRef(u'bob')
- self.likes = URIRef(u'likes')
- self.hates = URIRef(u'hates')
- self.pizza = URIRef(u'pizza')
- self.cheese = URIRef(u'cheese')
+ self.michel = URIRef(u"michel")
+ self.tarek = URIRef(u"tarek")
+ self.bob = URIRef(u"bob")
+ self.likes = URIRef(u"likes")
+ self.hates = URIRef(u"hates")
+ self.pizza = URIRef(u"pizza")
+ self.cheese = URIRef(u"cheese")
def tearDown(self):
self.graph.close()
@@ -254,21 +252,27 @@ class GraphTestCase(unittest.TestCase):
# dynamically create classes for each registered Store
pluginname = None
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ('default', 'IOMemory', 'Auditable',
- 'Concurrent', 'SPARQLStore',
- 'SPARQLUpdateStore'):
+ if s.name in (
+ "default",
+ "IOMemory",
+ "Auditable",
+ "Concurrent",
+ "SPARQLStore",
+ "SPARQLUpdateStore",
+ ):
continue # these are tested by default
- locals()["t%d" % tests] = type("%sGraphTestCase" %
- s.name, (GraphTestCase,), {"store": s.name})
+ locals()["t%d" % tests] = type(
+ "%sGraphTestCase" % s.name, (GraphTestCase,), {"store": s.name}
+ )
tests += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main(argv=sys.argv[:1])
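
The loop above builds one TestCase subclass per registered store with the three-argument form of `type()`. The idiom, isolated (the stub GraphTestCase here stands in for the real one):

    import unittest

    class GraphTestCase(unittest.TestCase):
        store = "default"

    # type(name, bases, namespace) is the runtime spelling of a class
    # statement; this is equivalent to writing
    #     class SleepycatGraphTestCase(GraphTestCase): store = "Sleepycat"
    SleepycatGraphTestCase = type(
        "SleepycatGraphTestCase", (GraphTestCase,), {"store": "Sleepycat"}
    )
    assert SleepycatGraphTestCase.store == "Sleepycat"
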
diff --git a/test/test_graph_context.py b/test/test_graph_context.py
index cc5786dd..0a7ac8a3 100644
--- a/test/test_graph_context.py
+++ b/test/test_graph_context.py
@@ -10,7 +10,7 @@ from nose.exc import SkipTest
class ContextTestCase(unittest.TestCase):
- store = 'default'
+ store = "default"
slow = True
tmppath = None
@@ -18,24 +18,22 @@ class ContextTestCase(unittest.TestCase):
try:
self.graph = ConjunctiveGraph(store=self.store)
except ImportError:
- raise SkipTest(
- "Dependencies for store '%s' not available!" % self.store)
+ raise SkipTest("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
- _, self.tmppath = mkstemp(
- prefix='test', dir='/tmp', suffix='.sqlite')
+ _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
else:
self.tmppath = mkdtemp()
self.graph.open(self.tmppath, create=True)
- self.michel = URIRef(u'michel')
- self.tarek = URIRef(u'tarek')
- self.bob = URIRef(u'bob')
- self.likes = URIRef(u'likes')
- self.hates = URIRef(u'hates')
- self.pizza = URIRef(u'pizza')
- self.cheese = URIRef(u'cheese')
+ self.michel = URIRef(u"michel")
+ self.tarek = URIRef(u"tarek")
+ self.bob = URIRef(u"bob")
+ self.likes = URIRef(u"likes")
+ self.hates = URIRef(u"hates")
+ self.pizza = URIRef(u"pizza")
+ self.cheese = URIRef(u"cheese")
- self.c1 = URIRef(u'context-1')
- self.c2 = URIRef(u'context-2')
+ self.c1 = URIRef(u"context-1")
+ self.c2 = URIRef(u"context-2")
# delete the graph for each test!
self.graph.remove((None, None, None))
@@ -176,6 +174,7 @@ class ContextTestCase(unittest.TestCase):
def cid(c):
return c.identifier
+
self.assertTrue(self.c1 in map(cid, self.graph.contexts()))
self.assertTrue(self.c2 in map(cid, self.graph.contexts()))
@@ -305,32 +304,55 @@ class ContextTestCase(unittest.TestCase):
asserte(set(c.predicates(bob, pizza)), set([hates]))
asserte(set(c.predicates(bob, michel)), set([hates]))
- asserte(set(
- c.subject_objects(hates)), set([(bob, pizza), (bob, michel)]))
+ asserte(set(c.subject_objects(hates)), set([(bob, pizza), (bob, michel)]))
+ asserte(
+ set(c.subject_objects(likes)),
+ set(
+ [
+ (tarek, cheese),
+ (michel, cheese),
+ (michel, pizza),
+ (bob, cheese),
+ (tarek, pizza),
+ ]
+ ),
+ )
+
+ asserte(
+ set(c.predicate_objects(michel)), set([(likes, cheese), (likes, pizza)])
+ )
+ asserte(
+ set(c.predicate_objects(bob)),
+ set([(likes, cheese), (hates, pizza), (hates, michel)]),
+ )
asserte(
- set(c.subject_objects(likes)), set(
- [(tarek, cheese), (michel, cheese),
- (michel, pizza), (bob, cheese),
- (tarek, pizza)]))
-
- asserte(set(c.predicate_objects(
- michel)), set([(likes, cheese), (likes, pizza)]))
- asserte(set(c.predicate_objects(bob)), set([(likes,
- cheese), (hates, pizza), (hates, michel)]))
- asserte(set(c.predicate_objects(
- tarek)), set([(likes, cheese), (likes, pizza)]))
-
- asserte(set(c.subject_predicates(
- pizza)), set([(bob, hates), (tarek, likes), (michel, likes)]))
- asserte(set(c.subject_predicates(cheese)), set([(
- bob, likes), (tarek, likes), (michel, likes)]))
+ set(c.predicate_objects(tarek)), set([(likes, cheese), (likes, pizza)])
+ )
+
+ asserte(
+ set(c.subject_predicates(pizza)),
+ set([(bob, hates), (tarek, likes), (michel, likes)]),
+ )
+ asserte(
+ set(c.subject_predicates(cheese)),
+ set([(bob, likes), (tarek, likes), (michel, likes)]),
+ )
asserte(set(c.subject_predicates(michel)), set([(bob, hates)]))
- asserte(set(c), set(
- [(bob, hates, michel), (bob, likes, cheese),
- (tarek, likes, pizza), (michel, likes, pizza),
- (michel, likes, cheese), (bob, hates, pizza),
- (tarek, likes, cheese)]))
+ asserte(
+ set(c),
+ set(
+ [
+ (bob, hates, michel),
+ (bob, likes, cheese),
+ (tarek, likes, pizza),
+ (michel, likes, pizza),
+ (michel, likes, cheese),
+ (bob, hates, pizza),
+ (tarek, likes, cheese),
+ ]
+ ),
+ )
# remove stuff and make sure the graph is empty again
self.removeStuff()
@@ -340,22 +362,29 @@ class ContextTestCase(unittest.TestCase):
# dynamically create classes for each registered Store
pluginname = None
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ('default', 'IOMemory', 'Auditable',
- 'Concurrent', 'SPARQLStore', 'SPARQLUpdateStore'):
+ if s.name in (
+ "default",
+ "IOMemory",
+ "Auditable",
+ "Concurrent",
+ "SPARQLStore",
+ "SPARQLUpdateStore",
+ ):
continue # these are tested by default
if not s.getClass().context_aware:
continue
- locals()["t%d" % tests] = type("%sContextTestCase" % s.name, (
- ContextTestCase,), {"store": s.name})
+ locals()["t%d" % tests] = type(
+ "%sContextTestCase" % s.name, (ContextTestCase,), {"store": s.name}
+ )
tests += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_graph_formula.py b/test/test_graph_formula.py
index 412e7a77..52764628 100644
--- a/test/test_graph_formula.py
+++ b/test/test_graph_formula.py
@@ -31,8 +31,8 @@ def testFormulaStore(store="default", configString=None):
g.destroy(configString)
g.open(configString)
else:
- if store == 'SQLite':
- _, path = mkstemp(prefix='test', dir='/tmp', suffix='.sqlite')
+ if store == "SQLite":
+ _, path = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
g.open(path, create=True)
else:
g.open(mkdtemp(), create=True)
@@ -45,10 +45,10 @@ def testFormulaStore(store="default", configString=None):
assert type(formulaA) == QuotedGraph and type(formulaB) == QuotedGraph
# a = URIRef('http://test/a')
- b = URIRef('http://test/b')
- c = URIRef('http://test/c')
- d = URIRef('http://test/d')
- v = Variable('y')
+ b = URIRef("http://test/b")
+ c = URIRef("http://test/c")
+ d = URIRef("http://test/d")
+ v = Variable("y")
universe = ConjunctiveGraph(g.store)
@@ -69,10 +69,8 @@ def testFormulaStore(store="default", configString=None):
assert len(list(formulaA.triples((None, None, None)))) == 2
assert len(list(formulaB.triples((None, None, None)))) == 2
assert len(list(universe.triples((None, None, None)))) == 3
- assert len(list(formulaB.triples(
- (None, URIRef('http://test/d'), None)))) == 2
- assert len(list(universe.triples(
- (None, URIRef('http://test/d'), None)))) == 1
+ assert len(list(formulaB.triples((None, URIRef("http://test/d"), None)))) == 2
+ assert len(list(universe.triples((None, URIRef("http://test/d"), None)))) == 1
# #context tests
# #test contexts with triple argument
@@ -115,13 +113,13 @@ def testFormulaStore(store="default", configString=None):
assert len(universe) == 0
g.close()
- if store == 'SQLite':
+ if store == "SQLite":
os.unlink(path)
else:
g.store.destroy(configString)
except:
g.close()
- if store == 'SQLite':
+ if store == "SQLite":
os.unlink(path)
else:
g.store.destroy(configString)
@@ -130,21 +128,19 @@ def testFormulaStore(store="default", configString=None):
def testFormulaStores():
pluginname = None
- if __name__ == '__main__':
+ if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in (
- 'Auditable', 'Concurrent',
- 'SPARQLStore', 'SPARQLUpdateStore',
- ):
+ if s.name in ("Auditable", "Concurrent", "SPARQLStore", "SPARQLUpdateStore",):
continue
if not s.getClass().formula_aware:
continue
yield testFormulaStore, s.name
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_graph_items.py b/test/test_graph_items.py
index b6cb2529..bc13c367 100644
--- a/test/test_graph_items.py
+++ b/test/test_graph_items.py
@@ -2,7 +2,8 @@ from rdflib import Graph, RDF
def test_recursive_list_detection():
- g = Graph().parse(data="""
+ g = Graph().parse(
+ data="""
@prefix : <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<> :value _:a .
@@ -14,7 +15,9 @@ def test_recursive_list_detection():
<> :value [ :first "turtles"; :rest _:c ] .
_:c :first "all the way down"; :rest _:a .
- """, format="turtle")
+ """,
+ format="turtle",
+ )
for v in g.objects(None, RDF.value):
try:
diff --git a/test/test_hex_binary.py b/test/test_hex_binary.py
index f4ccbe5a..5f46bda5 100644
--- a/test/test_hex_binary.py
+++ b/test/test_hex_binary.py
@@ -6,7 +6,6 @@ from rdflib import Literal, XSD
class HexBinaryTestCase(unittest.TestCase):
-
def test_int(self):
self._test_integer(5)
self._test_integer(3452)
@@ -30,20 +29,20 @@ class HexBinaryTestCase(unittest.TestCase):
def test_unicode(self):
str1 = u"Test utf-8 string éàë"
# u hexstring
- hex_str1 = binascii.hexlify(str1.encode('utf-8')).decode()
+ hex_str1 = binascii.hexlify(str1.encode("utf-8")).decode()
l1 = Literal(hex_str1, datatype=XSD.hexBinary)
b_str1 = l1.toPython()
- self.assertEquals(b_str1.decode('utf-8'), str1)
+ self.assertEquals(b_str1.decode("utf-8"), str1)
self.assertEquals(str(l1), hex_str1)
# b hexstring
- hex_str1b = binascii.hexlify(str1.encode('utf-8'))
+ hex_str1b = binascii.hexlify(str1.encode("utf-8"))
l1b = Literal(hex_str1b, datatype=XSD.hexBinary)
b_str1b = l1b.toPython()
self.assertEquals(b_str1, b_str1b)
- self.assertEquals(b_str1b.decode('utf-8'), str1)
+ self.assertEquals(b_str1b.decode("utf-8"), str1)
self.assertEquals(str(l1b), hex_str1)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
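
An `xsd:hexBinary` literal keeps the hex digits as its lexical form, while `toPython()` decodes back to bytes; the round trip these tests exercise, reduced to essentials:

    import binascii

    from rdflib import Literal, XSD

    data = u"Test utf-8 string éàë".encode("utf-8")
    hex_str = binascii.hexlify(data).decode()

    lit = Literal(hex_str, datatype=XSD.hexBinary)
    assert str(lit) == hex_str      # lexical form stays hexadecimal
    assert lit.toPython() == data   # value space is the raw bytes
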
diff --git a/test/test_initbindings.py b/test/test_initbindings.py
index efa94191..138041b2 100644
--- a/test/test_initbindings.py
+++ b/test/test_initbindings.py
@@ -1,181 +1,349 @@
-
from nose import SkipTest
from rdflib.plugins.sparql import prepareQuery
from rdflib import ConjunctiveGraph, URIRef, Literal, Namespace, Variable
+
g = ConjunctiveGraph()
def testStr():
- a = set(g.query("SELECT (STR(?target) AS ?r) WHERE { }", initBindings={'target': URIRef('example:a')}))
- b = set(g.query("SELECT (STR(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"))
+ a = set(
+ g.query(
+ "SELECT (STR(?target) AS ?r) WHERE { }",
+ initBindings={"target": URIRef("example:a")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (STR(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"
+ )
+ )
assert a == b, "STR: %r != %r" % (a, b)
def testIsIRI():
- a = set(g.query("SELECT (isIRI(?target) AS ?r) WHERE { }", initBindings={'target': URIRef('example:a')}))
- b = set(g.query("SELECT (isIRI(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"))
+ a = set(
+ g.query(
+ "SELECT (isIRI(?target) AS ?r) WHERE { }",
+ initBindings={"target": URIRef("example:a")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (isIRI(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"
+ )
+ )
assert a == b, "isIRI: %r != %r" % (a, b)
def testIsBlank():
- a = set(g.query("SELECT (isBlank(?target) AS ?r) WHERE { }", initBindings={'target': URIRef('example:a')}))
- b = set(g.query("SELECT (isBlank(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"))
+ a = set(
+ g.query(
+ "SELECT (isBlank(?target) AS ?r) WHERE { }",
+ initBindings={"target": URIRef("example:a")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (isBlank(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"
+ )
+ )
assert a == b, "isBlank: %r != %r" % (a, b)
def testIsLiteral():
- a = set(g.query("SELECT (isLiteral(?target) AS ?r) WHERE { }", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (isLiteral(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (isLiteral(?target) AS ?r) WHERE { }",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (isLiteral(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "isLiteral: %r != %r" % (a, b)
def testUCase():
- a = set(g.query("SELECT (UCASE(?target) AS ?r) WHERE { }", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) AS ?r) WHERE { }",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "UCASE: %r != %r" % (a, b)
def testNoFunc():
- a = set(g.query("SELECT ?target WHERE { }", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query("SELECT ?target WHERE { }", initBindings={"target": Literal("example")})
+ )
b = set(g.query("SELECT ?target WHERE { } VALUES (?target) {('example')}"))
assert a == b, "no func: %r != %r" % (a, b)
def testOrderBy():
- a = set(g.query("SELECT ?target WHERE { } ORDER BY ?target", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderby: %r != %r" % (a, b)
def testOrderByFunc():
- a = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target VALUES (?target) {('example')} "))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target VALUES (?target) {('example')} "
+ )
+ )
assert a == b, "orderbyFunc: %r != %r" % (a, b)
def testNoFuncLimit():
- a = set(g.query("SELECT ?target WHERE { } LIMIT 1", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
b = set(g.query("SELECT ?target WHERE { } LIMIT 1 VALUES (?target) {('example')}"))
assert a == b, "limit: %r != %r" % (a, b)
def testOrderByLimit():
- a = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyLimit: %r != %r" % (a, b)
def testOrderByFuncLimit():
- a = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyFuncLimit: %r != %r" % (a, b)
def testNoFuncOffset():
- a = set(g.query("SELECT ?target WHERE { } OFFSET 1", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
b = set(g.query("SELECT ?target WHERE { } OFFSET 1 VALUES (?target) {('example')}"))
assert a == b, "offset: %r != %r" % (a, b)
def testNoFuncLimitOffset():
- a = set(g.query("SELECT ?target WHERE { } LIMIT 1 OFFSET 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } LIMIT 1 OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "limitOffset: %r != %r" % (a, b)
def testOrderByLimitOffset():
- a = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyLimitOffset: %r != %r" % (a, b)
def testOrderByFuncLimitOffset():
- a = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyFuncLimitOffset: %r != %r" % (a, b)
def testDistinct():
- a = set(g.query("SELECT DISTINCT ?target WHERE { }", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { }",
+ initBindings={"target": Literal("example")},
+ )
+ )
b = set(g.query("SELECT DISTINCT ?target WHERE { } VALUES (?target) {('example')}"))
assert a == b, "distinct: %r != %r" % (a, b)
def testDistinctOrderBy():
- a = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "distinctOrderby: %r != %r" % (a, b)
def testDistinctOrderByLimit():
- a = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "distinctOrderbyLimit: %r != %r" % (a, b)
def testPrepare():
- q = prepareQuery('SELECT ?target WHERE { }')
+ q = prepareQuery("SELECT ?target WHERE { }")
r = list(g.query(q))
e = []
- assert r == e, 'prepare: %r != %r' % (r, e)
+ assert r == e, "prepare: %r != %r" % (r, e)
- r = list(g.query(q, initBindings={'target': Literal('example')}))
- e = [(Literal('example'),)]
- assert r == e, 'prepare: %r != %r' % (r, e)
+ r = list(g.query(q, initBindings={"target": Literal("example")}))
+ e = [(Literal("example"),)]
+ assert r == e, "prepare: %r != %r" % (r, e)
r = list(g.query(q))
e = []
- assert r == e, 'prepare: %r != %r' % (r, e)
+ assert r == e, "prepare: %r != %r" % (r, e)
def testData():
data = ConjunctiveGraph()
- data += [(URIRef('urn:a'), URIRef('urn:p'), Literal('a')),
- (URIRef('urn:b'), URIRef('urn:p'), Literal('b'))]
-
- a = set(g.query("SELECT ?target WHERE { ?target <urn:p> ?val }", initBindings={'val': Literal('a')}))
- b = set(g.query("SELECT ?target WHERE { ?target <urn:p> ?val } VALUES (?val) {('a')}"))
+ data += [
+ (URIRef("urn:a"), URIRef("urn:p"), Literal("a")),
+ (URIRef("urn:b"), URIRef("urn:p"), Literal("b")),
+ ]
+
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { ?target <urn:p> ?val }",
+ initBindings={"val": Literal("a")},
+ )
+ )
+ b = set(
+ g.query("SELECT ?target WHERE { ?target <urn:p> ?val } VALUES (?val) {('a')}")
+ )
assert a == b, "data: %r != %r" % (a, b)
def testAsk():
- a = set(g.query("ASK { }", initBindings={'target': Literal('example')}))
+ a = set(g.query("ASK { }", initBindings={"target": Literal("example")}))
b = set(g.query("ASK { } VALUES (?target) {('example')}"))
assert a == b, "ask: %r != %r" % (a, b)
EX = Namespace("http://example.com/")
g2 = ConjunctiveGraph()
-g2.bind('', EX)
-g2.add((EX['s1'], EX['p'], EX['o1']))
-g2.add((EX['s2'], EX['p'], EX['o2']))
+g2.bind("", EX)
+g2.add((EX["s1"], EX["p"], EX["o1"]))
+g2.add((EX["s2"], EX["p"], EX["o2"]))
def testStringKey():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"s": EX['s1']}))
+ results = list(
+ g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"s": EX["s1"]})
+ )
assert len(results) == 1, results
def testStringKeyWithQuestionMark():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"?s": EX['s1']}))
+ results = list(
+ g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"?s": EX["s1"]})
+ )
assert len(results) == 1, results
def testVariableKey():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("s"): EX['s1']}))
+ results = list(
+ g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("s"): EX["s1"]})
+ )
assert len(results) == 1, results
+
def testVariableKeyWithQuestionMark():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("?s"): EX['s1']}))
+ results = list(
+ g2.query(
+ "SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("?s"): EX["s1"]}
+ )
+ )
assert len(results) == 1, results
def testFilter():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o FILTER (?s = ?x)}", initBindings={Variable("?x"): EX['s1']}))
+ results = list(
+ g2.query(
+ "SELECT ?o WHERE { ?s :p ?o FILTER (?s = ?x)}",
+ initBindings={Variable("?x"): EX["s1"]},
+ )
+ )
assert len(results) == 1, results
@@ -183,5 +351,6 @@ if __name__ == "__main__":
import sys
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
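
Every test in this file asserts one equivalence: pre-binding a variable with `initBindings` must produce the same solution set as inlining a VALUES clause. The pattern, stripped to its core:

    from rdflib import ConjunctiveGraph, Literal

    g = ConjunctiveGraph()
    bound = set(
        g.query(
            "SELECT ?target WHERE { }",
            initBindings={"target": Literal("example")},
        )
    )
    inlined = set(g.query("SELECT ?target WHERE { } VALUES (?target) {('example')}"))
    assert bound == inlined
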
diff --git a/test/test_iomemory.py b/test/test_iomemory.py
index 897cc8b2..4239fc3c 100644
--- a/test/test_iomemory.py
+++ b/test/test_iomemory.py
@@ -1,4 +1,3 @@
-
"""
Iteration and update conflict with set based IOMemory store
@@ -63,6 +62,6 @@ def test_concurrent2():
assert i == n
-if __name__ == '__main__':
+if __name__ == "__main__":
test_concurrent1()
test_concurrent2()
diff --git a/test/test_issue084.py b/test/test_issue084.py
index 527caf21..23536550 100644
--- a/test/test_issue084.py
+++ b/test/test_issue084.py
@@ -20,55 +20,65 @@ rdf = u"""@prefix skos:
"""
-rdf_utf8 = rdf.encode('utf-8')
+rdf_utf8 = rdf.encode("utf-8")
-rdf_reader = getreader('utf-8')(BytesIO(rdf.encode('utf-8')))
+rdf_reader = getreader("utf-8")(BytesIO(rdf.encode("utf-8")))
def test_a():
"""Test reading N3 from a unicode objects as data"""
g = Graph()
- g.parse(data=rdf, format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdf, format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_b():
"""Test reading N3 from a utf8 encoded string as data"""
g = Graph()
- g.parse(data=rdf_utf8, format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdf_utf8, format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_c():
"""Test reading N3 from a codecs.StreamReader, outputting unicode"""
g = Graph()
-# rdf_reader.seek(0)
- g.parse(source=rdf_reader, format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ # rdf_reader.seek(0)
+ g.parse(source=rdf_reader, format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_d():
"""Test reading N3 from a StringIO over the unicode object"""
g = Graph()
- g.parse(source=StringIO(rdf), format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(source=StringIO(rdf), format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_e():
"""Test reading N3 from a BytesIO over the string object"""
g = Graph()
- g.parse(source=BytesIO(rdf_utf8), format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(source=BytesIO(rdf_utf8), format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
# this is unicode
@@ -86,27 +96,32 @@ rdfxml = u"""<?xml version="1.0" encoding="UTF-8"?>
"""
# this is a str
-rdfxml_utf8 = rdfxml.encode('utf-8')
+rdfxml_utf8 = rdfxml.encode("utf-8")
-rdfxml_reader = getreader('utf-8')(BytesIO(rdfxml.encode('utf-8')))
+rdfxml_reader = getreader("utf-8")(BytesIO(rdfxml.encode("utf-8")))
def test_xml_a():
"""Test reading XML from a unicode object as data"""
g = Graph()
- g.parse(data=rdfxml, format='xml')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdfxml, format="xml")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_xml_b():
"""Test reading XML from a utf8 encoded string object as data"""
g = Graph()
- g.parse(data=rdfxml_utf8, format='xml')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdfxml_utf8, format="xml")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
+
# The following two cases are currently not supported by Graph.parse
# def test_xml_c():
@@ -127,7 +142,9 @@ def test_xml_b():
def test_xml_e():
"""Test reading XML from a BytesIO created from utf8 encoded string"""
g = Graph()
- g.parse(source=BytesIO(rdfxml_utf8), format='xml')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(source=BytesIO(rdfxml_utf8), format="xml")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
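
The invariant this file checks: the same N3 must parse identically whether supplied as a unicode string, UTF-8 bytes, a codecs StreamReader, a StringIO, or a BytesIO. Reduced to two of those channels:

    from io import BytesIO, StringIO

    from rdflib import Graph

    data = u'<http://example.org/s> <http://example.org/p> "caf\u00e9" .'

    for source in (StringIO(data), BytesIO(data.encode("utf-8"))):
        g = Graph()
        g.parse(source=source, format="n3")
        assert len(g) == 1
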
diff --git a/test/test_issue1003.py b/test/test_issue1003.py
index fdc56c82..d59caf3d 100644
--- a/test/test_issue1003.py
+++ b/test/test_issue1003.py
@@ -34,41 +34,49 @@ g.bind("skos", SKOS)
g1 = Graph()
g1 += g
# @base should not be in output
-assert "@base" not in g.serialize(format='turtle').decode("utf-8")
+assert "@base" not in g.serialize(format="turtle").decode("utf-8")
# 2. base one set for graph, no base set for serialization
g2 = Graph(base=base_one)
g2 += g
# @base should be in output, from Graph (one)
-assert "@base <http://one.org/> ." in g2.serialize(format='turtle').decode("utf-8")
+assert "@base <http://one.org/> ." in g2.serialize(format="turtle").decode("utf-8")
# 3. no base set for graph, base two set for serialization
g3 = Graph()
g3 += g
# @base should be in output, from serialization (two)
-assert "@base <http://two.org/> ." in g3.serialize(format='turtle', base=base_two).decode("utf-8")
+assert "@base <http://two.org/> ." in g3.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
# 4. base one set for graph, base two set for serialization; the serialization base (two) overrides
g4 = Graph(base=base_one)
g4 += g
# @base should be in output, from the serialization setting (two)
-assert "@base <http://two.org/> ." in g4.serialize(format='turtle', base=base_two).decode("utf-8")
+assert "@base <http://two.org/> ." in g4.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
# just checking that the graph setting (one) hasn't snuck through
-assert "@base <http://one.org/> ." not in g4.serialize(format='turtle', base=base_two).decode("utf-8")
+assert "@base <http://one.org/> ." not in g4.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
# 5. multiple serialization side effect checking
g5 = Graph()
g5 += g
# @base should be in output, from serialization (two)
-assert "@base <http://two.org/> ." in g5.serialize(format='turtle', base=base_two).decode("utf-8")
+assert "@base <http://two.org/> ." in g5.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
# checking for side effects - no base now set for this serialization
# @base should not be in output
-assert "@base" not in g5.serialize(format='turtle').decode("utf-8")
+assert "@base" not in g5.serialize(format="turtle").decode("utf-8")
# 6. checking results for RDF/XML
@@ -76,22 +84,30 @@ g6 = Graph()
g6 += g
g6.bind("dct", DCTERMS)
g6.bind("skos", SKOS)
-assert "@xml:base" not in g6.serialize(format='xml').decode("utf-8")
-assert 'xml:base="http://one.org/"' in g6.serialize(format='xml', base=base_one).decode("utf-8")
+assert "@xml:base" not in g6.serialize(format="xml").decode("utf-8")
+assert 'xml:base="http://one.org/"' in g6.serialize(format="xml", base=base_one).decode(
+ "utf-8"
+)
g6.base = base_two
-assert 'xml:base="http://two.org/"' in g6.serialize(format='xml').decode("utf-8")
-assert 'xml:base="http://one.org/"' in g6.serialize(format='xml', base=base_one).decode("utf-8")
+assert 'xml:base="http://two.org/"' in g6.serialize(format="xml").decode("utf-8")
+assert 'xml:base="http://one.org/"' in g6.serialize(format="xml", base=base_one).decode(
+ "utf-8"
+)
# 7. checking results for N3
g7 = Graph()
g7 += g
g7.bind("dct", DCTERMS)
g7.bind("skos", SKOS)
-assert "@xml:base" not in g7.serialize(format='xml').decode("utf-8")
-assert "@base <http://one.org/> ." in g7.serialize(format='n3', base=base_one).decode("utf-8")
+assert "@xml:base" not in g7.serialize(format="xml").decode("utf-8")
+assert "@base <http://one.org/> ." in g7.serialize(format="n3", base=base_one).decode(
+ "utf-8"
+)
g7.base = base_two
-assert "@base <http://two.org/> ." in g7.serialize(format='n3').decode("utf-8")
-assert "@base <http://one.org/> ." in g7.serialize(format='n3', base=base_one).decode("utf-8")
+assert "@base <http://two.org/> ." in g7.serialize(format="n3").decode("utf-8")
+assert "@base <http://one.org/> ." in g7.serialize(format="n3", base=base_one).decode(
+ "utf-8"
+)
# 8. checking results for TriX & TriG
# TriX can specify a base per graph but setting a base for the whole
@@ -99,19 +115,19 @@ base_three = Namespace("http://three.org/")
ds1 = Dataset()
ds1.bind("dct", DCTERMS)
ds1.bind("skos", SKOS)
-g8 = ds1.graph(URIRef('http://g8.com/'), base=base_one)
-g9 = ds1.graph(URIRef('http://g9.com/'))
+g8 = ds1.graph(URIRef("http://g8.com/"), base=base_one)
+g9 = ds1.graph(URIRef("http://g9.com/"))
g8 += g
g9 += g
g9.base = base_two
ds1.base = base_three
-trix = ds1.serialize(format='trix', base=Namespace("http://two.org/")).decode("utf-8")
+trix = ds1.serialize(format="trix", base=Namespace("http://two.org/")).decode("utf-8")
assert '<graph xml:base="http://one.org/">' in trix
assert '<graph xml:base="http://two.org/">' in trix
assert '<TriX xml:base="http://two.org/"' in trix
-trig = ds1.serialize(format='trig', base=Namespace("http://two.org/")).decode("utf-8")
-assert '@base <http://one.org/> .' not in trig
-assert '@base <http://three.org/> .' not in trig
-assert '@base <http://two.org/> .' in trig
+trig = ds1.serialize(format="trig", base=Namespace("http://two.org/")).decode("utf-8")
+assert "@base <http://one.org/> ." not in trig
+assert "@base <http://three.org/> ." not in trig
+assert "@base <http://two.org/> ." in trig
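
Net effect of cases 1-5 above: an explicit `base=` passed to `serialize()` wins over a base set on the Graph. Sketched (`serialize()` returns bytes in this rdflib version, hence the decode):

    from rdflib import Graph, Namespace, URIRef

    g = Graph(base=Namespace("http://one.org/"))
    g.add((URIRef("urn:s"), URIRef("urn:p"), URIRef("urn:o")))

    out = g.serialize(format="turtle", base=Namespace("http://two.org/")).decode("utf-8")
    assert "@base <http://two.org/> ." in out
    assert "@base <http://one.org/> ." not in out
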
diff --git a/test/test_issue160.py b/test/test_issue160.py
index 17ae18c5..b3c7b422 100644
--- a/test/test_issue160.py
+++ b/test/test_issue160.py
@@ -43,11 +43,10 @@ target2xml = """\
class CollectionTest(TestCase):
-
def test_collection_render(self):
- foo = Namespace('http://www.example.org/foo/ns/')
- ex = Namespace('http://www.example.org/example/foo/')
- rdf = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
+ foo = Namespace("http://www.example.org/foo/ns/")
+ ex = Namespace("http://www.example.org/example/foo/")
+ rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
# Works: x a rdf:List, a foo:Other ;
# Fails: y a foo:Wrapper, foo:wraps x; x a rdf:List, a foo:Other ;
@@ -58,14 +57,14 @@ class CollectionTest(TestCase):
target2.parse(data=target2xml)
g = ConjunctiveGraph()
- bits = [ex['a'], ex['b'], ex['c']]
- l = Collection(g, ex['thing'], bits)
- triple = (ex['thing'], rdf['type'], foo['Other'])
+ bits = [ex["a"], ex["b"], ex["c"]]
+ l = Collection(g, ex["thing"], bits)
+ triple = (ex["thing"], rdf["type"], foo["Other"])
g.add(triple)
- triple = (ex['thing'], foo['property'], Literal('Some Value'))
+ triple = (ex["thing"], foo["property"], Literal("Some Value"))
g.add(triple)
for b in bits:
- triple = (b, rdf['type'], foo['Item'])
+ triple = (b, rdf["type"], foo["Item"])
g.add(triple)
self.assertEqual(g.isomorphic(target1), True)
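
`rdflib.collection.Collection` materialises a Python sequence as an RDF list of rdf:first/rdf:rest cells rooted at the given node, which is what the serializer then renders in collection syntax; a sketch:

    from rdflib import Graph, Namespace, RDF
    from rdflib.collection import Collection

    EX = Namespace("http://www.example.org/example/foo/")

    g = Graph()
    Collection(g, EX["thing"], [EX["a"], EX["b"], EX["c"]])

    # The list head hangs off EX.thing via rdf:first/rdf:rest cells.
    assert (EX["thing"], RDF.first, EX["a"]) in g
    assert len(g) == 6  # three first/rest pairs, closed with rdf:nil
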
diff --git a/test/test_issue161.py b/test/test_issue161.py
index df0b6b7c..fa7529dc 100644
--- a/test/test_issue161.py
+++ b/test/test_issue161.py
@@ -3,12 +3,10 @@ from rdflib.graph import ConjunctiveGraph
class EntityTest(TestCase):
-
def test_turtle_namespace_prefixes(self):
g = ConjunctiveGraph()
- n3 = \
- """
+ n3 = """
@prefix _9: <http://data.linkedmdb.org/resource/movie/> .
@prefix p_9: <urn:test:> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@@ -20,13 +18,13 @@ class EntityTest(TestCase):
rdfs:label "Cecil B. DeMille (Director)";
_9:director_name "Cecil B. DeMille" ."""
- g.parse(data=n3, format='n3')
+ g.parse(data=n3, format="n3")
turtle = g.serialize(format="turtle")
# Check round-tripping, just for kicks.
g = ConjunctiveGraph()
- g.parse(data=turtle, format='turtle')
+ g.parse(data=turtle, format="turtle")
# Shouldn't have got to here
s = g.serialize(format="turtle")
- self.assertTrue('@prefix _9'.encode("latin-1") not in s)
+ self.assertTrue("@prefix _9".encode("latin-1") not in s)
diff --git a/test/test_issue184.py b/test/test_issue184.py
index b4fba8d3..7693dd1c 100644
--- a/test/test_issue184.py
+++ b/test/test_issue184.py
@@ -12,8 +12,8 @@ def test_escaping_of_triple_doublequotes():
is emitted by the serializer, which in turn cannot be parsed correctly.
"""
g = ConjunctiveGraph()
- g.add((URIRef('http://foobar'), URIRef('http://fooprop'), Literal('abc\ndef"""""')))
+ g.add((URIRef("http://foobar"), URIRef("http://fooprop"), Literal('abc\ndef"""""')))
# assert g.serialize(format='n3') == '@prefix ns1: <http:// .\n\nns1:foobar ns1:fooprop """abc\ndef\\"\\"\\"\\"\\"""" .\n\n'
g2 = ConjunctiveGraph()
- g2.parse(data=g.serialize(format='n3'), format='n3')
+ g2.parse(data=g.serialize(format="n3"), format="n3")
assert g.isomorphic(g2) is True
diff --git a/test/test_issue190.py b/test/test_issue190.py
index e5173eff..f8ab37e7 100644
--- a/test/test_issue190.py
+++ b/test/test_issue190.py
@@ -4,7 +4,8 @@ from rdflib.graph import ConjunctiveGraph
from rdflib.parser import StringInputSource
import textwrap
-prefix = textwrap.dedent('''\
+prefix = textwrap.dedent(
+ """\
@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> .
@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#> .
@prefix nco: <http://www.semanticdesktop.org/ontologies/2007/03/22/nco#> .
@@ -15,9 +16,11 @@ prefix = textwrap.dedent('''\
@prefix dc: <http://dublincore.org/documents/2010/10/11/dces/#> .
@prefix nmm: <http://library.gnome.org/devel/ontology/unstable/nmm-classes.html#> .
@prefix nao: <http://www.semanticdesktop.org/ontologies/2007/08/15/nao#> .
- ''')
+ """
+)
-meta = textwrap.dedent(u"""\
+meta = textwrap.dedent(
+ u"""\
a nfo:PaginatedTextDocument ;
nie:title "SV Meldung" ;
nco:creator [ a nco:Contact ;
@@ -30,16 +33,20 @@ a nfo:PaginatedTextDocument ;
nie:plainTextContent "%s" .
} } WHERE { {
?tag1 a nao:Tag ; nao:prefLabel "()" .
-""")
+"""
+)
test_string1 = u"""\
Betriebsnummer der Einzugsstelle:\nKnappschaft\n980 0000 6\nWICHTIGES DOKUMENT - SORGFÄLTIG AUFBEWAHREN!\n """
def test1():
- meta1 = meta.encode('utf-8') % test_string1.encode('utf-8')
+ meta1 = meta.encode("utf-8") % test_string1.encode("utf-8")
graph = ConjunctiveGraph()
- graph.parse(StringInputSource(prefix + '<http://example.org/>' + meta1), format='n3')
+ graph.parse(
+ StringInputSource(prefix + "<http://example.org/>" + meta1), format="n3"
+ )
+
test_string2 = u"""\
Betriebsnummer der Einzugsstelle:
@@ -50,8 +57,11 @@ WICHTIGES DOKUMENT - SORGFÄLTIG AUFBEWAHREN!
def test2():
- meta2 = meta.encode('utf-8') % test_string2.encode('utf-8')
+ meta2 = meta.encode("utf-8") % test_string2.encode("utf-8")
graph = ConjunctiveGraph()
- graph.parse(StringInputSource(prefix + '<http://example.org/>' + meta2), format='n3')
+ graph.parse(
+ StringInputSource(prefix + "<http://example.org/>" + meta2), format="n3"
+ )
+
raise SkipTest("Known issue, with newlines in text")
diff --git a/test/test_issue200.py b/test/test_issue200.py
index 80ce3f31..3fb76894 100644
--- a/test/test_issue200.py
+++ b/test/test_issue200.py
@@ -9,11 +9,11 @@ try:
import os.pipe
except ImportError:
from nose import SkipTest
- raise SkipTest('No os.fork() and/or os.pipe() on this platform, skipping')
+ raise SkipTest("No os.fork() and/or os.pipe() on this platform, skipping")
-class TestRandomSeedInFork(unittest.TestCase):
+class TestRandomSeedInFork(unittest.TestCase):
def test_bnode_id_differs_in_fork(self):
"""Checks that os.fork()ed child processes produce a
different sequence of BNode ids from the parent process.
@@ -28,14 +28,15 @@ class TestRandomSeedInFork(unittest.TestCase):
os.waitpid(pid, 0) # make sure the child process gets cleaned up
else:
os.close(r)
- w = os.fdopen(w, 'w')
+ w = os.fdopen(w, "w")
cb = rdflib.term.BNode()
w.write(cb)
w.close()
os._exit(0)
- assert txt != str(pb1), "Parent process BNode id: " + \
- "%s, child process BNode id: %s" % (
- txt, str(pb1))
+ assert txt != str(pb1), (
+ "Parent process BNode id: "
+ + "%s, child process BNode id: %s" % (txt, str(pb1))
+ )
if __name__ == "__main__":
diff --git a/test/test_issue209.py b/test/test_issue209.py
index 1feb0615..083d763d 100644
--- a/test/test_issue209.py
+++ b/test/test_issue209.py
@@ -11,7 +11,6 @@ def makeNode():
class TestRandomSeedInThread(unittest.TestCase):
-
def test_bnode_id_gen_in_thread(self):
"""
"""
diff --git a/test/test_issue223.py b/test/test_issue223.py
index e1981a30..ab61d9d8 100644
--- a/test/test_issue223.py
+++ b/test/test_issue223.py
@@ -11,12 +11,14 @@ ttl = """
def test_collection_with_duplicates():
g = Graph().parse(data=ttl, format="turtle")
- for _, _, o in g.triples((URIRef("http://example.org/s"), URIRef("http://example.org/p"), None)):
+ for _, _, o in g.triples(
+ (URIRef("http://example.org/s"), URIRef("http://example.org/p"), None)
+ ):
break
c = g.collection(o)
assert list(c) == list(URIRef("http://example.org/" + x) for x in ["a", "b", "a"])
assert len(c) == 3
-if __name__ == '__main__':
+if __name__ == "__main__":
test_collection_with_duplicates()
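Graph.collection() is the read side of the same machinery: given the head node of an RDF list it yields the members in order, duplicates included, which is exactly what the assertions above rely on. A self-contained sketch:

from rdflib import Graph, URIRef

ttl = """
@prefix ex: <http://example.org/> .
ex:s ex:p ( ex:a ex:b ex:a ) .
"""
g = Graph().parse(data=ttl, format="turtle")
head = next(g.objects(URIRef("http://example.org/s"), URIRef("http://example.org/p")))
c = g.collection(head)  # view over the rdf:List starting at head
assert [str(m) for m in c] == [
    "http://example.org/a", "http://example.org/b", "http://example.org/a"
]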
diff --git a/test/test_issue247.py b/test/test_issue247.py
index 780d578b..747dd1e0 100644
--- a/test/test_issue247.py
+++ b/test/test_issue247.py
@@ -31,7 +31,6 @@ passxml = """\
class TestXMLLiteralwithLangAttr(unittest.TestCase):
-
def test_successful_parse_of_literal_without_xmllang_attr(self):
"""
Test parse of Literal without xmllang attr passes
diff --git a/test/test_issue248.py b/test/test_issue248.py
index 4cc490a6..528e81a2 100644
--- a/test/test_issue248.py
+++ b/test/test_issue248.py
@@ -3,7 +3,6 @@ import unittest
class TestSerialization(unittest.TestCase):
-
def test_issue_248(self):
"""
Ed Summers Thu, 24 May 2007 12:21:17 -0700
@@ -63,31 +62,22 @@ class TestSerialization(unittest.TestCase):
"""
graph = rdflib.Graph()
- DC = rdflib.Namespace('http://purl.org/dc/terms/')
- SKOS = rdflib.Namespace('http://www.w3.org/2004/02/skos/core#')
- LCCO = rdflib.Namespace('http://loc.gov/catdir/cpso/lcco/')
-
- graph.bind('dc', DC)
- graph.bind('skos', SKOS)
- graph.bind('lcco', LCCO)
-
- concept = rdflib.URIRef(LCCO['1'])
- graph.add(
- (concept,
- rdflib.RDF.type,
- SKOS['Concept']))
- graph.add(
- (concept,
- SKOS['prefLabel'],
- rdflib.Literal('Scrapbooks')))
- graph.add(
- (concept,
- DC['LCC'],
- rdflib.Literal('AC999.0999 - AC999999.Z9999')))
- sg = graph.serialize(format='n3', base=LCCO).decode('utf8')
+ DC = rdflib.Namespace("http://purl.org/dc/terms/")
+ SKOS = rdflib.Namespace("http://www.w3.org/2004/02/skos/core#")
+ LCCO = rdflib.Namespace("http://loc.gov/catdir/cpso/lcco/")
+
+ graph.bind("dc", DC)
+ graph.bind("skos", SKOS)
+ graph.bind("lcco", LCCO)
+
+ concept = rdflib.URIRef(LCCO["1"])
+ graph.add((concept, rdflib.RDF.type, SKOS["Concept"]))
+ graph.add((concept, SKOS["prefLabel"], rdflib.Literal("Scrapbooks")))
+ graph.add((concept, DC["LCC"], rdflib.Literal("AC999.0999 - AC999999.Z9999")))
+ sg = graph.serialize(format="n3", base=LCCO).decode("utf8")
# See issue 248
# Actual test should be the inverse of the below ...
- self.assertTrue('<1> a skos:Concept ;' in sg, sg)
+ self.assertTrue("<1> a skos:Concept ;" in sg, sg)
if __name__ == "__main__":
diff --git a/test/test_issue274.py b/test/test_issue274.py
index 288d7857..79fc4d15 100644
--- a/test/test_issue274.py
+++ b/test/test_issue274.py
@@ -3,15 +3,18 @@ from nose.tools import eq_
from unittest import TestCase
from rdflib import BNode, Graph, Literal, Namespace, RDFS, XSD
-from rdflib.plugins.sparql.operators import register_custom_function, unregister_custom_function
+from rdflib.plugins.sparql.operators import (
+ register_custom_function,
+ unregister_custom_function,
+)
-EX = Namespace('http://example.org/')
+EX = Namespace("http://example.org/")
G = Graph()
G.add((BNode(), RDFS.label, Literal("bnode")))
NS = {
- 'ex': EX,
- 'rdfs': RDFS,
- 'xsd': XSD,
+ "ex": EX,
+ "rdfs": RDFS,
+ "xsd": XSD,
}
@@ -28,142 +31,145 @@ def teardown():
def test_cast_string_to_string():
- res = query('''SELECT (xsd:string("hello") as ?x) {}''')
+ res = query("""SELECT (xsd:string("hello") as ?x) {}""")
eq_(list(res)[0][0], Literal("hello", datatype=XSD.string))
def test_cast_int_to_string():
- res = query('''SELECT (xsd:string(42) as ?x) {}''')
+ res = query("""SELECT (xsd:string(42) as ?x) {}""")
eq_(list(res)[0][0], Literal("42", datatype=XSD.string))
def test_cast_float_to_string():
- res = query('''SELECT (xsd:string(3.14) as ?x) {}''')
+ res = query("""SELECT (xsd:string(3.14) as ?x) {}""")
eq_(list(res)[0][0], Literal("3.14", datatype=XSD.string))
def test_cast_bool_to_string():
- res = query('''SELECT (xsd:string(true) as ?x) {}''')
+ res = query("""SELECT (xsd:string(true) as ?x) {}""")
eq_(list(res)[0][0], Literal("true", datatype=XSD.string))
def test_cast_iri_to_string():
- res = query('''SELECT (xsd:string(<http://example.org/>) as ?x) {}''')
+ res = query("""SELECT (xsd:string(<http://example.org/>) as ?x) {}""")
eq_(list(res)[0][0], Literal("http://example.org/", datatype=XSD.string))
def test_cast_datetime_to_datetime():
- res = query('''SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:dateTime) as ?x) {}''')
+ res = query(
+ """SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:dateTime) as ?x) {}"""
+ )
eq_(list(res)[0][0], Literal("1970-01-01T00:00:00Z", datatype=XSD.dateTime))
def test_cast_string_to_datetime():
- res = query('''SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:string) as ?x) {}''')
+ res = query(
+ """SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:string) as ?x) {}"""
+ )
eq_(list(res)[0][0], Literal("1970-01-01T00:00:00Z", datatype=XSD.dateTime))
def test_cast_string_to_float():
- res = query('''SELECT (xsd:float("0.5") as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5") as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_int_to_float():
- res = query('''SELECT (xsd:float(1) as ?x) {}''')
+ res = query("""SELECT (xsd:float(1) as ?x) {}""")
eq_(list(res)[0][0], Literal("1", datatype=XSD.float))
def test_cast_float_to_float():
- res = query('''SELECT (xsd:float("0.5"^^xsd:float) as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5"^^xsd:float) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_double_to_float():
- res = query('''SELECT (xsd:float("0.5"^^xsd:double) as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5"^^xsd:double) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_decimal_to_float():
- res = query('''SELECT (xsd:float("0.5"^^xsd:decimal) as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5"^^xsd:decimal) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_string_to_double():
- res = query('''SELECT (xsd:double("0.5") as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5") as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_int_to_double():
- res = query('''SELECT (xsd:double(1) as ?x) {}''')
+ res = query("""SELECT (xsd:double(1) as ?x) {}""")
eq_(list(res)[0][0], Literal("1", datatype=XSD.double))
def test_cast_float_to_double():
- res = query('''SELECT (xsd:double("0.5"^^xsd:float) as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5"^^xsd:float) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_double_to_double():
- res = query('''SELECT (xsd:double("0.5"^^xsd:double) as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5"^^xsd:double) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_decimal_to_double():
- res = query('''SELECT (xsd:double("0.5"^^xsd:decimal) as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5"^^xsd:decimal) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_string_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5") as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5") as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_int_to_decimal():
- res = query('''SELECT (xsd:decimal(1) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal(1) as ?x) {}""")
eq_(list(res)[0][0], Literal("1", datatype=XSD.decimal))
def test_cast_float_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5"^^xsd:float) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5"^^xsd:float) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_double_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5"^^xsd:double) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5"^^xsd:double) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_decimal_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5"^^xsd:decimal) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5"^^xsd:decimal) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_string_to_int():
- res = query('''SELECT (xsd:integer("42") as ?x) {}''')
+ res = query("""SELECT (xsd:integer("42") as ?x) {}""")
eq_(list(res)[0][0], Literal("42", datatype=XSD.integer))
def test_cast_int_to_int():
- res = query('''SELECT (xsd:integer(42) as ?x) {}''')
+ res = query("""SELECT (xsd:integer(42) as ?x) {}""")
eq_(list(res)[0][0], Literal("42", datatype=XSD.integer))
def test_cast_string_to_bool():
- res = query('''SELECT (xsd:boolean("TRUE") as ?x) {}''')
+ res = query("""SELECT (xsd:boolean("TRUE") as ?x) {}""")
eq_(list(res)[0][0], Literal("true", datatype=XSD.boolean))
def test_cast_bool_to_bool():
- res = query('''SELECT (xsd:boolean(true) as ?x) {}''')
+ res = query("""SELECT (xsd:boolean(true) as ?x) {}""")
eq_(list(res)[0][0], Literal("true", datatype=XSD.boolean))
def test_cast_bool_to_bool():
- res = query('''SELECT (ex:f(42, "hello") as ?x) {}''')
+ res = query("""SELECT (ex:f(42, "hello") as ?x) {}""")
eq_(len(list(res)), 0)
class TestCustom(TestCase):
-
@staticmethod
def f(x, y):
return Literal("%s %s" % (x, y), datatype=XSD.string)
@@ -186,13 +192,13 @@ class TestCustom(TestCase):
unregister_custom_function(EX.f, lambda x, y: None)
def test_f(self):
- res = query('''SELECT (ex:f(42, "hello") as ?x) {}''')
+ res = query("""SELECT (ex:f(42, "hello") as ?x) {}""")
eq_(list(res)[0][0], Literal("42 hello", datatype=XSD.string))
def test_f_too_few_args(self):
- res = query('''SELECT (ex:f(42) as ?x) {}''')
+ res = query("""SELECT (ex:f(42) as ?x) {}""")
eq_(len(list(res)), 0)
def test_f_too_many_args(self):
- res = query('''SELECT (ex:f(42, "hello", "world") as ?x) {}''')
+ res = query("""SELECT (ex:f(42, "hello", "world") as ?x) {}""")
eq_(len(list(res)), 0)
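register_custom_function(), exercised by TestCustom above, binds a Python callable to an IRI so that SPARQL queries can call it. A minimal sketch (the ex:concat name is made up for illustration):

from rdflib import Graph, Literal, Namespace, XSD
from rdflib.plugins.sparql.operators import (
    register_custom_function,
    unregister_custom_function,
)

EX = Namespace("http://example.org/")

def concat(x, y):
    # custom functions receive already-evaluated RDF terms as arguments
    return Literal("%s %s" % (x, y), datatype=XSD.string)

register_custom_function(EX.concat, concat)
res = Graph().query(
    """SELECT (ex:concat("hello", "world") as ?x) {}""", initNs={"ex": EX}
)
print(list(res)[0][0])  # "hello world"^^xsd:string
unregister_custom_function(EX.concat, concat)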
diff --git a/test/test_issue363.py b/test/test_issue363.py
index 7fc6cb26..792c2441 100644
--- a/test/test_issue363.py
+++ b/test/test_issue363.py
@@ -1,7 +1,7 @@
import rdflib
from nose.tools import assert_raises
-data = '''<?xml version="1.0" encoding="utf-8"?>
+data = """<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:http="http://www.w3.org/2011/http#">
@@ -13,9 +13,9 @@ data = '''<?xml version="1.0" encoding="utf-8"?>
</http:HeaderElement>
</rdf:RDF>
-'''
+"""
-data2 = '''<?xml version="1.0" encoding="utf-8"?>
+data2 = """<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns="http://www.example.org/meeting_organization#">
@@ -26,11 +26,11 @@ data2 = '''<?xml version="1.0" encoding="utf-8"?>
</Location>
</rdf:Description>
</rdf:RDF>
-'''
+"""
def test_broken_rdfxml():
- #import ipdb; ipdb.set_trace()
+ # import ipdb; ipdb.set_trace()
def p():
rdflib.Graph().parse(data=data)
@@ -39,9 +39,9 @@ def test_broken_rdfxml():
def test_parsetype_resource():
g = rdflib.Graph().parse(data=data2)
- print(g.serialize(format='n3'))
+ print(g.serialize(format="n3"))
-if __name__ == '__main__':
+if __name__ == "__main__":
test_broken_rdfxml()
test_parsetype_resource()
diff --git a/test/test_issue379.py b/test/test_issue379.py
index 31dfce2b..348e3d0f 100644
--- a/test/test_issue379.py
+++ b/test/test_issue379.py
@@ -41,7 +41,7 @@ class TestBaseAllowsHash(TestCase):
permitted for an IRIREF:
http://www.w3.org/TR/2014/REC-turtle-20140225/#grammar-production-prefixID
"""
- self.g.parse(data=prefix_data, format='n3')
+ self.g.parse(data=prefix_data, format="n3")
self.assertIsInstance(next(self.g.subjects()), rdflib.URIRef)
def test_parse_successful_base_with_hash(self):
@@ -50,7 +50,7 @@ class TestBaseAllowsHash(TestCase):
permitted for an '@prefix' since both allow an IRIREF:
http://www.w3.org/TR/2014/REC-turtle-20140225/#grammar-production-base
"""
- self.g.parse(data=base_data, format='n3')
+ self.g.parse(data=base_data, format="n3")
self.assertIsInstance(next(self.g.subjects()), rdflib.URIRef)
diff --git a/test/test_issue381.py b/test/test_issue381.py
index 3ab21d88..a48cafe7 100644
--- a/test/test_issue381.py
+++ b/test/test_issue381.py
@@ -12,10 +12,9 @@ def test_no_spurious_semicolon():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -28,10 +27,9 @@ def test_one_spurious_semicolon():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -44,10 +42,9 @@ def test_one_spurious_semicolon_no_perdiod():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -60,10 +57,9 @@ def test_two_spurious_semicolons_no_period():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -76,10 +72,9 @@ def test_one_spurious_semicolons_bnode():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (BNode("a"), NS.b, NS.c),
- (BNode("a"), NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(BNode("a"), NS.b, NS.c), (BNode("a"), NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -98,11 +93,10 @@ def test_pathological():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- (NS.a, NS.f, NS.g),
- ])
+ expected.addN(
+ t + (expected,)
+ for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e), (NS.a, NS.f, NS.g),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -116,10 +110,9 @@ def test_mixing_spurious_semicolons_and_commas():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- (NS.a, NS.d, NS.f),
- ])
+ expected.addN(
+ t + (expected,)
+ for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e), (NS.a, NS.d, NS.f),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
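A note on the addN() calls reformatted throughout this file: addN() takes an iterable of (subject, predicate, object, context) quads, and in these tests the target Graph itself serves as the context. In isolation (the namespace URI is illustrative; the tests define NS earlier in the file):

from rdflib import Graph, Namespace

NS = Namespace("http://example.org/ns#")
expected = Graph()
# each quad names the graph it belongs to; for a plain Graph, itself
expected.addN(
    (s, p, o, expected) for s, p, o in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e)]
)
assert len(expected) == 2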
diff --git a/test/test_issue432.py b/test/test_issue432.py
index 05d8258a..c0731eb0 100644
--- a/test/test_issue432.py
+++ b/test/test_issue432.py
@@ -12,7 +12,7 @@ def test_trig_default_graph():
<g1> { <d> <e> <f> . }
<g2> { <g> <h> <i> . }
"""
- ds.parse(data=data, format='trig', publicID=ds.default_context.identifier)
+ ds.parse(data=data, format="trig", publicID=ds.default_context.identifier)
assert len(list(ds.contexts())) == 3
assert len(list(ds.default_context)) == 2
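The publicID= argument here is what routes the TriG default graph's triples into the dataset's own default context instead of a fresh anonymous graph. A sketch assuming a Dataset, as the test above appears to use:

from rdflib import Dataset

data = """
<http://example.org/s> <http://example.org/p> <http://example.org/o> .
<http://example.org/g1> { <http://example.org/s2> <http://example.org/p2> <http://example.org/o2> . }
"""
ds = Dataset()
ds.parse(data=data, format="trig", publicID=ds.default_context.identifier)
# the one unnamed triple lands in the default context
assert len(list(ds.default_context)) == 1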
diff --git a/test/test_issue446.py b/test/test_issue446.py
index 79cd41be..98c46578 100644
--- a/test/test_issue446.py
+++ b/test/test_issue446.py
@@ -7,16 +7,15 @@ from rdflib import Graph, URIRef, Literal
def test_sparql_unicode():
g = Graph()
trip = (
- URIRef('http://example.org/foo'),
- URIRef('http://example.org/bar'),
- URIRef(u'http://example.org/jörn')
+ URIRef("http://example.org/foo"),
+ URIRef("http://example.org/bar"),
+ URIRef(u"http://example.org/jörn"),
)
g.add(trip)
q = 'select ?s ?p ?o where { ?s ?p ?o . FILTER(lang(?o) = "") }'
r = list(g.query(q))
- assert r == [], \
- 'sparql query %r should return nothing but returns %r' % (q, r)
+ assert r == [], "sparql query %r should return nothing but returns %r" % (q, r)
-if __name__ == '__main__':
+if __name__ == "__main__":
test_sparql_unicode()
diff --git a/test/test_issue492.py b/test/test_issue492.py
index 754e5cbf..713ce7ac 100644
--- a/test/test_issue492.py
+++ b/test/test_issue492.py
@@ -6,7 +6,7 @@ import rdflib
def test_issue492():
- query = '''
+ query = """
prefix owl: <http://www.w3.org/2002/07/owl#>
prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
select ?x
@@ -15,7 +15,7 @@ def test_issue492():
?x rdf:rest/rdf:first _:6.
?x rdf:rest/rdf:first _:5.
}
- '''
+ """
print(rdflib.__version__)
g = rdflib.Graph()
diff --git a/test/test_issue523.py b/test/test_issue523.py
index 774167f3..2910cdd7 100644
--- a/test/test_issue523.py
+++ b/test/test_issue523.py
@@ -5,10 +5,12 @@ import rdflib
def test_issue523():
g = rdflib.Graph()
- r = g.query("SELECT (<../baz> as ?test) WHERE {}",
- base=rdflib.URIRef("http://example.org/foo/bar"))
+ r = g.query(
+ "SELECT (<../baz> as ?test) WHERE {}",
+ base=rdflib.URIRef("http://example.org/foo/bar"),
+ )
res = r.serialize(format="csv")
- assert res == b'test\r\nhttp://example.org/baz\r\n', repr(res)
+ assert res == b"test\r\nhttp://example.org/baz\r\n", repr(res)
# expected result:
# test
diff --git a/test/test_issue532.py b/test/test_issue532.py
index 422dd507..0e9fa89f 100644
--- a/test/test_issue532.py
+++ b/test/test_issue532.py
@@ -32,7 +32,7 @@ def test_issue532():
"""
g = Graph()
- g.parse(data=data, format='n3')
+ g.parse(data=data, format="n3")
getnewMeps = """
PREFIX lpv: <http://purl.org/linkedpolitics/vocabulary/>
diff --git a/test/test_issue545.py b/test/test_issue545.py
index 86c8723a..ea9f185b 100644
--- a/test/test_issue545.py
+++ b/test/test_issue545.py
@@ -1,4 +1,3 @@
-
from rdflib.plugins import sparql
from rdflib.namespace import RDFS, OWL, DC, SKOS
@@ -15,4 +14,5 @@ def test_issue():
?property rdfs:label | skos:altLabel ?label .
}
""",
- initNs={"rdfs": RDFS, "owl": OWL, "dc": DC, "skos": SKOS})
+ initNs={"rdfs": RDFS, "owl": OWL, "dc": DC, "skos": SKOS},
+ )
diff --git a/test/test_issue554.py b/test/test_issue554.py
index ba946cf4..4ea83d21 100644
--- a/test/test_issue554.py
+++ b/test/test_issue554.py
@@ -5,11 +5,10 @@ import rdflib
def test_sparql_empty_no_row():
g = rdflib.Graph()
- q = 'select ?whatever { }'
+ q = "select ?whatever { }"
r = list(g.query(q))
- assert r == [], \
- 'sparql query %s should return empty list but returns %s' % (q, r)
+ assert r == [], "sparql query %s should return empty list but returns %s" % (q, r)
-if __name__ == '__main__':
+if __name__ == "__main__":
test_sparql_empty_no_row()
diff --git a/test/test_issue563.py b/test/test_issue563.py
index 1ae8460d..58721236 100644
--- a/test/test_issue563.py
+++ b/test/test_issue563.py
@@ -25,22 +25,26 @@ def test_sample():
g = Graph()
results = set(tuple(i) for i in g.query(QUERY % ("SAMPLE", "SAMPLE")))
- assert results == set([
- (Literal(2), Literal(6), Literal(10)),
- (Literal(3), Literal(9), Literal(15)),
- (Literal(5), None, Literal(25)),
- ])
+ assert results == set(
+ [
+ (Literal(2), Literal(6), Literal(10)),
+ (Literal(3), Literal(9), Literal(15)),
+ (Literal(5), None, Literal(25)),
+ ]
+ )
def test_count():
g = Graph()
results = set(tuple(i) for i in g.query(QUERY % ("COUNT", "COUNT")))
- assert results == set([
- (Literal(2), Literal(1), Literal(1)),
- (Literal(3), Literal(1), Literal(1)),
- (Literal(5), Literal(0), Literal(1)),
- ])
+ assert results == set(
+ [
+ (Literal(2), Literal(1), Literal(1)),
+ (Literal(3), Literal(1), Literal(1)),
+ (Literal(5), Literal(0), Literal(1)),
+ ]
+ )
if __name__ == "__main__":
diff --git a/test/test_issue579.py b/test/test_issue579.py
index 9ba326b3..2420e077 100644
--- a/test/test_issue579.py
+++ b/test/test_issue579.py
@@ -6,9 +6,9 @@ from rdflib.namespace import FOAF, RDF
def test_issue579():
g = Graph()
- g.bind('foaf', FOAF)
+ g.bind("foaf", FOAF)
n = Namespace("http://myname/")
- g.add((n.bob, FOAF.name, Literal('bb')))
+ g.add((n.bob, FOAF.name, Literal("bb")))
# query is successful.
assert len(g.query("select ?n where { ?n foaf:name 'bb' . }")) == 1
# update is not.
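The asymmetry pinned down by this test: prefixes bound on the graph are injected into query() but, at this point in rdflib's history, not into update(), so updates need explicit PREFIX lines. A sketch of that workaround:

from rdflib import Graph, Literal, Namespace
from rdflib.namespace import FOAF

g = Graph()
g.bind("foaf", FOAF)
n = Namespace("http://myname/")
g.add((n.bob, FOAF.name, Literal("bb")))
# spelling the prefix out keeps the update independent of graph bindings
g.update(
    'PREFIX foaf: <http://xmlns.com/foaf/0.1/> '
    'DELETE { ?n foaf:name "bb" } WHERE { ?n foaf:name "bb" }'
)
assert len(g) == 0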
diff --git a/test/test_issue604.py b/test/test_issue604.py
index aef19b8c..7a827241 100644
--- a/test/test_issue604.py
+++ b/test/test_issue604.py
@@ -6,7 +6,7 @@ from rdflib.collection import Collection
def test_issue604():
- EX = Namespace('http://ex.co/')
+ EX = Namespace("http://ex.co/")
g = Graph()
bn = BNode()
g.add((EX.s, EX.p, bn))
diff --git a/test/test_issue655.py b/test/test_issue655.py
index 1c640709..cac449f1 100644
--- a/test/test_issue655.py
+++ b/test/test_issue655.py
@@ -5,53 +5,27 @@ from rdflib.compare import to_isomorphic
class TestIssue655(unittest.TestCase):
-
def test_issue655(self):
# make sure that inf and nan are serialized correctly
- dt = XSD['double'].n3()
- self.assertEqual(
- Literal(float("inf"))._literal_n3(True),
- '"INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(float("-inf"))._literal_n3(True),
- '"-INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(float("nan"))._literal_n3(True),
- '"NaN"^^%s' % dt
- )
+ dt = XSD["double"].n3()
+ self.assertEqual(Literal(float("inf"))._literal_n3(True), '"INF"^^%s' % dt)
+ self.assertEqual(Literal(float("-inf"))._literal_n3(True), '"-INF"^^%s' % dt)
+ self.assertEqual(Literal(float("nan"))._literal_n3(True), '"NaN"^^%s' % dt)
- dt = XSD['decimal'].n3()
- self.assertEqual(
- Literal(Decimal("inf"))._literal_n3(True),
- '"INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(Decimal("-inf"))._literal_n3(True),
- '"-INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(Decimal("nan"))._literal_n3(True),
- '"NaN"^^%s' % dt
- )
+ dt = XSD["decimal"].n3()
+ self.assertEqual(Literal(Decimal("inf"))._literal_n3(True), '"INF"^^%s' % dt)
+ self.assertEqual(Literal(Decimal("-inf"))._literal_n3(True), '"-INF"^^%s' % dt)
+ self.assertEqual(Literal(Decimal("nan"))._literal_n3(True), '"NaN"^^%s' % dt)
self.assertEqual(
- Literal("inf", datatype=XSD['decimal'])._literal_n3(True),
- '"INF"^^%s' % dt
+ Literal("inf", datatype=XSD["decimal"])._literal_n3(True), '"INF"^^%s' % dt
)
# assert that non-numerical aren't changed
- self.assertEqual(
- Literal('inf')._literal_n3(True),
- '"inf"'
- )
- self.assertEqual(
- Literal('nan')._literal_n3(True),
- '"nan"'
- )
+ self.assertEqual(Literal("inf")._literal_n3(True), '"inf"')
+ self.assertEqual(Literal("nan")._literal_n3(True), '"nan"')
- PROV = Namespace('http://www.w3.org/ns/prov#')
+ PROV = Namespace("http://www.w3.org/ns/prov#")
bob = URIRef("http://example.org/object/Bob")
@@ -62,7 +36,7 @@ class TestIssue655(unittest.TestCase):
# Build g2 out of the deserialisation of g1 serialisation
g2 = Graph()
- g2.parse(data=g1.serialize(format='turtle'), format='turtle')
+ g2.parse(data=g1.serialize(format="turtle"), format="turtle")
self.assertTrue(to_isomorphic(g1) == to_isomorphic(g2))
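to_isomorphic(), used for the final comparison, canonicalises blank-node labels so two graphs can be compared with == regardless of the identifiers their bnodes happen to carry. In isolation:

from rdflib import BNode, Graph, Literal, URIRef
from rdflib.compare import to_isomorphic

P = URIRef("http://example.org/p")
g1, g2 = Graph(), Graph()
g1.add((BNode("x"), P, Literal("v")))  # different bnode labels,
g2.add((BNode("y"), P, Literal("v")))  # same graph shape
assert to_isomorphic(g1) == to_isomorphic(g2)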
diff --git a/test/test_issue715.py b/test/test_issue715.py
index 121e05fd..a2e21169 100644
--- a/test/test_issue715.py
+++ b/test/test_issue715.py
@@ -11,19 +11,18 @@ from rdflib import URIRef, Graph
def test_issue_715():
g = Graph()
a, b, x, y, z = [URIRef(s) for s in "abxyz"]
- isa = URIRef('isa')
+ isa = URIRef("isa")
g.add((a, isa, x))
g.add((a, isa, y))
g.add((b, isa, x))
- l1 = list(g.query('SELECT ?child ?parent WHERE {?child <isa> ?parent .}'))
- l2 = list(g.query('SELECT ?child ?parent WHERE {?child <isa>+ ?parent .}'))
+ l1 = list(g.query("SELECT ?child ?parent WHERE {?child <isa> ?parent .}"))
+ l2 = list(g.query("SELECT ?child ?parent WHERE {?child <isa>+ ?parent .}"))
assert len(l1) == len(l2)
assert set(l1) == set(l2)
- l3 = list(g.query('SELECT ?child ?parent WHERE {?child <isa>* ?parent .}'))
+ l3 = list(g.query("SELECT ?child ?parent WHERE {?child <isa>* ?parent .}"))
assert len(l3) == 7
- assert set(l3) == set(l1).union({(URIRef(n), URIRef(n)) for
- n in (a, b, x, y)})
+ assert set(l3) == set(l1).union({(URIRef(n), URIRef(n)) for n in (a, b, x, y)})
g.add((y, isa, z))
- l4 = list(g.query('SELECT ?child ?parent WHERE {?child <isa>* ?parent .}'))
+ l4 = list(g.query("SELECT ?child ?parent WHERE {?child <isa>* ?parent .}"))
assert len(l4) == 10
assert (a, z) in l4
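The <isa>+ and <isa>* property paths queried above expand to one-or-more and zero-or-more hops; the zero-hop case is why l3 picks up an (n, n) pair for every node that occurs in an isa triple. A condensed sketch:

from rdflib import Graph, URIRef

g = Graph()
a, x, y = URIRef("a"), URIRef("x"), URIRef("y")
isa = URIRef("isa")
g.add((a, isa, x))
g.add((x, isa, y))
# <isa>+ follows chains, including the transitive (a, y)
plus = set(g.query("SELECT ?c ?p WHERE { ?c <isa>+ ?p }"))
assert (a, y) in plus
# <isa>* additionally matches zero hops, i.e. (n, n) for each node
star = set(g.query("SELECT ?c ?p WHERE { ?c <isa>* ?p }"))
assert (a, a) in star and (a, y) in star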
diff --git a/test/test_issue733.py b/test/test_issue733.py
index bffeb400..2a6b612a 100644
--- a/test/test_issue733.py
+++ b/test/test_issue733.py
@@ -12,13 +12,12 @@ from rdflib.namespace import RDF, RDFS, NamespaceManager, Namespace
class TestIssue733(unittest.TestCase):
-
def test_issue_733(self):
g = Graph()
- example = Namespace('http://example.org/')
+ example = Namespace("http://example.org/")
g.add((example.S, example.P, example.O1))
g.add((example.S, example.P, example.O2))
- q = '''
+ q = """
prefix ex:<http://example.org/>
select ?st ?ot ?gt where {
{SELECT (count(*) as ?st) where {
@@ -34,20 +33,20 @@ class TestIssue733(unittest.TestCase):
FILTER (?o!=ex:O1 && ?s!=ex:O2)
}}
}
- '''
+ """
res = g.query(q)
assert len(res) == 1
results = [[lit.toPython() for lit in line] for line in res]
- assert results[0][0]== 2
+ assert results[0][0] == 2
assert results[0][1] == 1
assert results[0][2] == 1
def test_issue_733_independant(self):
g = Graph()
- example = Namespace('http://example.org/')
+ example = Namespace("http://example.org/")
g.add((example.S, example.P, example.O1))
g.add((example.S, example.P, example.O2))
- q = '''
+ q = """
prefix ex:<http://example.org/>
select ?st where {
{SELECT (count(*) as ?st) where {
@@ -55,12 +54,12 @@ class TestIssue733(unittest.TestCase):
FILTER (?s=ex:S)
}}
}
- '''
+ """
res = g.query(q)
assert len(res) == 1
results = [[lit.toPython() for lit in line] for line in res]
assert results[0][0] == 2
- q = '''
+ q = """
prefix ex:<http://example.org/>
select ?st where {
{SELECT (count(*) as ?st) where {
@@ -68,7 +67,7 @@ class TestIssue733(unittest.TestCase):
FILTER (?o=ex:O1)
}}
}
- '''
+ """
res = g.query(q)
results = [[lit.toPython() for lit in line] for line in res]
assert results[0][0] == 1
diff --git a/test/test_issue920.py b/test/test_issue920.py
index eb12edc4..7aafa794 100644
--- a/test/test_issue920.py
+++ b/test/test_issue920.py
@@ -14,22 +14,21 @@ import unittest
class TestIssue920(unittest.TestCase):
-
def test_issue_920(self):
g = Graph()
# NT tests
- g.parse(data='<a:> <b:> <c:> .', format='nt')
- g.parse(data='<http://a> <http://b> <http://c> .', format='nt')
- g.parse(data='<https://a> <http://> <http://c> .', format='nt')
+ g.parse(data="<a:> <b:> <c:> .", format="nt")
+ g.parse(data="<http://a> <http://b> <http://c> .", format="nt")
+ g.parse(data="<https://a> <http://> <http://c> .", format="nt")
# related parser tests
- g.parse(data='<a:> <b:> <c:> .', format='turtle')
- g.parse(data='<http://a> <http://b> <http://c> .', format='turtle')
- g.parse(data='<https://a> <http://> <http://c> .', format='turtle')
+ g.parse(data="<a:> <b:> <c:> .", format="turtle")
+ g.parse(data="<http://a> <http://b> <http://c> .", format="turtle")
+ g.parse(data="<https://a> <http://> <http://c> .", format="turtle")
- g.parse(data='<a:> <b:> <c:> .', format='n3')
- g.parse(data='<http://a> <http://b> <http://c> .', format='n3')
- g.parse(data='<https://a> <http://> <http://c> .', format='n3')
+ g.parse(data="<a:> <b:> <c:> .", format="n3")
+ g.parse(data="<http://a> <http://b> <http://c> .", format="n3")
+ g.parse(data="<https://a> <http://> <http://c> .", format="n3")
if __name__ == "__main__":
diff --git a/test/test_issue923.py b/test/test_issue923.py
index 3becb6f8..48f2e4de 100644
--- a/test/test_issue923.py
+++ b/test/test_issue923.py
@@ -32,4 +32,7 @@ RESULT_SOURCE = u"""\
def test_issue_923():
with StringIO(RESULT_SOURCE) as result_source:
- Result.parse(source=result_source, content_type="application/sparql-results+json;charset=utf-8")
+ Result.parse(
+ source=result_source,
+ content_type="application/sparql-results+json;charset=utf-8",
+ )
diff --git a/test/test_issue953.py b/test/test_issue953.py
index 1e211e12..879486d8 100644
--- a/test/test_issue953.py
+++ b/test/test_issue953.py
@@ -5,11 +5,11 @@ import unittest
class TestIssue953(unittest.TestCase):
-
def test_issue_939(self):
- lit = Literal(Fraction('2/3'))
- assert lit.datatype == URIRef('http://www.w3.org/2002/07/owl#rational')
+ lit = Literal(Fraction("2/3"))
+ assert lit.datatype == URIRef("http://www.w3.org/2002/07/owl#rational")
assert lit.n3() == '"2/3"^^<http://www.w3.org/2002/07/owl#rational>'
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/test_issue_git_200.py b/test/test_issue_git_200.py
index 32a4ba9f..84e06b1a 100644
--- a/test/test_issue_git_200.py
+++ b/test/test_issue_git_200.py
@@ -10,7 +10,8 @@ def test_broken_add():
nose.tools.assert_raises(AssertionError, lambda: g.addN([(1, 2, 3, g)]))
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_issue_git_336.py b/test/test_issue_git_336.py
index f3250107..6a8abb7c 100644
--- a/test/test_issue_git_336.py
+++ b/test/test_issue_git_336.py
@@ -8,7 +8,7 @@ import nose.tools
# stripped-down culprit:
-'''\
+"""\
@prefix fs: <http://freesurfer.net/fswiki/terms/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@@ -17,21 +17,24 @@ import nose.tools
prov:Entity ;
fs:mrisurf.c-cvs_version
"$Id: mrisurf.c,v 1.693.2.2 2011/04/27 19:21:05 nicks Exp $" .
-'''
+"""
def test_ns_localname_roundtrip():
- XNS = rdflib.Namespace('http://example.net/fs')
+ XNS = rdflib.Namespace("http://example.net/fs")
g = rdflib.Graph()
- g.bind('xns', str(XNS))
- g.add((
- rdflib.URIRef('http://example.com/thingy'),
- XNS['lowecase.xxx-xxx_xxx'], # <- not round trippable
- rdflib.Literal("Junk")))
- turtledump = g.serialize(format="turtle").decode('utf-8')
- xmldump = g.serialize().decode('utf-8')
+ g.bind("xns", str(XNS))
+ g.add(
+ (
+ rdflib.URIRef("http://example.com/thingy"),
+ XNS["lowecase.xxx-xxx_xxx"], # <- not round trippable
+ rdflib.Literal("Junk"),
+ )
+ )
+ turtledump = g.serialize(format="turtle").decode("utf-8")
+ xmldump = g.serialize().decode("utf-8")
g1 = rdflib.Graph()
g1.parse(data=xmldump)
@@ -39,7 +42,8 @@ def test_ns_localname_roundtrip():
g1.parse(data=turtledump, format="turtle")
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_literal.py b/test/test_literal.py
index 0a20d85e..8124f99d 100644
--- a/test/test_literal.py
+++ b/test/test_literal.py
@@ -34,7 +34,7 @@ class TestLiteral(unittest.TestCase):
"""
g = rdflib.Graph()
g.parse(data=d)
- a = rdflib.Literal('a\\b')
+ a = rdflib.Literal("a\\b")
b = list(g.objects())[0]
self.assertEqual(a, b)
@@ -45,8 +45,9 @@ class TestLiteral(unittest.TestCase):
class TestNew(unittest.TestCase):
def testCantPassLangAndDatatype(self):
- self.assertRaises(TypeError,
- Literal, 'foo', lang='en', datatype=URIRef("http://example.com/"))
+ self.assertRaises(
+ TypeError, Literal, "foo", lang="en", datatype=URIRef("http://example.com/")
+ )
def testFromOtherLiteral(self):
l = Literal(1)
@@ -71,21 +72,26 @@ class TestNew(unittest.TestCase):
class TestRepr(unittest.TestCase):
def testOmitsMissingDatatypeAndLang(self):
- self.assertEqual(repr(Literal("foo")),
- uformat("rdflib.term.Literal(u'foo')"))
+ self.assertEqual(repr(Literal("foo")), uformat("rdflib.term.Literal(u'foo')"))
def testOmitsMissingDatatype(self):
- self.assertEqual(repr(Literal("foo", lang='en')),
- uformat("rdflib.term.Literal(u'foo', lang='en')"))
+ self.assertEqual(
+ repr(Literal("foo", lang="en")),
+ uformat("rdflib.term.Literal(u'foo', lang='en')"),
+ )
def testOmitsMissingLang(self):
self.assertEqual(
- repr(Literal("foo", datatype=URIRef('http://example.com/'))),
- uformat("rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"))
+ repr(Literal("foo", datatype=URIRef("http://example.com/"))),
+ uformat(
+ "rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"
+ ),
+ )
def testSubclassNameAppearsInRepr(self):
class MyLiteral(Literal):
pass
+
x = MyLiteral(u"foo")
self.assertEqual(repr(x), uformat("MyLiteral(u'foo')"))
@@ -97,42 +103,41 @@ class TestDoubleOutput(unittest.TestCase):
out = vv._literal_n3(use_plain=True)
self.assertTrue(out in ["8.8e-01", "0.88"], out)
+
class TestParseBoolean(unittest.TestCase):
"""confirms the fix for https://github.com/RDFLib/rdflib/issues/913"""
+
def testTrueBoolean(self):
- test_value = Literal("tRue", datatype = _XSD_BOOLEAN)
+ test_value = Literal("tRue", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
- test_value = Literal("1",datatype = _XSD_BOOLEAN)
+ test_value = Literal("1", datatype=_XSD_BOOLEAN)
self.assertTrue(test_value.value)
def testFalseBoolean(self):
- test_value = Literal("falsE", datatype = _XSD_BOOLEAN)
+ test_value = Literal("falsE", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
- test_value = Literal("0",datatype = _XSD_BOOLEAN)
+ test_value = Literal("0", datatype=_XSD_BOOLEAN)
self.assertFalse(test_value.value)
def testNonFalseBoolean(self):
- test_value = Literal("abcd", datatype = _XSD_BOOLEAN)
+ test_value = Literal("abcd", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
- test_value = Literal("10",datatype = _XSD_BOOLEAN)
+ test_value = Literal("10", datatype=_XSD_BOOLEAN)
self.assertRaises(DeprecationWarning)
self.assertFalse(test_value.value)
-
class TestBindings(unittest.TestCase):
-
def testBinding(self):
-
class a:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
- return '<<<%s>>>' % self.v
+ return "<<<%s>>>" % self.v
- dtA = rdflib.URIRef('urn:dt:a')
+ dtA = rdflib.URIRef("urn:dt:a")
bind(dtA, a)
va = a("<<<2>>>")
@@ -149,10 +154,10 @@ class TestBindings(unittest.TestCase):
self.v = v[3:-3]
def __str__(self):
- return 'B%s' % self.v
+ return "B%s" % self.v
- dtB = rdflib.URIRef('urn:dt:b')
- bind(dtB, b, None, lambda x: '<<<%s>>>' % x)
+ dtB = rdflib.URIRef("urn:dt:b")
+ bind(dtB, b, None, lambda x: "<<<%s>>>" % x)
vb = b("<<<3>>>")
lb = Literal(vb, normalize=True)
@@ -160,16 +165,15 @@ class TestBindings(unittest.TestCase):
self.assertEqual(lb.datatype, dtB)
def testSpecificBinding(self):
-
def lexify(s):
return "--%s--" % s
def unlexify(s):
return s[2:-2]
- datatype = rdflib.URIRef('urn:dt:mystring')
+ datatype = rdflib.URIRef("urn:dt:mystring")
- #Datatype-specific rule
+ # Datatype-specific rule
bind(datatype, str, unlexify, lexify, datatype_specific=True)
s = "Hello"
diff --git a/test/test_memory_store.py b/test/test_memory_store.py
index f579250e..546d12ad 100644
--- a/test/test_memory_store.py
+++ b/test/test_memory_store.py
@@ -1,21 +1,21 @@
import unittest
import rdflib
-rdflib.plugin.register('Memory', rdflib.store.Store,
- 'rdflib.plugins.memory', 'Memory')
+rdflib.plugin.register("Memory", rdflib.store.Store, "rdflib.plugins.memory", "Memory")
class StoreTestCase(unittest.TestCase):
-
def test_memory_store(self):
g = rdflib.Graph("Memory")
subj1 = rdflib.URIRef("http://example.org/foo#bar1")
pred1 = rdflib.URIRef("http://example.org/foo#bar2")
obj1 = rdflib.URIRef("http://example.org/foo#bar3")
triple1 = (subj1, pred1, obj1)
- triple2 = (subj1,
- rdflib.URIRef("http://example.org/foo#bar4"),
- rdflib.URIRef("http://example.org/foo#bar5"))
+ triple2 = (
+ subj1,
+ rdflib.URIRef("http://example.org/foo#bar4"),
+ rdflib.URIRef("http://example.org/foo#bar5"),
+ )
g.add(triple1)
self.assertTrue(len(g) == 1)
g.add(triple2)
@@ -27,5 +27,5 @@ class StoreTestCase(unittest.TestCase):
g.serialize()
-if __name__ == '__main__':
- unittest.main(defaultTest='test_suite')
+if __name__ == "__main__":
+ unittest.main(defaultTest="test_suite")
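plugin.register(), shown at the top of this file, maps a name onto a store implementation so that Graph("Memory") can look the store up by name. In isolation:

import rdflib

rdflib.plugin.register(
    "Memory", rdflib.store.Store, "rdflib.plugins.memory", "Memory"
)
g = rdflib.Graph("Memory")  # store resolved through the plugin registry
g.add((rdflib.URIRef("urn:s"), rdflib.URIRef("urn:p"), rdflib.URIRef("urn:o")))
assert len(g) == 1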
diff --git a/test/test_mulpath_n3.py b/test/test_mulpath_n3.py
index f0bbda73..f4f26dc4 100644
--- a/test/test_mulpath_n3.py
+++ b/test/test_mulpath_n3.py
@@ -4,6 +4,6 @@ from rdflib import RDFS, URIRef
def test_mulpath_n3():
- uri = 'http://example.com/foo'
+ uri = "http://example.com/foo"
n3 = (URIRef(uri) * ZeroOrMore).n3()
- assert n3 == '<' + uri + '>*'
+ assert n3 == "<" + uri + ">*"
diff --git a/test/test_n3.py b/test/test_n3.py
index 48a77eb4..9a378843 100644
--- a/test/test_n3.py
+++ b/test/test_n3.py
@@ -61,7 +61,6 @@ n3:context a rdf:Property; rdfs:domain n3:statement;
class TestN3Case(unittest.TestCase):
-
def setUp(self):
pass
@@ -92,12 +91,10 @@ class TestN3Case(unittest.TestCase):
g = Graph()
g.parse(data=input, format="n3")
print(list(g))
- self.assertTrue((None, None, Literal('Foo')) in g)
- self.assertTrue(
- (URIRef('http://example.com/doc/bar'), None, None) in g)
- self.assertTrue(
- (URIRef('http://example.com/doc/doc2/bing'), None, None) in g)
- self.assertTrue((URIRef('http://test.com/bong'), None, None) in g)
+ self.assertTrue((None, None, Literal("Foo")) in g)
+ self.assertTrue((URIRef("http://example.com/doc/bar"), None, None) in g)
+ self.assertTrue((URIRef("http://example.com/doc/doc2/bing"), None, None) in g)
+ self.assertTrue((URIRef("http://test.com/bong"), None, None) in g)
def testBaseExplicit(self):
"""
@@ -114,21 +111,24 @@ class TestN3Case(unittest.TestCase):
<bar> :name "Bar" .
"""
g = Graph()
- g.parse(data=input, publicID='http://blah.com/', format="n3")
+ g.parse(data=input, publicID="http://blah.com/", format="n3")
print(list(g))
- self.assertTrue(
- (URIRef('http://blah.com/foo'), None, Literal('Foo')) in g)
- self.assertTrue(
- (URIRef('http://example.com/doc/bar'), None, None) in g)
+ self.assertTrue((URIRef("http://blah.com/foo"), None, Literal("Foo")) in g)
+ self.assertTrue((URIRef("http://example.com/doc/bar"), None, None) in g)
def testBaseSerialize(self):
g = Graph()
- g.add((URIRef('http://example.com/people/Bob'), URIRef(
- 'urn:knows'), URIRef('http://example.com/people/Linda')))
- s = g.serialize(base='http://example.com/', format='n3')
- self.assertTrue('<people/Bob>'.encode("latin-1") in s)
+ g.add(
+ (
+ URIRef("http://example.com/people/Bob"),
+ URIRef("urn:knows"),
+ URIRef("http://example.com/people/Linda"),
+ )
+ )
+ s = g.serialize(base="http://example.com/", format="n3")
+ self.assertTrue("<people/Bob>".encode("latin-1") in s)
g2 = ConjunctiveGraph()
- g2.parse(data=s, publicID='http://example.com/', format='n3')
+ g2.parse(data=s, publicID="http://example.com/", format="n3")
self.assertEqual(list(g), list(g2))
def testIssue23(self):
@@ -192,7 +192,8 @@ foo-bar:Ex foo-bar:name "Test" . """
g = Graph()
g.parse(
data="@prefix a.1: <http://example.org/> .\n a.1:cake <urn:x> <urn:y> . \n",
- format='n3')
+ format="n3",
+ )
def testModel(self):
g = ConjunctiveGraph()
@@ -215,47 +216,62 @@ foo-bar:Ex foo-bar:name "Test" . """
g = ConjunctiveGraph()
try:
g.parse(
- "http://groups.csail.mit.edu/dig/2005/09/rein/examples/troop42-policy.n3", format="n3")
+ "http://groups.csail.mit.edu/dig/2005/09/rein/examples/troop42-policy.n3",
+ format="n3",
+ )
except URLError:
from nose import SkipTest
- raise SkipTest(
- 'No network to retrieve the information, skipping test')
+
+ raise SkipTest("No network to retrieve the information, skipping test")
def testSingleQuotedLiterals(self):
- test_data = ["""@prefix : <#> . :s :p 'o' .""",
- """@prefix : <#> . :s :p '''o''' ."""]
+ test_data = [
+ """@prefix : <#> . :s :p 'o' .""",
+ """@prefix : <#> . :s :p '''o''' .""",
+ ]
for data in test_data:
# N3 doesn't accept single quotes around string literals
g = ConjunctiveGraph()
- self.assertRaises(BadSyntax, g.parse,
- data=data, format='n3')
+ self.assertRaises(BadSyntax, g.parse, data=data, format="n3")
g = ConjunctiveGraph()
- g.parse(data=data, format='turtle')
+ g.parse(data=data, format="turtle")
self.assertEqual(len(g), 1)
for _, _, o in g:
- self.assertEqual(o, Literal('o'))
+ self.assertEqual(o, Literal("o"))
def testEmptyPrefix(self):
# this is issue https://github.com/RDFLib/rdflib/issues/312
g1 = Graph()
- g1.parse(data=":a :b :c .", format='n3')
+ g1.parse(data=":a :b :c .", format="n3")
g2 = Graph()
- g2.parse(data="@prefix : <#> . :a :b :c .", format='n3')
+ g2.parse(data="@prefix : <#> . :a :b :c .", format="n3")
assert set(g1) == set(
- g2), 'Document with declared empty prefix must match default #'
+ g2
+ ), "Document with declared empty prefix must match default #"
class TestRegularExpressions(unittest.TestCase):
def testExponents(self):
signs = ("", "+", "-")
- mantissas = ("1", "1.", ".1",
- "12", "12.", "1.2", ".12",
- "123", "123.", "12.3", "1.23", ".123")
+ mantissas = (
+ "1",
+ "1.",
+ ".1",
+ "12",
+ "12.",
+ "1.2",
+ ".12",
+ "123",
+ "123.",
+ "12.3",
+ "1.23",
+ ".123",
+ )
es = "eE"
exps = ("1", "12", "+1", "-1", "+12", "-12")
for parts in itertools.product(signs, mantissas, es, exps):
@@ -269,5 +285,5 @@ class TestRegularExpressions(unittest.TestCase):
self.assertFalse(exponent_syntax.match(expstring))
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_n3_suite.py b/test/test_n3_suite.py
index 21e6bcba..f2ab1ab6 100644
--- a/test/test_n3_suite.py
+++ b/test/test_n3_suite.py
@@ -11,19 +11,19 @@ except:
def _get_test_files_formats():
- skiptests = [
- ]
- for f in os.listdir('test/n3'):
+ skiptests = []
+ for f in os.listdir("test/n3"):
if f not in skiptests:
fpath = "test/n3/" + f
- if f.endswith('.rdf'):
- yield fpath, 'xml'
- elif f.endswith('.n3'):
- yield fpath, 'n3'
+ if f.endswith(".rdf"):
+ yield fpath, "xml"
+ elif f.endswith(".n3"):
+ yield fpath, "n3"
+
def all_n3_files():
skiptests = [
- 'test/n3/example-lots_of_graphs.n3', # only n3 can serialize QuotedGraph, no point in testing roundtrip
+ "test/n3/example-lots_of_graphs.n3", # only n3 can serialize QuotedGraph, no point in testing roundtrip
]
for fpath, fmt in _get_test_files_formats():
if fpath in skiptests:
@@ -31,15 +31,17 @@ def all_n3_files():
else:
yield fpath, fmt
+
def test_n3_writing():
for fpath, fmt in _get_test_files_formats():
- yield check_serialize_parse, fpath, fmt, 'n3'
+ yield check_serialize_parse, fpath, fmt, "n3"
if __name__ == "__main__":
if len(sys.argv) > 1:
- check_serialize_parse(sys.argv[1], 'n3', 'n3', True)
+ check_serialize_parse(sys.argv[1], "n3", "n3", True)
sys.exit()
else:
import nose
+
nose.main(defaultTest=__name__)
diff --git a/test/test_namespace.py b/test/test_namespace.py
index 7dd1a25f..48896fdc 100644
--- a/test/test_namespace.py
+++ b/test/test_namespace.py
@@ -6,53 +6,72 @@ from rdflib.term import URIRef
class NamespacePrefixTest(unittest.TestCase):
-
def test_compute_qname(self):
"""Test sequential assignment of unknown prefixes"""
g = Graph()
- self.assertEqual(g.compute_qname(URIRef("http://foo/bar/baz")),
- ("ns1", URIRef("http://foo/bar/"), "baz"))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://foo/bar/baz")),
+ ("ns1", URIRef("http://foo/bar/"), "baz"),
+ )
- self.assertEqual(g.compute_qname(URIRef("http://foo/bar#baz")),
- ("ns2", URIRef("http://foo/bar#"), "baz"))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://foo/bar#baz")),
+ ("ns2", URIRef("http://foo/bar#"), "baz"),
+ )
# should skip to ns4 when ns3 is already assigned
g.bind("ns3", URIRef("http://example.org/"))
- self.assertEqual(g.compute_qname(URIRef("http://blip/blop")),
- ("ns4", URIRef("http://blip/"), "blop"))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://blip/blop")),
+ ("ns4", URIRef("http://blip/"), "blop"),
+ )
# should return empty qnames correctly
- self.assertEqual(g.compute_qname(URIRef("http://foo/bar/")),
- ("ns1", URIRef("http://foo/bar/"), ""))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://foo/bar/")),
+ ("ns1", URIRef("http://foo/bar/"), ""),
+ )
def test_reset(self):
- data = ('@prefix a: <http://example.org/a> .\n'
- 'a: <http://example.org/b> <http://example.org/c> .')
- graph = Graph().parse(data=data, format='turtle')
+ data = (
+ "@prefix a: <http://example.org/a> .\n"
+ "a: <http://example.org/b> <http://example.org/c> ."
+ )
+ graph = Graph().parse(data=data, format="turtle")
for p, n in tuple(graph.namespaces()):
graph.store._IOMemory__namespace.pop(p)
graph.store._IOMemory__prefix.pop(n)
graph.namespace_manager.reset()
self.assertFalse(tuple(graph.namespaces()))
- u = URIRef('http://example.org/a')
- prefix, namespace, name = graph.namespace_manager.compute_qname(u, generate=True)
+ u = URIRef("http://example.org/a")
+ prefix, namespace, name = graph.namespace_manager.compute_qname(
+ u, generate=True
+ )
self.assertNotEqual(namespace, u)
def test_reset_preserve_prefixes(self):
- data = ('@prefix a: <http://example.org/a> .\n'
- 'a: <http://example.org/b> <http://example.org/c> .')
- graph = Graph().parse(data=data, format='turtle')
+ data = (
+ "@prefix a: <http://example.org/a> .\n"
+ "a: <http://example.org/b> <http://example.org/c> ."
+ )
+ graph = Graph().parse(data=data, format="turtle")
graph.namespace_manager.reset()
self.assertTrue(tuple(graph.namespaces()))
- u = URIRef('http://example.org/a')
- prefix, namespace, name = graph.namespace_manager.compute_qname(u, generate=True)
+ u = URIRef("http://example.org/a")
+ prefix, namespace, name = graph.namespace_manager.compute_qname(
+ u, generate=True
+ )
self.assertEqual(namespace, u)
def test_n3(self):
g = Graph()
- g.add((URIRef("http://example.com/foo"),
- URIRef("http://example.com/bar"),
- URIRef("http://example.com/baz")))
+ g.add(
+ (
+ URIRef("http://example.com/foo"),
+ URIRef("http://example.com/bar"),
+ URIRef("http://example.com/baz"),
+ )
+ )
n3 = g.serialize(format="n3")
# Gunnar disagrees that this is right:
# self.assertTrue("<http://example.com/foo> ns1:bar <http://example.com/baz> ." in n3)
@@ -62,12 +81,21 @@ class NamespacePrefixTest(unittest.TestCase):
def test_n32(self):
# this test not generating prefixes for subjects/objects
g = Graph()
- g.add((URIRef("http://example1.com/foo"),
- URIRef("http://example2.com/bar"),
- URIRef("http://example3.com/baz")))
+ g.add(
+ (
+ URIRef("http://example1.com/foo"),
+ URIRef("http://example2.com/bar"),
+ URIRef("http://example3.com/baz"),
+ )
+ )
n3 = g.serialize(format="n3")
- self.assertTrue("<http://example1.com/foo> ns1:bar <http://example3.com/baz> .".encode("latin-1") in n3)
+ self.assertTrue(
+ "<http://example1.com/foo> ns1:bar <http://example3.com/baz> .".encode(
+ "latin-1"
+ )
+ in n3
+ )
def test_closed_namespace(self):
"""Tests terms both in an out of the ClosedNamespace FOAF"""
@@ -82,4 +110,7 @@ class NamespacePrefixTest(unittest.TestCase):
self.assertRaises(KeyError, add_not_in_namespace, "firstName")
# a property name within the core FOAF namespace
- self.assertEqual(add_not_in_namespace("givenName"), URIRef("http://xmlns.com/foaf/0.1/givenName"))
+ self.assertEqual(
+ add_not_in_namespace("givenName"),
+ URIRef("http://xmlns.com/foaf/0.1/givenName"),
+ )
diff --git a/test/test_nodepickler.py b/test/test_nodepickler.py
index 31a667da..970ec232 100644
--- a/test/test_nodepickler.py
+++ b/test/test_nodepickler.py
@@ -7,7 +7,7 @@ from rdflib.store import NodePickler
# same as nt/more_literals.nt
cases = [
- 'no quotes',
+ "no quotes",
"single ' quote",
'double " quote',
'triple """ quotes',
@@ -15,7 +15,7 @@ cases = [
'"',
"'",
'"\'"',
- '\\', # len 1
+ "\\", # len 1
'\\"', # len 2
'\\\\"', # len 3
'\\"\\', # len 3
@@ -24,12 +24,13 @@ cases = [
class UtilTestCase(unittest.TestCase):
-
def test_to_bits_from_bits_round_trip(self):
np = NodePickler()
- a = Literal(u'''A test with a \\n (backslash n), "\u00a9" , and newline \n and a second line.
-''')
+ a = Literal(
+ u"""A test with a \\n (backslash n), "\u00a9" , and newline \n and a second line.
+"""
+ )
b = np.loads(np.dumps(a))
self.assertEqual(a, b)
@@ -49,5 +50,5 @@ class UtilTestCase(unittest.TestCase):
self.assertEqual(np._objects, np2._objects)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_nquads.py b/test/test_nquads.py
index 72d15ea7..c25bc7ed 100644
--- a/test/test_nquads.py
+++ b/test/test_nquads.py
@@ -1,11 +1,10 @@
import unittest
from rdflib import ConjunctiveGraph, URIRef, Namespace
-TEST_BASE = 'test/nquads.rdflib'
+TEST_BASE = "test/nquads.rdflib"
class NQuadsParserTest(unittest.TestCase):
-
def _load_example(self):
g = ConjunctiveGraph()
with open("test/nquads.rdflib/example.nquads", "rb") as data:
@@ -46,22 +45,26 @@ class NQuadsParserTest(unittest.TestCase):
uri1 = URIRef("http://example.org/mygraph1")
uri2 = URIRef("http://example.org/mygraph2")
- bob = URIRef(u'urn:bob')
- likes = URIRef(u'urn:likes')
- pizza = URIRef(u'urn:pizza')
+ bob = URIRef(u"urn:bob")
+ likes = URIRef(u"urn:likes")
+ pizza = URIRef(u"urn:pizza")
g.get_context(uri1).add((bob, likes, pizza))
g.get_context(uri2).add((bob, likes, pizza))
- s = g.serialize(format='nquads')
- self.assertEqual(len([x for x in s.split("\n".encode("latin-1")) if x.strip()]), 2)
+ s = g.serialize(format="nquads")
+ self.assertEqual(
+ len([x for x in s.split("\n".encode("latin-1")) if x.strip()]), 2
+ )
g2 = ConjunctiveGraph()
- g2.parse(data=s, format='nquads')
+ g2.parse(data=s, format="nquads")
self.assertEqual(len(g), len(g2))
- self.assertEqual(sorted(x.identifier for x in g.contexts()),
- sorted(x.identifier for x in g2.contexts()))
+ self.assertEqual(
+ sorted(x.identifier for x in g.contexts()),
+ sorted(x.identifier for x in g2.contexts()),
+ )
if __name__ == "__main__":
diff --git a/test/test_nquads_w3c.py b/test/test_nquads_w3c.py
index f12850d2..02d79576 100644
--- a/test/test_nquads_w3c.py
+++ b/test/test_nquads_w3c.py
@@ -13,7 +13,7 @@ def nquads(test):
g = ConjunctiveGraph()
try:
- g.parse(test.action, format='nquads')
+ g.parse(test.action, format="nquads")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
except:
@@ -21,14 +21,11 @@ def nquads(test):
raise
-testers = {
- RDFT.TestNQuadsPositiveSyntax: nquads,
- RDFT.TestNQuadsNegativeSyntax: nquads
-}
+testers = {RDFT.TestNQuadsPositiveSyntax: nquads, RDFT.TestNQuadsNegativeSyntax: nquads}
def test_nquads(tests=None):
- for t in nose_tests(testers, 'test/w3c/nquads/manifest.ttl'):
+ for t in nose_tests(testers, "test/w3c/nquads/manifest.ttl"):
if tests:
for test in tests:
if test in t[1].uri:
@@ -39,7 +36,7 @@ def test_nquads(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_nquads, 'rdflib_nquads')
+ nose_tst_earl_report(test_nquads, "rdflib_nquads")
diff --git a/test/test_nt_misc.py b/test/test_nt_misc.py
index ff49e0c4..7934f70e 100644
--- a/test/test_nt_misc.py
+++ b/test/test_nt_misc.py
@@ -10,7 +10,6 @@ log = logging.getLogger(__name__)
class NTTestCase(unittest.TestCase):
-
def testIssue859(self):
graphA = Graph()
graphB = Graph()
@@ -25,7 +24,7 @@ class NTTestCase(unittest.TestCase):
def testIssue78(self):
g = Graph()
g.add((URIRef("foo"), URIRef("foo"), Literal(u"R\u00E4ksm\u00F6rg\u00E5s")))
- s = g.serialize(format='nt')
+ s = g.serialize(format="nt")
self.assertEqual(type(s), bytes)
self.assertTrue(r"R\u00E4ksm\u00F6rg\u00E5s".encode("latin-1") in s)
@@ -79,7 +78,7 @@ class NTTestCase(unittest.TestCase):
self.assertEqual(res, uniquot)
def test_NTriplesParser_fpath(self):
- fpath = "test/nt/" + os.listdir('test/nt')[0]
+ fpath = "test/nt/" + os.listdir("test/nt")[0]
p = ntriples.NTriplesParser()
self.assertRaises(ntriples.ParseError, p.parse, fpath)
@@ -88,7 +87,7 @@ class NTTestCase(unittest.TestCase):
data = 3
self.assertRaises(ntriples.ParseError, p.parsestring, data)
fname = "test/nt/lists-02.nt"
- with open(fname, 'r') as f:
+ with open(fname, "r") as f:
data = f.read()
p = ntriples.NTriplesParser()
res = p.parsestring(data)
@@ -105,15 +104,21 @@ class NTTestCase(unittest.TestCase):
self.assertTrue(sink is not None)
def test_bad_line(self):
- data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
+ data = (
+ """<http://example.org/resource32> 3 <http://example.org/datatype1> .\n"""
+ )
p = ntriples.NTriplesParser()
self.assertRaises(ntriples.ParseError, p.parsestring, data)
def test_cover_eat(self):
- data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
+ data = (
+ """<http://example.org/resource32> 3 <http://example.org/datatype1> .\n"""
+ )
p = ntriples.NTriplesParser()
p.line = data
- self.assertRaises(ntriples.ParseError, p.eat, re.compile('<http://example.org/datatype1>'))
+ self.assertRaises(
+ ntriples.ParseError, p.eat, re.compile("<http://example.org/datatype1>")
+ )
def test_cover_subjectobjectliteral(self):
# data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
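These cases drive NTriplesParser directly. Roughly, with the parser's default sink (a sketch; the urn triples are illustrative):

    from rdflib.plugins.parsers import ntriples

    p = ntriples.NTriplesParser()  # default sink just counts/prints triples
    p.parsestring("<urn:a> <urn:b> <urn:c> .\n")
    try:
        p.parsestring("<urn:a> 3 <urn:c> .\n")  # bad predicate
    except ntriples.ParseError:
        pass  # ill-formed N-Triples raises ParseError, as the tests assert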
diff --git a/test/test_nt_suite.py b/test/test_nt_suite.py
index c9175320..753d2897 100644
--- a/test/test_nt_suite.py
+++ b/test/test_nt_suite.py
@@ -12,41 +12,39 @@ The actual tests are done in test_roundtrip
def _get_test_files_formats():
- for f in os.listdir('test/nt'):
+ for f in os.listdir("test/nt"):
fpath = "test/nt/" + f
- if f.endswith('.rdf'):
- yield fpath, 'xml'
- elif f.endswith('.nt'):
- yield fpath, 'nt'
+ if f.endswith(".rdf"):
+ yield fpath, "xml"
+ elif f.endswith(".nt"):
+ yield fpath, "nt"
def all_nt_files():
skiptests = [
# illegal literal as subject
- 'test/nt/literals-01.nt',
- 'test/nt/keywords-08.nt',
- 'test/nt/paths-04.nt',
- 'test/nt/numeric-01.nt',
- 'test/nt/numeric-02.nt',
- 'test/nt/numeric-03.nt',
- 'test/nt/numeric-04.nt',
- 'test/nt/numeric-05.nt',
-
+ "test/nt/literals-01.nt",
+ "test/nt/keywords-08.nt",
+ "test/nt/paths-04.nt",
+ "test/nt/numeric-01.nt",
+ "test/nt/numeric-02.nt",
+ "test/nt/numeric-03.nt",
+ "test/nt/numeric-04.nt",
+ "test/nt/numeric-05.nt",
# illegal variables
- 'test/nt/formulae-01.nt',
- 'test/nt/formulae-02.nt',
- 'test/nt/formulae-03.nt',
- 'test/nt/formulae-05.nt',
- 'test/nt/formulae-06.nt',
- 'test/nt/formulae-10.nt',
-
+ "test/nt/formulae-01.nt",
+ "test/nt/formulae-02.nt",
+ "test/nt/formulae-03.nt",
+ "test/nt/formulae-05.nt",
+ "test/nt/formulae-06.nt",
+ "test/nt/formulae-10.nt",
# illegal bnode as predicate
- 'test/nt/paths-06.nt',
- 'test/nt/anons-02.nt',
- 'test/nt/anons-03.nt',
- 'test/nt/qname-01.nt',
- 'test/nt/lists-06.nt',
- ]
+ "test/nt/paths-06.nt",
+ "test/nt/anons-02.nt",
+ "test/nt/anons-03.nt",
+ "test/nt/qname-01.nt",
+ "test/nt/lists-06.nt",
+ ]
for fpath, fmt in _get_test_files_formats():
if fpath in skiptests:
log.debug("Skipping %s, known issue" % fpath)
diff --git a/test/test_nt_w3c.py b/test/test_nt_w3c.py
index 65166f5e..8294e8ff 100644
--- a/test/test_nt_w3c.py
+++ b/test/test_nt_w3c.py
@@ -13,7 +13,7 @@ def nt(test):
g = Graph()
try:
- g.parse(test.action, format='nt')
+ g.parse(test.action, format="nt")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
except:
@@ -21,14 +21,11 @@ def nt(test):
raise
-testers = {
- RDFT.TestNTriplesPositiveSyntax: nt,
- RDFT.TestNTriplesNegativeSyntax: nt
-}
+testers = {RDFT.TestNTriplesPositiveSyntax: nt, RDFT.TestNTriplesNegativeSyntax: nt}
def test_nt(tests=None):
- for t in nose_tests(testers, 'test/w3c/nt/manifest.ttl', legacy=True):
+ for t in nose_tests(testers, "test/w3c/nt/manifest.ttl", legacy=True):
if tests:
for test in tests:
if test in t[1].uri:
@@ -39,7 +36,7 @@ def test_nt(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_nt, 'rdflib_nt')
+ nose_tst_earl_report(test_nt, "rdflib_nt")
diff --git a/test/test_parser.py b/test/test_parser.py
index d311a89b..3aaf5658 100644
--- a/test/test_parser.py
+++ b/test/test_parser.py
@@ -7,8 +7,8 @@ from rdflib.graph import Graph
class ParserTestCase(unittest.TestCase):
- backend = 'default'
- path = 'store'
+ backend = "default"
+ path = "store"
def setUp(self):
self.graph = Graph(store=self.backend)
@@ -19,7 +19,8 @@ class ParserTestCase(unittest.TestCase):
def testNoPathWithHash(self):
g = self.graph
- g.parse(data="""\
+ g.parse(
+ data="""\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
@@ -31,7 +32,9 @@ class ParserTestCase(unittest.TestCase):
</rdfs:Class>
</rdf:RDF>
-""", publicID="http://example.org")
+""",
+ publicID="http://example.org",
+ )
subject = URIRef("http://example.org#")
label = g.value(subject, RDFS.label)
diff --git a/test/test_parser_helpers.py b/test/test_parser_helpers.py
index 58d083cb..090a8a49 100644
--- a/test/test_parser_helpers.py
+++ b/test/test_parser_helpers.py
@@ -1,4 +1,5 @@
from rdflib.plugins.sparql.parser import TriplesSameSubject
+
# from rdflib.plugins.sparql.algebra import triples
diff --git a/test/test_prefixTypes.py b/test/test_prefixTypes.py
index 2cf89596..8a785094 100644
--- a/test/test_prefixTypes.py
+++ b/test/test_prefixTypes.py
@@ -2,14 +2,17 @@ import unittest
from rdflib import Graph
-graph = Graph().parse(format='n3', data="""
+graph = Graph().parse(
+ format="n3",
+ data="""
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<http://example.org/doc> a foaf:Document;
dct:created "2011-03-20"^^xsd:date .
-""")
+""",
+)
class PrefixTypesTest(unittest.TestCase):
@@ -22,11 +25,11 @@ class PrefixTypesTest(unittest.TestCase):
"""
def test(self):
- s = graph.serialize(format='n3')
+ s = graph.serialize(format="n3")
print(s)
self.assertTrue("foaf:Document".encode("latin-1") in s)
self.assertTrue("xsd:date".encode("latin-1") in s)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
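In isolation, the behaviour under test here is that prefixes bound while parsing are reused by the n3 serializer (a sketch; serialize() returns bytes in rdflib 5.x):

    from rdflib import Graph

    g = Graph().parse(
        format="n3",
        data="""
    @prefix foaf: <http://xmlns.com/foaf/0.1/> .
    <http://example.org/doc> a foaf:Document .
    """,
    )
    out = g.serialize(format="n3")
    assert b"foaf:Document" in out  # the foaf: prefix survives the round-trip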
diff --git a/test/test_preflabel.py b/test/test_preflabel.py
index 76c4131e..b35c626c 100644
--- a/test/test_preflabel.py
+++ b/test/test_preflabel.py
@@ -8,50 +8,73 @@ from rdflib import URIRef
class TestPrefLabel(unittest.TestCase):
-
def setUp(self):
self.g = ConjunctiveGraph()
- self.u = URIRef('http://example.com/foo')
- self.g.add([self.u, RDFS.label, Literal('foo')])
- self.g.add([self.u, RDFS.label, Literal('bar')])
+ self.u = URIRef("http://example.com/foo")
+ self.g.add([self.u, RDFS.label, Literal("foo")])
+ self.g.add([self.u, RDFS.label, Literal("bar")])
def test_default_label_sorting(self):
res = sorted(self.g.preferredLabel(self.u))
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
- rdflib.term.Literal(u'bar')),
- (rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
- rdflib.term.Literal(u'foo'))]
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#label"),
+ rdflib.term.Literal(u"bar"),
+ ),
+ (
+ rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#label"),
+ rdflib.term.Literal(u"foo"),
+ ),
+ ]
self.assertEqual(res, tgt)
def test_default_preflabel_sorting(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('bla')])
+ self.g.add([self.u, SKOS.prefLabel, Literal("bla")])
res = self.g.preferredLabel(self.u)
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla'))]
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"bla"),
+ )
+ ]
self.assertEqual(res, tgt)
def test_preflabel_lang_sorting_no_lang_attr(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('bla')])
- self.g.add([self.u, SKOS.prefLabel, Literal('blubb', lang='en')])
+ self.g.add([self.u, SKOS.prefLabel, Literal("bla")])
+ self.g.add([self.u, SKOS.prefLabel, Literal("blubb", lang="en")])
res = sorted(self.g.preferredLabel(self.u))
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla')),
- (rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'blubb', lang='en'))]
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"bla"),
+ ),
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"blubb", lang="en"),
+ ),
+ ]
self.assertEqual(res, tgt)
def test_preflabel_lang_sorting_empty_lang_attr(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('bla')])
- self.g.add([self.u, SKOS.prefLabel, Literal('blubb', lang='en')])
- res = self.g.preferredLabel(self.u, lang='')
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla'))]
+ self.g.add([self.u, SKOS.prefLabel, Literal("bla")])
+ self.g.add([self.u, SKOS.prefLabel, Literal("blubb", lang="en")])
+ res = self.g.preferredLabel(self.u, lang="")
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"bla"),
+ )
+ ]
self.assertEqual(res, tgt)
def test_preflabel_lang_sorting_en_lang_attr(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('blubb', lang='en')])
- res = self.g.preferredLabel(self.u, lang='en')
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'blubb', lang='en'))]
+ self.g.add([self.u, SKOS.prefLabel, Literal("blubb", lang="en")])
+ res = self.g.preferredLabel(self.u, lang="en")
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"blubb", lang="en"),
+ )
+ ]
self.assertEqual(res, tgt)
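preferredLabel() returns (labelProperty, literal) pairs, preferring skos:prefLabel over rdfs:label; the lang argument filters by language tag, and lang="" selects only untagged literals. A condensed sketch of the behaviour tested above:

    from rdflib import ConjunctiveGraph, Literal, URIRef
    from rdflib.namespace import SKOS

    g = ConjunctiveGraph()
    u = URIRef("http://example.com/foo")
    g.add((u, SKOS.prefLabel, Literal("bla")))
    g.add((u, SKOS.prefLabel, Literal("blubb", lang="en")))

    g.preferredLabel(u, lang="")    # [(SKOS.prefLabel, Literal("bla"))]
    g.preferredLabel(u, lang="en")  # [(SKOS.prefLabel, Literal("blubb", lang="en"))]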
diff --git a/test/test_prettyxml.py b/test/test_prettyxml.py
index e20ec067..4a033fa4 100644
--- a/test/test_prettyxml.py
+++ b/test/test_prettyxml.py
@@ -45,7 +45,9 @@ def _mangled_copy(g):
"Makes a copy of the graph, replacing all bnodes with the bnode ``_blank``."
gcopy = ConjunctiveGraph()
- def isbnode(v): return isinstance(v, BNode)
+ def isbnode(v):
+ return isinstance(v, BNode)
+
for s, p, o in g:
if isbnode(s):
s = _blank
@@ -116,56 +118,101 @@ class TestPrettyXmlSerializer(SerializerTestBase):
rdfs:seeAlso _:bnode2 .
"""
- testContentFormat = 'n3'
+ testContentFormat = "n3"
def test_result_fragments(self):
rdfXml = serialize(self.sourceGraph, self.serializer)
- assert '<Test rdf:about="http://example.org/data/a">'.encode("latin-1") in rdfXml
- assert '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1") in rdfXml
+ assert (
+ '<Test rdf:about="http://example.org/data/a">'.encode("latin-1") in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1")
+ in rdfXml
+ )
assert '<name xml:lang="en">Bee</name>'.encode("latin-1") in rdfXml
- assert '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode("latin-1") in rdfXml
- assert '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml, "expected one identified bnode in serialized graph"
- #onlyBNodesMsg = "expected only inlined subClassOf-bnodes in serialized graph"
- #assert '<rdfs:subClassOf>' in rdfXml, onlyBNodesMsg
- #assert not '<rdfs:subClassOf ' in rdfXml, onlyBNodesMsg
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
+ # onlyBNodesMsg = "expected only inlined subClassOf-bnodes in serialized graph"
+ # assert '<rdfs:subClassOf>' in rdfXml, onlyBNodesMsg
+ # assert not '<rdfs:subClassOf ' in rdfXml, onlyBNodesMsg
def test_result_fragments_with_base(self):
- rdfXml = serialize(self.sourceGraph, self.serializer,
- extra_args={'base': "http://example.org/", 'xml_base': "http://example.org/"})
+ rdfXml = serialize(
+ self.sourceGraph,
+ self.serializer,
+ extra_args={
+ "base": "http://example.org/",
+ "xml_base": "http://example.org/",
+ },
+ )
assert 'xml:base="http://example.org/"'.encode("latin-1") in rdfXml
assert '<Test rdf:about="data/a">'.encode("latin-1") in rdfXml
assert '<rdf:Description rdf:about="data/b">'.encode("latin-1") in rdfXml
- assert '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode("latin-1") in rdfXml
- assert '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml, "expected one identified bnode in serialized graph"
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
def test_subClassOf_objects(self):
reparsedGraph = serialize_and_load(self.sourceGraph, self.serializer)
- _assert_expected_object_types_for_predicates(reparsedGraph,
- [RDFS.seeAlso, RDFS.subClassOf],
- [URIRef, BNode])
+ _assert_expected_object_types_for_predicates(
+ reparsedGraph, [RDFS.seeAlso, RDFS.subClassOf], [URIRef, BNode]
+ )
def test_pretty_xmlliteral(self):
# given:
g = ConjunctiveGraph()
- g.add((BNode(), RDF.value, Literal(u'''<p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p>''', datatype=RDF.XMLLiteral)))
+ g.add(
+ (
+ BNode(),
+ RDF.value,
+ Literal(
+ u"""<p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p>""",
+ datatype=RDF.XMLLiteral,
+ ),
+ )
+ )
# when:
- xmlrepr = g.serialize(format='pretty-xml')
+ xmlrepr = g.serialize(format="pretty-xml")
# then:
- assert u'''<rdf:value rdf:parseType="Literal"><p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p></rdf:value>'''.encode('utf-8') in xmlrepr
+ assert (
+ u"""<rdf:value rdf:parseType="Literal"><p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p></rdf:value>""".encode(
+ "utf-8"
+ )
+ in xmlrepr
+ )
def test_pretty_broken_xmlliteral(self):
# given:
g = ConjunctiveGraph()
- g.add((BNode(), RDF.value, Literal(u'''<p ''', datatype=RDF.XMLLiteral)))
+ g.add((BNode(), RDF.value, Literal(u"""<p """, datatype=RDF.XMLLiteral)))
# when:
- xmlrepr = g.serialize(format='pretty-xml')
+ xmlrepr = g.serialize(format="pretty-xml")
# then:
- assert u'''<rdf:value rdf:datatype="http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral">&lt;p '''.encode('utf-8') in xmlrepr
+ assert (
+ u"""<rdf:value rdf:datatype="http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral">&lt;p """.encode(
+ "utf-8"
+ )
+ in xmlrepr
+ )
def _assert_expected_object_types_for_predicates(graph, predicates, types):
for s, p, o in graph:
if p in predicates:
someTrue = [isinstance(o, t) for t in types]
- assert True in someTrue, \
- "Bad type %s for object when predicate is <%s>." % (type(o), p)
+ assert (
+ True in someTrue
+ ), "Bad type %s for object when predicate is <%s>." % (type(o), p)
diff --git a/test/test_rdf_lists.py b/test/test_rdf_lists.py
index a73d14d8..466b4847 100644
--- a/test/test_rdf_lists.py
+++ b/test/test_rdf_lists.py
@@ -5,8 +5,7 @@ from rdflib.graph import Graph
from rdflib.term import URIRef
-DATA =\
- """<http://example.com#C> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
+DATA = """<http://example.com#C> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
<http://example.com#B> <http://www.w3.org/2000/01/rdf-schema#subClassOf> _:fIYNVPxd4.
<http://example.com#B> <http://www.w3.org/2000/01/rdf-schema#subClassOf> <http://example.com#A>.
<http://example.com#B> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
@@ -20,8 +19,7 @@ _:fIYNVPxd3 <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <http://example.c
_:fIYNVPxd3 <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> <http://www.w3.org/1999/02/22-rdf-syntax-ns#nil>.
"""
-DATA_FALSE_ELEMENT =\
- """
+DATA_FALSE_ELEMENT = """
<http://example.org/#ThreeMemberList> <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <http://example.org/#p> .
<http://example.org/#ThreeMemberList> <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> _:list2 .
_:list2 <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> "false"^^<http://www.w3.org/2001/XMLSchema#boolean> .
@@ -36,19 +34,19 @@ def main():
class OWLCollectionTest(unittest.TestCase):
-
def testCollectionRDFXML(self):
- g = Graph().parse(data=DATA, format='nt')
- g.namespace_manager.bind('owl', URIRef('http://www.w3.org/2002/07/owl#'))
- print(g.serialize(format='pretty-xml'))
+ g = Graph().parse(data=DATA, format="nt")
+ g.namespace_manager.bind("owl", URIRef("http://www.w3.org/2002/07/owl#"))
+ print(g.serialize(format="pretty-xml"))
class ListTest(unittest.TestCase):
def testFalseElement(self):
- g = Graph().parse(data=DATA_FALSE_ELEMENT, format='nt')
+ g = Graph().parse(data=DATA_FALSE_ELEMENT, format="nt")
self.assertEqual(
- len(list(g.items(URIRef('http://example.org/#ThreeMemberList')))), 3)
+ len(list(g.items(URIRef("http://example.org/#ThreeMemberList")))), 3
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
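Graph.items() walks an rdf:first/rdf:rest collection and yields its members; testFalseElement pins down that a "false"^^xsd:boolean member must not cut the walk short. A reduced sketch with a two-member list:

    from rdflib import Graph, URIRef

    nt = """
    <http://example.org/#L> <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> "a" .
    <http://example.org/#L> <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> _:r .
    _:r <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> "false"^^<http://www.w3.org/2001/XMLSchema#boolean> .
    _:r <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> <http://www.w3.org/1999/02/22-rdf-syntax-ns#nil> .
    """
    g = Graph().parse(data=nt, format="nt")
    assert len(list(g.items(URIRef("http://example.org/#L")))) == 2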
diff --git a/test/test_rdfxml.py b/test/test_rdfxml.py
index 22d8fdb7..845a9a7d 100644
--- a/test/test_rdfxml.py
+++ b/test/test_rdfxml.py
@@ -42,7 +42,10 @@ class TestStore(Graph):
if not isinstance(s, BNode) and not isinstance(o, BNode):
if not (s, p, o) in self.expected:
m = "Triple not in expected result: %s, %s, %s" % (
- s.n3(), p.n3(), o.n3())
+ s.n3(),
+ p.n3(),
+ o.n3(),
+ )
if verbose:
write(m)
# raise Exception(m)
@@ -73,7 +76,7 @@ def cached_file(url):
folder = os.path.dirname(fpath)
if not os.path.exists(folder):
os.makedirs(folder)
- f = open(fpath, 'w')
+ f = open(fpath, "w")
try:
f.write(urlopen(url).read())
finally:
@@ -85,7 +88,7 @@ RDFCOREBASE = "http://www.w3.org/2000/10/rdf-tests/rdfcore/"
def relative(url):
- return url[len(RDFCOREBASE):]
+ return url[len(RDFCOREBASE) :]
def resolve(rel):
@@ -164,15 +167,16 @@ def _testNegative(uri, manifest):
class ParserTestCase(unittest.TestCase):
- store = 'default'
- path = 'store'
+ store = "default"
+ path = "store"
slow = True
def setUp(self):
self.manifest = manifest = Graph(store=self.store)
manifest.open(self.path)
- manifest.load(cached_file(
- "http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf"))
+ manifest.load(
+ cached_file("http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf")
+ )
def tearDown(self):
self.manifest.close()
@@ -188,8 +192,7 @@ class ParserTestCase(unittest.TestCase):
result = _testNegative(neg, manifest)
total += 1
num_failed += result
- self.assertEqual(
- num_failed, 0, "Failed: %s of %s." % (num_failed, total))
+ self.assertEqual(num_failed, 0, "Failed: %s of %s." % (num_failed, total))
def testPositive(self):
manifest = self.manifest
@@ -213,8 +216,7 @@ class ParserTestCase(unittest.TestCase):
results.add((test, RDF.type, RESULT["FailingRun"]))
total += 1
num_failed += result
- self.assertEqual(
- num_failed, 0, "Failed: %s of %s." % (num_failed, total))
+ self.assertEqual(num_failed, 0, "Failed: %s of %s." % (num_failed, total))
RESULT = Namespace("http://www.w3.org/2002/03owlt/resultsOntology#")
@@ -231,12 +233,14 @@ results.add((system, RDFS.comment, Literal("")))
if __name__ == "__main__":
manifest = Graph()
- manifest.load(cached_file(
- "http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf"))
+ manifest.load(
+ cached_file("http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf")
+ )
import sys
import getopt
+
try:
- optlist, args = getopt.getopt(sys.argv[1:], 'h:', ["help"])
+ optlist, args = getopt.getopt(sys.argv[1:], "h:", ["help"])
except getopt.GetoptError as msg:
write(msg)
# usage()
diff --git a/test/test_roundtrip.py b/test/test_roundtrip.py
index 9dfed952..149e9eb5 100644
--- a/test/test_roundtrip.py
+++ b/test/test_roundtrip.py
@@ -4,8 +4,10 @@ import rdflib.compare
try:
from .test_nt_suite import all_nt_files
+
assert all_nt_files
from .test_n3_suite import all_n3_files
+
assert all_n3_files
except:
from test.test_nt_suite import all_nt_files
@@ -28,10 +30,13 @@ tests roundtripping through rdf/xml with only the literals-02 file
SKIP = [
- ('xml', 'test/n3/n3-writer-test-29.n3'), # has predicates that cannot be shortened to strict qnames
- ('xml', 'test/nt/qname-02.nt'), # uses a property that cannot be qname'd
-    ('trix', 'test/n3/strquot.n3'), # contains characters forbidden by the xml spec
-    ('xml', 'test/n3/strquot.n3'), # contains characters forbidden by the xml spec
+ (
+ "xml",
+ "test/n3/n3-writer-test-29.n3",
+ ), # has predicates that cannot be shortened to strict qnames
+ ("xml", "test/nt/qname-02.nt"), # uses a property that cannot be qname'd
+ ("trix", "test/n3/strquot.n3"), # contains charachters forbidden by the xml spec
+ ("xml", "test/n3/strquot.n3"), # contains charachters forbidden by the xml spec
]
@@ -78,11 +83,9 @@ def test_cases():
global formats
if not formats:
serializers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Serializer))
- parsers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Parser))
+ x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Serializer)
+ )
+ parsers = set(x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Parser))
formats = parsers.intersection(serializers)
for testfmt in formats:
@@ -97,15 +100,14 @@ def test_n3():
global formats
if not formats:
serializers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Serializer))
- parsers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Parser))
+ x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Serializer)
+ )
+ parsers = set(x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Parser))
formats = parsers.intersection(serializers)
for testfmt in formats:
- if "/" in testfmt: continue # skip double testing
+ if "/" in testfmt:
+ continue # skip double testing
for f, infmt in all_n3_files():
if (testfmt, f) not in SKIP:
yield roundtrip, (infmt, testfmt, f)
@@ -113,12 +115,13 @@ def test_n3():
if __name__ == "__main__":
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
elif len(sys.argv) == 2:
import test.test_roundtrip
+
test.test_roundtrip.formats = [sys.argv[1]]
nose.main(defaultTest=sys.argv[0], argv=sys.argv[:1])
else:
- roundtrip(
- (sys.argv[2], sys.argv[1], sys.argv[3]), verbose=True)
+ roundtrip((sys.argv[2], sys.argv[1], sys.argv[3]), verbose=True)
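The format matrix here is derived from the plugin registry: only names registered with both a parser and a serializer get round-tripped. Distilled:

    import rdflib.plugin

    serializers = {x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Serializer)}
    parsers = {x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Parser)}
    formats = parsers & serializers  # e.g. nt, n3, turtle, xml, trix, nquads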
diff --git a/test/test_rules.py b/test/test_rules.py
index 008104da..c2496760 100644
--- a/test/test_rules.py
+++ b/test/test_rules.py
@@ -36,11 +36,15 @@ try:
def facts(g):
for s, p, o in g:
- if p != LOG.implies and not isinstance(s, BNode) and not isinstance(o, BNode):
+ if (
+ p != LOG.implies
+ and not isinstance(s, BNode)
+ and not isinstance(o, BNode)
+ ):
yield terms.Fact(_convert(s), _convert(p), _convert(o))
class PychinkoTestCase(unittest.TestCase):
- backend = 'default'
+ backend = "default"
tmppath = None
def setUp(self):
@@ -66,7 +70,8 @@ try:
source = self.g
interp.addFacts(set(facts(source)), initialSet=True)
interp.run()
- #_logger.debug("inferred facts: %s" % interp.inferredFacts)
+ # _logger.debug("inferred facts: %s" % interp.inferredFacts)
+
except ImportError as e:
print("Could not test Pychinko: %s" % e)
diff --git a/test/test_seq.py b/test/test_seq.py
index a1411649..7f177574 100644
--- a/test/test_seq.py
+++ b/test/test_seq.py
@@ -23,8 +23,8 @@ s = """\
class SeqTestCase(unittest.TestCase):
- backend = 'default'
- path = 'store'
+ backend = "default"
+ path = "store"
def setUp(self):
store = self.store = Graph(store=self.backend)
@@ -47,5 +47,5 @@ def test_suite():
return unittest.makeSuite(SeqTestCase)
-if __name__ == '__main__':
- unittest.main(defaultTest='test_suite')
+if __name__ == "__main__":
+ unittest.main(defaultTest="test_suite")
diff --git a/test/test_serializexml.py b/test/test_serializexml.py
index d79c1d5f..6ca25a92 100644
--- a/test/test_serializexml.py
+++ b/test/test_serializexml.py
@@ -44,7 +44,9 @@ def _mangled_copy(g):
"Makes a copy of the graph, replacing all bnodes with the bnode ``_blank``."
gcopy = ConjunctiveGraph()
- def isbnode(v): return isinstance(v, BNode)
+ def isbnode(v):
+ return isinstance(v, BNode)
+
for s, p, o in g:
if isbnode(s):
s = _blank
@@ -115,23 +117,47 @@ class TestXMLSerializer(SerializerTestBase):
rdfs:seeAlso _:bnode2 .
"""
- testContentFormat = 'n3'
+ testContentFormat = "n3"
def test_result_fragments(self):
rdfXml = serialize(self.sourceGraph, self.serializer)
# print "--------"
# print rdfXml
# print "--------"
- assert '<rdf:Description rdf:about="http://example.org/data/a">'.encode("latin-1") in rdfXml
- assert '<rdf:type rdf:resource="http://example.org/model/test#Test"/>'.encode("latin-1") in rdfXml
- assert '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1") in rdfXml
+ assert (
+ '<rdf:Description rdf:about="http://example.org/data/a">'.encode("latin-1")
+ in rdfXml
+ )
+ assert (
+ '<rdf:type rdf:resource="http://example.org/model/test#Test"/>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1")
+ in rdfXml
+ )
assert '<name xml:lang="en">Bee</name>'.encode("latin-1") in rdfXml
- assert '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode("latin-1") in rdfXml
- assert '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml, "expected one identified bnode in serialized graph"
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
def test_result_fragments_with_base(self):
- rdfXml = serialize(self.sourceGraph, self.serializer,
- extra_args={'base': "http://example.org/", 'xml_base': "http://example.org/"})
+ rdfXml = serialize(
+ self.sourceGraph,
+ self.serializer,
+ extra_args={
+ "base": "http://example.org/",
+ "xml_base": "http://example.org/",
+ },
+ )
# print "--------"
# print rdfXml
# print "--------"
@@ -139,19 +165,27 @@ class TestXMLSerializer(SerializerTestBase):
assert '<rdf:Description rdf:about="data/a">'.encode("latin-1") in rdfXml
assert '<rdf:type rdf:resource="model/test#Test"/>'.encode("latin-1") in rdfXml
assert '<rdf:Description rdf:about="data/b">'.encode("latin-1") in rdfXml
- assert '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode("latin-1") in rdfXml
- assert '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml, "expected one identified bnode in serialized graph"
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
def test_subClassOf_objects(self):
reparsedGraph = serialize_and_load(self.sourceGraph, self.serializer)
- _assert_expected_object_types_for_predicates(reparsedGraph,
- [RDFS.seeAlso, RDFS.subClassOf],
- [URIRef, BNode])
+ _assert_expected_object_types_for_predicates(
+ reparsedGraph, [RDFS.seeAlso, RDFS.subClassOf], [URIRef, BNode]
+ )
def _assert_expected_object_types_for_predicates(graph, predicates, types):
for s, p, o in graph:
if p in predicates:
someTrue = [isinstance(o, t) for t in types]
- assert True in someTrue, \
- "Bad type %s for object when predicate is <%s>." % (type(o), p)
+ assert (
+ True in someTrue
+ ), "Bad type %s for object when predicate is <%s>." % (type(o), p)
diff --git a/test/test_slice.py b/test/test_slice.py
index 27e6e49a..36c72ca8 100644
--- a/test/test_slice.py
+++ b/test/test_slice.py
@@ -1,10 +1,8 @@
-
from rdflib import Graph, URIRef
import unittest
class GraphSlice(unittest.TestCase):
-
def testSlice(self):
"""
We pervert the slice object,
@@ -13,10 +11,12 @@ class GraphSlice(unittest.TestCase):
all operations return generators over full triples
"""
- def sl(x, y): return self.assertEqual(len(list(x)), y)
+ def sl(x, y):
+ return self.assertEqual(len(list(x)), y)
+
+ def soe(x, y):
+ return self.assertEqual(set([a[2] for a in x]), set(y)) # equals objects
- def soe(x, y): return self.assertEqual(
- set([a[2] for a in x]), set(y)) # equals objects
g = self.graph
# Single terms are all trivial:
@@ -27,35 +27,35 @@ class GraphSlice(unittest.TestCase):
# single slice slices by s,p,o, with : used to split
# tell me everything about "tarek" (same as above)
- sl(g[self.tarek::], 2)
+ sl(g[self.tarek : :], 2)
# give me every "likes" relationship
- sl(g[:self.likes:], 5)
+ sl(g[: self.likes :], 5)
# give me every relationship to pizza
- sl(g[::self.pizza], 3)
+ sl(g[:: self.pizza], 3)
# give me everyone who likes pizza
- sl(g[:self.likes:self.pizza], 2)
+ sl(g[: self.likes : self.pizza], 2)
# does tarek like pizza?
- self.assertTrue(g[self.tarek:self.likes:self.pizza])
+ self.assertTrue(g[self.tarek : self.likes : self.pizza])
        # More interesting is using paths
# everything hated or liked
- sl(g[:self.hates | self.likes], 7)
+ sl(g[: self.hates | self.likes], 7)
def setUp(self):
self.graph = Graph()
- self.michel = URIRef(u'michel')
- self.tarek = URIRef(u'tarek')
- self.bob = URIRef(u'bob')
- self.likes = URIRef(u'likes')
- self.hates = URIRef(u'hates')
- self.pizza = URIRef(u'pizza')
- self.cheese = URIRef(u'cheese')
+ self.michel = URIRef(u"michel")
+ self.tarek = URIRef(u"tarek")
+ self.bob = URIRef(u"bob")
+ self.likes = URIRef(u"likes")
+ self.hates = URIRef(u"hates")
+ self.pizza = URIRef(u"pizza")
+ self.cheese = URIRef(u"cheese")
self.addStuff()
@@ -77,5 +77,5 @@ class GraphSlice(unittest.TestCase):
self.graph.add((bob, hates, michel)) # gasp!
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
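Black spaces these slices out because PEP 8 treats the slice colon as a binary operator once an operand is a complex expression such as self.tarek; the meaning is unchanged. The slice semantics themselves, roughly (rdflib 5.x):

    from rdflib import Graph, URIRef

    g = Graph()
    tarek, likes, pizza = URIRef("tarek"), URIRef("likes"), URIRef("pizza")
    g.add((tarek, likes, pizza))

    list(g[tarek::])       # (predicate, object) pairs about tarek
    list(g[:likes:])       # (subject, object) pairs for every "likes" statement
    list(g[:likes:pizza])  # subjects that like pizza
    g[tarek:likes:pizza]   # all three given: a membership test (boolean)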
diff --git a/test/test_sparql.py b/test/test_sparql.py
index c3f289fc..fdf29c3c 100644
--- a/test/test_sparql.py
+++ b/test/test_sparql.py
@@ -11,24 +11,30 @@ def test_graph_prefix():
"""
g1 = Graph()
- g1.parse(data="""
+ g1.parse(
+ data="""
@prefix : <urn:ns1:> .
:foo <p> 42.
- """, format="n3")
+ """,
+ format="n3",
+ )
g2 = Graph()
- g2.parse(data="""
+ g2.parse(
+ data="""
@prefix : <urn:somethingelse:> .
<urn:ns1:foo> <p> 42.
- """, format="n3")
+ """,
+ format="n3",
+ )
assert isomorphic(g1, g2)
- q_str = ("""
+ q_str = """
PREFIX : <urn:ns1:>
SELECT ?val
WHERE { :foo ?p ?val }
- """)
+ """
q_prepared = prepareQuery(q_str)
expected = [(Literal(42),)]
@@ -61,21 +67,21 @@ def test_sparql_bnodelist():
"""
- prepareQuery('select * where { ?s ?p ( [] ) . }')
- prepareQuery('select * where { ?s ?p ( [ ?p2 ?o2 ] ) . }')
- prepareQuery('select * where { ?s ?p ( [ ?p2 ?o2 ] [] ) . }')
- prepareQuery('select * where { ?s ?p ( [] [ ?p2 ?o2 ] [] ) . }')
+ prepareQuery("select * where { ?s ?p ( [] ) . }")
+ prepareQuery("select * where { ?s ?p ( [ ?p2 ?o2 ] ) . }")
+ prepareQuery("select * where { ?s ?p ( [ ?p2 ?o2 ] [] ) . }")
+ prepareQuery("select * where { ?s ?p ( [] [ ?p2 ?o2 ] [] ) . }")
def test_complex_sparql_construct():
g = Graph()
- q = '''select ?subject ?study ?id where {
+ q = """select ?subject ?study ?id where {
?s a <urn:Person>;
<urn:partOf> ?c;
<urn:hasParent> ?mother, ?father;
<urn:id> [ a <urn:Identifier>; <urn:has-value> ?id].
- }'''
+ }"""
g.query(q)
@@ -84,8 +90,7 @@ def test_sparql_update_with_bnode():
Test if the blank node is inserted correctly.
"""
graph = Graph()
- graph.update(
- "INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
+ graph.update("INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
for t in graph.triples((None, None, None)):
assert isinstance(t[0], BNode)
eq_(t[1].n3(), "<urn:type>")
@@ -97,9 +102,8 @@ def test_sparql_update_with_bnode_serialize_parse():
Test if the blank node is inserted correctly, can be serialized and parsed.
"""
graph = Graph()
- graph.update(
- "INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
- string = graph.serialize(format='ntriples').decode('utf-8')
+ graph.update("INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
+ string = graph.serialize(format="ntriples").decode("utf-8")
raised = False
try:
Graph().parse(data=string, format="ntriples")
@@ -108,6 +112,7 @@ def test_sparql_update_with_bnode_serialize_parse():
assert not raised
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
+
nose.main(defaultTest=__name__)
diff --git a/test/test_sparql_agg_distinct.py b/test/test_sparql_agg_distinct.py
index 7ab0f58a..39d6eb95 100644
--- a/test/test_sparql_agg_distinct.py
+++ b/test/test_sparql_agg_distinct.py
@@ -1,6 +1,6 @@
from rdflib import Graph
-query_tpl = '''
+query_tpl = """
SELECT ?x (MIN(?y_) as ?y) (%s(DISTINCT ?z_) as ?z) {
VALUES (?x ?y_ ?z_) {
("x1" 10 1)
@@ -8,42 +8,37 @@ SELECT ?x (MIN(?y_) as ?y) (%s(DISTINCT ?z_) as ?z) {
("x2" 20 2)
}
} GROUP BY ?x ORDER BY ?x
-'''
+"""
def test_group_concat_distinct():
g = Graph()
- results = g.query(query_tpl % 'GROUP_CONCAT')
+ results = g.query(query_tpl % "GROUP_CONCAT")
results = [[lit.toPython() for lit in line] for line in results]
# this is the tricky part
assert results[0][2] == "1", results[0][2]
# still check the whole result, to be on the safe side
- assert results == [
- ["x1", 10, "1"],
- ["x2", 20, "2"],
- ], results
+ assert results == [["x1", 10, "1"], ["x2", 20, "2"],], results
def test_sum_distinct():
g = Graph()
- results = g.query(query_tpl % 'SUM')
+ results = g.query(query_tpl % "SUM")
results = [[lit.toPython() for lit in line] for line in results]
# this is the tricky part
assert results[0][2] == 1, results[0][2]
# still check the whole result, to be on the safe side
- assert results == [
- ["x1", 10, 1],
- ["x2", 20, 2],
- ], results
+ assert results == [["x1", 10, 1], ["x2", 20, 2],], results
def test_avg_distinct():
g = Graph()
- results = g.query("""
+ results = g.query(
+ """
SELECT ?x (MIN(?y_) as ?y) (AVG(DISTINCT ?z_) as ?z) {
VALUES (?x ?y_ ?z_) {
("x1" 10 1)
@@ -52,23 +47,24 @@ def test_avg_distinct():
("x2" 20 2)
}
} GROUP BY ?x ORDER BY ?x
- """)
+ """
+ )
results = [[lit.toPython() for lit in line] for line in results]
# this is the tricky part
assert results[0][2] == 2, results[0][2]
# still check the whole result, to be on the safe side
- assert results == [
- ["x1", 10, 2],
- ["x2", 20, 2],
- ], results
+ assert results == [["x1", 10, 2], ["x2", 20, 2],], results
def test_count_distinct():
g = Graph()
- g.parse(format="turtle", publicID="http://example.org/", data="""
+ g.parse(
+ format="turtle",
+ publicID="http://example.org/",
+ data="""
@prefix : <> .
<#a>
@@ -83,26 +79,31 @@ def test_count_distinct():
:knows <#b>, <#c> ;
:age 20 .
- """)
+ """,
+ )
# Query 1: people knowing someone younger
- results = g.query("""
+ results = g.query(
+ """
PREFIX : <http://example.org/>
SELECT DISTINCT ?x {
?x :age ?ax ; :knows [ :age ?ay ].
FILTER( ?ax > ?ay )
}
- """)
+ """
+ )
assert len(results) == 2
    # Query 2: count people knowing someone younger
- results = g.query("""
+ results = g.query(
+ """
PREFIX : <http://example.org/>
SELECT (COUNT(DISTINCT ?x) as ?cx) {
?x :age ?ax ; :knows [ :age ?ay ].
FILTER( ?ax > ?ay )
}
- """)
+ """
+ )
assert list(results)[0][0].toPython() == 2
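Reduced to its core, the behaviour under test is that DISTINCT inside an aggregate deduplicates per group before aggregating (a sketch; the VALUES rows are illustrative):

    from rdflib import Graph

    res = Graph().query(
        """
        SELECT ?x (COUNT(DISTINCT ?z) AS ?n) {
            VALUES (?x ?z) { ("a" 1) ("a" 1) ("a" 2) ("b" 3) }
        } GROUP BY ?x ORDER BY ?x
        """
    )
    assert [[v.toPython() for v in row] for row in res] == [["a", 2], ["b", 1]]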
diff --git a/test/test_sparql_agg_undef.py b/test/test_sparql_agg_undef.py
index 649a5a8c..f36e9eb5 100644
--- a/test/test_sparql_agg_undef.py
+++ b/test/test_sparql_agg_undef.py
@@ -1,6 +1,6 @@
from rdflib import Graph, Literal, Variable
-query_tpl = '''
+query_tpl = """
SELECT ?x (%s(?y_) as ?y) {
VALUES (?x ?y_ ?z) {
("x1" undef 1)
@@ -9,7 +9,7 @@ SELECT ?x (%s(?y_) as ?y) {
("x2" 42 4)
}
} GROUP BY ?x ORDER BY ?x
-'''
+"""
Y = Variable("y")
@@ -24,18 +24,20 @@ def template_tst(agg_func, first, second):
def test_aggregates():
- yield template_tst, 'SUM', Literal(0), Literal(42)
- yield template_tst, 'MIN', None, Literal(42)
- yield template_tst, 'MAX', None, Literal(42)
+ yield template_tst, "SUM", Literal(0), Literal(42)
+ yield template_tst, "MIN", None, Literal(42)
+ yield template_tst, "MAX", None, Literal(42)
# yield template_tst, 'AVG', Literal(0), Literal(42)
- yield template_tst, 'SAMPLE', None, Literal(42)
- yield template_tst, 'COUNT', Literal(0), Literal(1)
- yield template_tst, 'GROUP_CONCAT', Literal(''), Literal("42")
+ yield template_tst, "SAMPLE", None, Literal(42)
+ yield template_tst, "COUNT", Literal(0), Literal(1)
+ yield template_tst, "GROUP_CONCAT", Literal(""), Literal("42")
def test_group_by_null():
g = Graph()
- results = list(g.query("""
+ results = list(
+ g.query(
+ """
SELECT ?x ?y (AVG(?z) as ?az) {
VALUES (?x ?y ?z) {
(1 undef 10)
@@ -46,7 +48,9 @@ def test_group_by_null():
}
} GROUP BY ?x ?y
ORDER BY ?x
- """))
+ """
+ )
+ )
assert len(results) == 2
assert results[0][0] == Literal(1)
assert results[1][0] == Literal(2)
diff --git a/test/test_sparql_construct_bindings.py b/test/test_sparql_construct_bindings.py
index d5a68b94..8f8240b2 100644
--- a/test/test_sparql_construct_bindings.py
+++ b/test/test_sparql_construct_bindings.py
@@ -5,16 +5,16 @@ from rdflib.compare import isomorphic
import unittest
from nose.tools import eq_
-class TestConstructInitBindings(unittest.TestCase):
+class TestConstructInitBindings(unittest.TestCase):
def test_construct_init_bindings(self):
"""
This is issue https://github.com/RDFLib/rdflib/issues/1001
"""
g1 = Graph()
-
- q_str = ("""
+
+ q_str = """
PREFIX : <urn:ns1:>
CONSTRUCT {
?uri :prop1 ?val1;
@@ -24,17 +24,16 @@ class TestConstructInitBindings(unittest.TestCase):
bind(uri(concat("urn:ns1:", ?a)) as ?uri)
bind(?b as ?val1)
}
- """)
+ """
q_prepared = prepareQuery(q_str)
expected = [
- (URIRef('urn:ns1:A'),URIRef('urn:ns1:prop1'), Literal('B')),
- (URIRef('urn:ns1:A'),URIRef('urn:ns1:prop2'), Literal('C'))
+ (URIRef("urn:ns1:A"), URIRef("urn:ns1:prop1"), Literal("B")),
+ (URIRef("urn:ns1:A"), URIRef("urn:ns1:prop2"), Literal("C")),
]
- results = g1.query(q_prepared, initBindings={
- 'a': Literal('A'),
- 'b': Literal('B'),
- 'c': Literal('C')
- })
+ results = g1.query(
+ q_prepared,
+ initBindings={"a": Literal("A"), "b": Literal("B"), "c": Literal("C")},
+ )
eq_(sorted(results, key=lambda x: str(x[1])), expected)
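The mechanics verified here: the query is parsed once with prepareQuery(), and initBindings supplies values that the BINDs then build on (a sketch mirroring the test):

    from rdflib import Graph, Literal
    from rdflib.plugins.sparql import prepareQuery

    q = prepareQuery(
        """
        PREFIX : <urn:ns1:>
        CONSTRUCT { ?uri :prop1 ?val1 }
        WHERE { bind(uri(concat("urn:ns1:", ?a)) as ?uri) bind(?b as ?val1) }
        """
    )
    rows = Graph().query(q, initBindings={"a": Literal("A"), "b": Literal("B")})
    # yields one constructed triple: <urn:ns1:A> <urn:ns1:prop1> "B"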
diff --git a/test/test_sparql_service.py b/test/test_sparql_service.py
index 19f713c3..550bfcb2 100644
--- a/test/test_sparql_service.py
+++ b/test/test_sparql_service.py
@@ -5,7 +5,7 @@ from rdflib.compare import isomorphic
def test_service():
g = Graph()
- q = '''select ?dbpHypernym ?dbpComment
+ q = """select ?dbpHypernym ?dbpComment
where
{ service <http://DBpedia.org/sparql>
{ select ?dbpHypernym ?dbpComment
@@ -15,7 +15,7 @@ def test_service():
<http://purl.org/linguistics/gold/hypernym> ?dbpHypernym ;
<http://www.w3.org/2000/01/rdf-schema#comment> ?dbpComment .
- } } } limit 2'''
+ } } } limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -25,7 +25,7 @@ def test_service():
def test_service_with_bind():
g = Graph()
- q = '''select ?dbpHypernym ?dbpComment ?dbpDeathPlace
+ q = """select ?dbpHypernym ?dbpComment ?dbpDeathPlace
where
{ bind (<http://dbpedia.org/resource/Eltham> as ?dbpDeathPlace)
service <http://DBpedia.org/sparql>
@@ -37,7 +37,7 @@ def test_service_with_bind():
<http://www.w3.org/2000/01/rdf-schema#comment> ?dbpComment ;
<http://dbpedia.org/ontology/deathPlace> ?dbpDeathPlace .
- } } } limit 2'''
+ } } } limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -47,7 +47,7 @@ def test_service_with_bind():
def test_service_with_values():
g = Graph()
- q = '''select ?dbpHypernym ?dbpComment ?dbpDeathPlace
+ q = """select ?dbpHypernym ?dbpComment ?dbpDeathPlace
where
{ values (?dbpHypernym ?dbpDeathPlace) {(<http://dbpedia.org/resource/Leveller> <http://dbpedia.org/resource/London>) (<http://dbpedia.org/resource/Leveller> <http://dbpedia.org/resource/Eltham>)}
service <http://DBpedia.org/sparql>
@@ -59,7 +59,7 @@ def test_service_with_values():
<http://www.w3.org/2000/01/rdf-schema#comment> ?dbpComment ;
<http://dbpedia.org/ontology/deathPlace> ?dbpDeathPlace .
- } } } limit 2'''
+ } } } limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -69,13 +69,13 @@ def test_service_with_values():
def test_service_with_implicit_select():
g = Graph()
- q = '''select ?s ?p ?o
+ q = """select ?s ?p ?o
where
{
service <http://DBpedia.org/sparql>
{
values (?s ?p ?o) {(<http://example.org/a> <http://example.org/b> 1) (<http://example.org/a> <http://example.org/b> 2)}
- }} limit 2'''
+ }} limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -85,14 +85,14 @@ def test_service_with_implicit_select():
def test_service_with_implicit_select_and_prefix():
g = Graph()
- q = '''prefix ex:<http://example.org/>
+ q = """prefix ex:<http://example.org/>
select ?s ?p ?o
where
{
service <http://DBpedia.org/sparql>
{
values (?s ?p ?o) {(ex:a ex:b 1) (<http://example.org/a> <http://example.org/b> 2)}
- }} limit 2'''
+ }} limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -102,14 +102,14 @@ def test_service_with_implicit_select_and_prefix():
def test_service_with_implicit_select_and_base():
g = Graph()
- q = '''base <http://example.org/>
+ q = """base <http://example.org/>
select ?s ?p ?o
where
{
service <http://DBpedia.org/sparql>
{
values (?s ?p ?o) {(<a> <b> 1) (<a> <b> 2)}
- }} limit 2'''
+ }} limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -119,19 +119,19 @@ def test_service_with_implicit_select_and_base():
def test_service_with_implicit_select_and_allcaps():
g = Graph()
- q = '''SELECT ?s
+ q = """SELECT ?s
WHERE
{
SERVICE <http://dbpedia.org/sparql>
{
?s <http://purl.org/linguistics/gold/hypernym> <http://dbpedia.org/resource/Leveller> .
}
- } LIMIT 3'''
+ } LIMIT 3"""
results = g.query(q)
assert len(results) == 3
-#def test_with_fixture(httpserver):
+# def test_with_fixture(httpserver):
# httpserver.expect_request("/sparql/?query=SELECT * WHERE ?s ?p ?o").respond_with_json({"vars": ["s","p","o"], "bindings":[]})
# test_server = httpserver.url_for('/sparql')
# g = Graph()
@@ -140,7 +140,7 @@ def test_service_with_implicit_select_and_allcaps():
# assert len(results) == 0
-if __name__ == '__main__':
+if __name__ == "__main__":
# import nose
# nose.main(defaultTest=__name__)
test_service()
diff --git a/test/test_sparqlstore.py b/test/test_sparqlstore.py
index f638a178..38a8b481 100644
--- a/test/test_sparqlstore.py
+++ b/test/test_sparqlstore.py
@@ -15,7 +15,7 @@ except:
class SPARQLStoreDBPediaTestCase(unittest.TestCase):
- store_name = 'SPARQLStore'
+ store_name = "SPARQLStore"
path = "http://dbpedia.org/sparql"
storetest = True
create = False
@@ -41,8 +41,8 @@ class SPARQLStoreDBPediaTestCase(unittest.TestCase):
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
res = self.graph.query(
- query,
- initNs={"xyzzy": "http://www.w3.org/2004/02/skos/core#"})
+ query, initNs={"xyzzy": "http://www.w3.org/2004/02/skos/core#"}
+ )
for i in res:
assert type(i[0]) == Literal, i[0].n3()
@@ -51,10 +51,7 @@ class SPARQLStoreDBPediaTestCase(unittest.TestCase):
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
- self.assertRaises(
- HTTPError,
- self.graph.query,
- query)
+ self.assertRaises(HTTPError, self.graph.query, query)
def test_query_with_added_prolog(self):
prologue = """\
@@ -73,25 +70,34 @@ class SPARQLStoreUpdateTestCase(unittest.TestCase):
def setUp(self):
port = self.setup_mocked_endpoint()
self.graph = Graph(store="SPARQLUpdateStore", identifier=URIRef("urn:ex"))
- self.graph.open(("http://localhost:{port}/query".format(port=port),
- "http://localhost:{port}/update".format(port=port)), create=False)
+ self.graph.open(
+ (
+ "http://localhost:{port}/query".format(port=port),
+ "http://localhost:{port}/update".format(port=port),
+ ),
+ create=False,
+ )
ns = list(self.graph.namespaces())
assert len(ns) > 0, ns
def setup_mocked_endpoint(self):
# Configure mock server.
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
- s.bind(('localhost', 0))
+ s.bind(("localhost", 0))
address, port = s.getsockname()
s.close()
- mock_server = HTTPServer(('localhost', port), SPARQL11ProtocolStoreMock)
+ mock_server = HTTPServer(("localhost", port), SPARQL11ProtocolStoreMock)
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
- print("Started mocked sparql endpoint on http://localhost:{port}/".format(port=port))
+ print(
+ "Started mocked sparql endpoint on http://localhost:{port}/".format(
+ port=port
+ )
+ )
return port
def tearDown(self):
@@ -116,7 +122,9 @@ class SPARQL11ProtocolStoreMock(BaseHTTPRequestHandler):
if self.path == "/query":
if self.headers.get("Content-Type") == "application/sparql-query":
pass
- elif self.headers.get("Content-Type") == "application/x-www-form-urlencoded":
+ elif (
+ self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
+ ):
pass
else:
self.send_response(requests.codes.not_acceptable)
@@ -124,7 +132,9 @@ class SPARQL11ProtocolStoreMock(BaseHTTPRequestHandler):
elif self.path == "/update":
if self.headers.get("Content-Type") == "application/sparql-update":
pass
- elif self.headers.get("Content-Type") == "application/x-www-form-urlencoded":
+ elif (
+ self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
+ ):
pass
else:
self.send_response(requests.codes.not_acceptable)
@@ -142,5 +152,6 @@ class SPARQL11ProtocolStoreMock(BaseHTTPRequestHandler):
self.end_headers()
return
-if __name__ == '__main__':
+
+if __name__ == "__main__":
unittest.main()
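setup_mocked_endpoint() uses a standard trick worth noting: bind to port 0 so the OS hands out a free ephemeral port, read the port back, close the probe socket, and start the real server on that port (a small race window is accepted). In isolation:

    import socket
    from http.server import BaseHTTPRequestHandler, HTTPServer
    from threading import Thread

    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.bind(("localhost", 0))
    _, port = probe.getsockname()
    probe.close()

    server = HTTPServer(("localhost", port), BaseHTTPRequestHandler)
    Thread(target=server.serve_forever, daemon=True).start()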
diff --git a/test/test_sparqlupdatestore.py b/test/test_sparqlupdatestore.py
index 233dd834..b6dcedbd 100644
--- a/test/test_sparqlupdatestore.py
+++ b/test/test_sparqlupdatestore.py
@@ -9,8 +9,8 @@ import re
from rdflib import ConjunctiveGraph, URIRef, Literal, BNode, Graph
from urllib.request import urlopen
-HOST = 'http://localhost:3031'
-DB = '/db/'
+HOST = "http://localhost:3031"
+DB = "/db/"
# this assumes SPARQL1.1 query/update endpoints running locally at
# http://localhost:3031/db/
@@ -24,23 +24,22 @@ DB = '/db/'
# THIS WILL DELETE ALL DATA IN THE /db dataset
-michel = URIRef(u'urn:michel')
-tarek = URIRef(u'urn:tarek')
-bob = URIRef(u'urn:bob')
-likes = URIRef(u'urn:likes')
-hates = URIRef(u'urn:hates')
-pizza = URIRef(u'urn:pizza')
-cheese = URIRef(u'urn:cheese')
+michel = URIRef("urn:michel")
+tarek = URIRef("urn:tarek")
+bob = URIRef("urn:bob")
+likes = URIRef("urn:likes")
+hates = URIRef("urn:hates")
+pizza = URIRef("urn:pizza")
+cheese = URIRef("urn:cheese")
-graphuri = URIRef('urn:graph')
-othergraphuri = URIRef('urn:othergraph')
+graphuri = URIRef("urn:graph")
+othergraphuri = URIRef("urn:othergraph")
class TestSparql11(unittest.TestCase):
-
def setUp(self):
self.longMessage = True
- self.graph = ConjunctiveGraph('SPARQLUpdateStore')
+ self.graph = ConjunctiveGraph("SPARQLUpdateStore")
root = HOST + DB
self.graph.open((root + "sparql", root + "update"))
@@ -62,8 +61,8 @@ class TestSparql11(unittest.TestCase):
g2 = self.graph.get_context(othergraphuri)
g2.add((michel, likes, pizza))
- self.assertEqual(3, len(g), 'graph contains 3 triples')
- self.assertEqual(1, len(g2), 'other graph contains 1 triple')
+ self.assertEqual(3, len(g), "graph contains 3 triples")
+ self.assertEqual(1, len(g2), "other graph contains 1 triple")
r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEqual(2, len(list(r)), "two people like pizza")
@@ -72,8 +71,9 @@ class TestSparql11(unittest.TestCase):
self.assertEqual(2, len(list(r)), "two people like pizza")
# Test initBindings
- r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
- initBindings={'s': tarek})
+ r = g.query(
+ "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={"s": tarek}
+ )
self.assertEqual(1, len(list(r)), "i was asking only about tarek")
r = g.triples((tarek, likes, pizza))
@@ -94,7 +94,7 @@ class TestSparql11(unittest.TestCase):
g2.add((bob, likes, pizza))
g.add((tarek, hates, cheese))
- self.assertEqual(2, len(g), 'graph contains 2 triples')
+ self.assertEqual(2, len(g), "graph contains 2 triples")
# the following are actually bad tests as they depend on your endpoint,
# as pointed out in the sparqlstore.py code:
@@ -106,15 +106,19 @@ class TestSparql11(unittest.TestCase):
##
# Fuseki/TDB has a flag for specifying that the default graph
# is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config).
- self.assertEqual(3, len(self.graph),
- 'default union graph should contain three triples but contains:\n'
- '%s' % list(self.graph))
+ self.assertEqual(
+ 3,
+ len(self.graph),
+ "default union graph should contain three triples but contains:\n"
+ "%s" % list(self.graph),
+ )
r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEqual(2, len(list(r)), "two people like pizza")
- r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
- initBindings={'s': tarek})
+ r = self.graph.query(
+ "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={"s": tarek}
+ )
self.assertEqual(1, len(list(r)), "i was asking only about tarek")
r = self.graph.triples((tarek, likes, pizza))
@@ -129,44 +133,47 @@ class TestSparql11(unittest.TestCase):
self.assertEqual(1, len(list(r)), "only tarek likes pizza")
def testUpdate(self):
- self.graph.update("INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }")
+ self.graph.update(
+ "INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }"
+ )
g = self.graph.get_context(graphuri)
-        self.assertEqual(1, len(g), 'graph contains 1 triple')
+        self.assertEqual(1, len(g), "graph contains 1 triple")
def testUpdateWithInitNs(self):
self.graph.update(
"INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }",
- initNs={'ns': URIRef('urn:')}
+ initNs={"ns": URIRef("urn:")},
)
g = self.graph.get_context(graphuri)
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
def testUpdateWithInitBindings(self):
self.graph.update(
"INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }",
initBindings={
- 'a': URIRef('urn:michel'),
- 'b': URIRef('urn:likes'),
- 'c': URIRef('urn:pizza'),
- }
+ "a": URIRef("urn:michel"),
+ "b": URIRef("urn:likes"),
+ "c": URIRef("urn:pizza"),
+ },
)
g = self.graph.get_context(graphuri)
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
def testUpdateWithBlankNode(self):
self.graph.update(
- "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }")
+ "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }"
+ )
g = self.graph.get_context(graphuri)
for t in g.triples((None, None, None)):
self.assertTrue(isinstance(t[0], BNode))
@@ -175,33 +182,34 @@ class TestSparql11(unittest.TestCase):
def testUpdateWithBlankNodeSerializeAndParse(self):
self.graph.update(
- "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }")
+ "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }"
+ )
g = self.graph.get_context(graphuri)
- string = g.serialize(format='ntriples').decode('utf-8')
+ string = g.serialize(format="ntriples").decode("utf-8")
raised = False
try:
Graph().parse(data=string, format="ntriples")
except Exception as e:
raised = True
- self.assertFalse(raised, 'Exception raised when parsing: ' + string)
+ self.assertFalse(raised, "Exception raised when parsing: " + string)
def testMultipleUpdateWithInitBindings(self):
self.graph.update(
"INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };"
"INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }",
initBindings={
- 'a': URIRef('urn:michel'),
- 'b': URIRef('urn:likes'),
- 'c': URIRef('urn:pizza'),
- 'd': URIRef('urn:bob'),
- }
+ "a": URIRef("urn:michel"),
+ "b": URIRef("urn:likes"),
+ "c": URIRef("urn:pizza"),
+ "d": URIRef("urn:bob"),
+ },
)
g = self.graph.get_context(graphuri)
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza), (bob, likes, pizza)]),
- 'michel and bob like pizza'
+ "michel and bob like pizza",
)
def testNamedGraphUpdate(self):
@@ -211,25 +219,31 @@ class TestSparql11(unittest.TestCase):
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
- r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \
- "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
+ r2 = (
+ "DELETE { <urn:michel> <urn:likes> <urn:pizza> } "
+ + "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
+ )
g.update(r2)
self.assertEqual(
set(g.triples((None, None, None))),
set([(bob, likes, pizza)]),
- 'only bob likes pizza'
+ "only bob likes pizza",
)
says = URIRef("urn:says")
# Strings with unbalanced curly braces
- tricky_strs = ["With an unbalanced curly brace %s " % brace
- for brace in ["{", "}"]]
+ tricky_strs = [
+ "With an unbalanced curly brace %s " % brace for brace in ["{", "}"]
+ ]
for tricky_str in tricky_strs:
- r3 = """INSERT { ?b <urn:says> "%s" }
- WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str
+ r3 = (
+ """INSERT { ?b <urn:says> "%s" }
+ WHERE { ?b <urn:likes> <urn:pizza>} """
+ % tricky_str
+ )
g.update(r3)
values = set()
@@ -253,16 +267,26 @@ class TestSparql11(unittest.TestCase):
r4strings.append(r"""'''9: adfk } <foo> #éï \\'''""")
r4strings.append("'''10: ad adsfj \n { \n sadfj'''")
- r4 = "\n".join([
- u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s
- for s in r4strings
- ])
+ r4 = "\n".join(
+ ["INSERT DATA { <urn:michel> <urn:says> %s } ;" % s for s in r4strings]
+ )
g.update(r4)
values = set()
for v in g.objects(michel, says):
values.add(str(v))
- self.assertEqual(values, set([re.sub(r"\\(.)", r"\1", re.sub(
- r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s)) for s in r4strings]))
+ self.assertEqual(
+ values,
+ set(
+ [
+ re.sub(
+ r"\\(.)",
+ r"\1",
+ re.sub(r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s),
+ )
+ for s in r4strings
+ ]
+ ),
+ )
# IRI Containing ' or #
# The fragment identifier must not be misinterpreted as a comment
@@ -275,10 +299,10 @@ class TestSparql11(unittest.TestCase):
values = set()
for v in g.objects(michel, hates):
values.add(str(v))
- self.assertEqual(values, set([u"urn:foo'bar?baz;a=1&b=2#fragment", u"'}"]))
+ self.assertEqual(values, set(["urn:foo'bar?baz;a=1&b=2#fragment", "'}"]))
# Comments
- r6 = u"""
+ r6 = """
INSERT DATA {
<urn:bob> <urn:hates> <urn:bob> . # No closing brace: }
<urn:bob> <urn:hates> <urn:michel>.
@@ -294,39 +318,40 @@ class TestSparql11(unittest.TestCase):
def testNamedGraphUpdateWithInitBindings(self):
g = self.graph.get_context(graphuri)
r = "INSERT { ?a ?b ?c } WHERE {}"
- g.update(r, initBindings={
- 'a': michel,
- 'b': likes,
- 'c': pizza
- })
+ g.update(r, initBindings={"a": michel, "b": likes, "c": pizza})
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
def testEmptyNamedGraph(self):
empty_graph_iri = "urn:empty-graph-1"
self.graph.update("CREATE GRAPH <%s>" % empty_graph_iri)
- named_graphs = [str(r[0]) for r in self.graph.query(
- "SELECT ?name WHERE { GRAPH ?name {} }")]
+ named_graphs = [
+ str(r[0]) for r in self.graph.query("SELECT ?name WHERE { GRAPH ?name {} }")
+ ]
# Some SPARQL endpoint backends (like TDB) are not able to find empty named graphs
# (at least with this query)
if empty_graph_iri in named_graphs:
- self.assertTrue(empty_graph_iri in [str(g.identifier)
- for g in self.graph.contexts()])
+ self.assertTrue(
+ empty_graph_iri in [str(g.identifier) for g in self.graph.contexts()]
+ )
def testEmptyLiteral(self):
# test for https://github.com/RDFLib/rdflib/issues/457
# also see test_issue457.py which is sparql store independent!
g = self.graph.get_context(graphuri)
- g.add((
- URIRef('http://example.com/s'),
- URIRef('http://example.com/p'),
- Literal('')))
+ g.add(
+ (
+ URIRef("http://example.com/s"),
+ URIRef("http://example.com/p"),
+ Literal(""),
+ )
+ )
o = tuple(g)[0][2]
- self.assertEqual(o, Literal(''), repr(o))
+ self.assertEqual(o, Literal(""), repr(o))
try:
@@ -335,5 +360,5 @@ except:
raise SkipTest(HOST + " is unavailable.")
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
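For orientation, the store under test is driven like this (a sketch; it assumes a SPARQL 1.1 query/update endpoint at the HOST + DB root above):

    from rdflib import ConjunctiveGraph, URIRef

    g = ConjunctiveGraph("SPARQLUpdateStore")
    g.open(("http://localhost:3031/db/sparql", "http://localhost:3031/db/update"))

    g.update("INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> } }")
    assert len(g.get_context(URIRef("urn:graph"))) == 1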
diff --git a/test/test_swap_n3.py b/test/test_swap_n3.py
index b6dcc698..f7071bec 100644
--- a/test/test_swap_n3.py
+++ b/test/test_swap_n3.py
@@ -2,6 +2,7 @@ from nose.exc import SkipTest
import os
import sys
import unittest
+
try:
maketrans = str.maketrans
except AttributeError:
@@ -42,22 +43,22 @@ qt = rdflib.Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-query#")
skiptests = [
- 'syntax_neg_single_quote',
- 'syntax_neg_literal_predicate',
- 'syntax_this_quantifiers',
- 'syntax_trailing_semicolon',
- 'syntax_neg_thisadoc',
- 'syntax_equals1',
- 'syntax_equals2',
- 'syntax_this_rules',
- 'syntax_neg_keywords3',
- 'syntax_zero_objects',
- 'syntax_neg_formula_predicate',
- 'syntax_zero_predicates',
+ "syntax_neg_single_quote",
+ "syntax_neg_literal_predicate",
+ "syntax_this_quantifiers",
+ "syntax_trailing_semicolon",
+ "syntax_neg_thisadoc",
+ "syntax_equals1",
+ "syntax_equals2",
+ "syntax_this_rules",
+ "syntax_neg_keywords3",
+ "syntax_zero_objects",
+ "syntax_neg_formula_predicate",
+ "syntax_zero_predicates",
# 'syntax_qvars1',
# 'syntax_qvars2',
# 'contexts',
- 'syntax_too_nested'
+ "syntax_too_nested",
]
@@ -81,7 +82,7 @@ def generictest(e):
def dir_to_uri(directory, sep=os.path.sep):
- '''
+ """
Convert a local path to a File URI.
>>> dir_to_uri('c:\\\\temp\\\\foo\\\\file.txt', sep='\\\\')
@@ -89,28 +90,36 @@ def dir_to_uri(directory, sep=os.path.sep):
>>> dir_to_uri('/tmp/foo/file.txt', sep='/')
'file:///tmp/foo/file.txt'
- '''
+ """
items = directory.split(sep)
- path = '/'.join(items)
- if path.startswith('/'):
+ path = "/".join(items)
+ if path.startswith("/"):
path = path[1:]
- return 'file:///%s' % (path,)
+ return "file:///%s" % (path,)
def test_cases():
from copy import deepcopy
+
g = rdflib.Graph()
- swap_dir = os.path.join(os.getcwd(), 'test', 'swap-n3')
- g.parse(os.path.join(swap_dir, 'n3-rdf.tests'), format="n3")
- g.parse(os.path.join(swap_dir, 'n3-full.tests'), format="n3")
+ swap_dir = os.path.join(os.getcwd(), "test", "swap-n3")
+ g.parse(os.path.join(swap_dir, "n3-rdf.tests"), format="n3")
+ g.parse(os.path.join(swap_dir, "n3-full.tests"), format="n3")
tfiles = []
- swap_dir_uri = dir_to_uri(swap_dir) + '/'
+ swap_dir_uri = dir_to_uri(swap_dir) + "/"
for tst in g.subjects():
- files = [str(tfile).replace('http://www.w3.org/2000/10/', swap_dir_uri)
- for tfile in g.objects(tst, rdflib.URIRef("http://www.w3.org/2004/11/n3test#inputDocument")) if tfile.endswith('n3')]
+ files = [
+ str(tfile).replace("http://www.w3.org/2000/10/", swap_dir_uri)
+ for tfile in g.objects(
+ tst, rdflib.URIRef("http://www.w3.org/2004/11/n3test#inputDocument")
+ )
+ if tfile.endswith("n3")
+ ]
tfiles += files
for tfile in set(tfiles):
- gname = tfile.split('/swap-n3/swap/test/')[1][:-3].translate(maketrans('-/','__'))
+ gname = tfile.split("/swap-n3/swap/test/")[1][:-3].translate(
+ maketrans("-/", "__")
+ )
e = Envelope(gname, tfile)
if gname in skiptests:
e.skip = True
@@ -119,6 +128,7 @@ def test_cases():
# e.skip = True
if sys.version_info[:2] == (2, 4):
import pickle
+
gjt = pickle.dumps(generictest)
gt = pickle.loads(gjt)
else:
@@ -130,4 +140,3 @@ def test_cases():
if __name__ == "__main__":
test_cases()
# unittest.main()
-
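
The dir_to_uri helper reformatted above hand-rolls a file URI so its
doctests can feed it synthetic, non-local paths. On Python 3, absolute
local paths on a POSIX system can use the standard library instead (a
sketch, not what the test suite does):

    from pathlib import Path

    # Path.as_uri() percent-encodes the path and adds the file:// scheme;
    # it raises ValueError for relative paths, which is one reason the
    # test keeps its own helper.
    print(Path("/tmp/foo/file.txt").as_uri())  # file:///tmp/foo/file.txt
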
diff --git a/test/test_term.py b/test/test_term.py
index aae05600..0363baba 100644
--- a/test/test_term.py
+++ b/test/test_term.py
@@ -23,11 +23,12 @@ class TestURIRefRepr(unittest.TestCase):
def testSubclassNameAppearsInRepr(self):
class MyURIRef(URIRef):
pass
- x = MyURIRef('http://example.com/')
+
+ x = MyURIRef("http://example.com/")
self.assertEqual(repr(x), uformat("MyURIRef(u'http://example.com/')"))
def testGracefulOrdering(self):
- u = URIRef('cake')
+ u = URIRef("cake")
g = Graph()
a = u > u
a = u > BNode()
@@ -36,18 +37,17 @@ class TestURIRefRepr(unittest.TestCase):
class TestBNodeRepr(unittest.TestCase):
-
def testSubclassNameAppearsInRepr(self):
class MyBNode(BNode):
pass
+
x = MyBNode()
self.assertTrue(repr(x).startswith("MyBNode("))
class TestLiteral(unittest.TestCase):
-
def test_base64_values(self):
- b64msg = 'cmRmbGliIGlzIGNvb2whIGFsc28gaGVyZSdzIHNvbWUgYmluYXJ5IAAR83UC'
+ b64msg = "cmRmbGliIGlzIGNvb2whIGFsc28gaGVyZSdzIHNvbWUgYmluYXJ5IAAR83UC"
decoded_b64msg = base64.b64decode(b64msg)
lit = Literal(b64msg, datatype=XSD.base64Binary)
self.assertEqual(lit.value, decoded_b64msg)
@@ -56,30 +56,14 @@ class TestLiteral(unittest.TestCase):
def test_total_order(self):
types = {
XSD.dateTime: (
- '2001-01-01T00:00:00',
- '2001-01-01T00:00:00Z',
- '2001-01-01T00:00:00-00:00'
- ),
- XSD.date: (
- '2001-01-01',
- '2001-01-01Z',
- '2001-01-01-00:00'
- ),
- XSD.time: (
- '00:00:00',
- '00:00:00Z',
- '00:00:00-00:00'
- ),
- XSD.gYear: (
- '2001',
- '2001Z',
- '2001-00:00'
- ), # interval
- XSD.gYearMonth: (
- '2001-01',
- '2001-01Z',
- '2001-01-00:00'
+ "2001-01-01T00:00:00",
+ "2001-01-01T00:00:00Z",
+ "2001-01-01T00:00:00-00:00",
),
+ XSD.date: ("2001-01-01", "2001-01-01Z", "2001-01-01-00:00"),
+ XSD.time: ("00:00:00", "00:00:00Z", "00:00:00-00:00"),
+ XSD.gYear: ("2001", "2001Z", "2001-00:00"), # interval
+ XSD.gYearMonth: ("2001-01", "2001-01Z", "2001-01-00:00"),
}
literals = [
Literal(literal, datatype=t)
@@ -100,19 +84,19 @@ class TestLiteral(unittest.TestCase):
l1 = [
Literal(l, datatype=XSD.dateTime)
for l in [
- '2001-01-01T00:00:00',
- '2001-01-01T01:00:00',
- '2001-01-01T01:00:01',
- '2001-01-02T01:00:01',
- '2001-01-01T00:00:00Z',
- '2001-01-01T00:00:00-00:00',
- '2001-01-01T01:00:00Z',
- '2001-01-01T01:00:00-00:00',
- '2001-01-01T00:00:00-01:30',
- '2001-01-01T01:00:00-01:30',
- '2001-01-02T01:00:01Z',
- '2001-01-02T01:00:01-00:00',
- '2001-01-02T01:00:01-01:30'
+ "2001-01-01T00:00:00",
+ "2001-01-01T01:00:00",
+ "2001-01-01T01:00:01",
+ "2001-01-02T01:00:01",
+ "2001-01-01T00:00:00Z",
+ "2001-01-01T00:00:00-00:00",
+ "2001-01-01T01:00:00Z",
+ "2001-01-01T01:00:00-00:00",
+ "2001-01-01T00:00:00-01:30",
+ "2001-01-01T01:00:00-01:30",
+ "2001-01-02T01:00:01Z",
+ "2001-01-02T01:00:01-00:00",
+ "2001-01-02T01:00:01-01:30",
]
]
l2 = list(l1)
@@ -134,7 +118,12 @@ class TestLiteral(unittest.TestCase):
(3, Literal(float(1)), Literal(float(1)), Literal(float(2))),
(4, Literal(1), Literal(1.1), Literal(2.1, datatype=XSD.decimal)),
(5, Literal(1.1), Literal(1.1), Literal(2.2)),
- (6, Literal(Decimal(1)), Literal(Decimal(1.1)), Literal(Decimal(2.1), datatype=XSD.decimal)),
+ (
+ 6,
+ Literal(Decimal(1)),
+ Literal(Decimal(1.1)),
+ Literal(Decimal(2.1), datatype=XSD.decimal),
+ ),
(7, Literal(Decimal(1.1)), Literal(Decimal(1.1)), Literal(Decimal(2.2))),
(8, Literal(float(1)), Literal(float(1.1)), Literal(float(2.1))),
(9, Literal(float(1.1)), Literal(float(1.1)), Literal(float(2.2))),
@@ -144,27 +133,74 @@ class TestLiteral(unittest.TestCase):
(14, Literal(-1), Literal(-1.1), Literal(-2.1)),
(15, Literal(-1.1), Literal(-1.1), Literal(-2.2)),
(16, Literal(Decimal(-1)), Literal(Decimal(-1.1)), Literal(Decimal(-2.1))),
- (17, Literal(Decimal(-1.1)), Literal(Decimal(-1.1)), Literal(Decimal(-2.2))),
+ (
+ 17,
+ Literal(Decimal(-1.1)),
+ Literal(Decimal(-1.1)),
+ Literal(Decimal(-2.2)),
+ ),
(18, Literal(float(-1)), Literal(float(-1.1)), Literal(float(-2.1))),
(19, Literal(float(-1.1)), Literal(float(-1.1)), Literal(float(-2.2))),
-
(20, Literal(1), Literal(1.0), Literal(2.0)),
(21, Literal(1.0), Literal(1.0), Literal(2.0)),
(22, Literal(Decimal(1)), Literal(Decimal(1.0)), Literal(Decimal(2.0))),
(23, Literal(Decimal(1.0)), Literal(Decimal(1.0)), Literal(Decimal(2.0))),
(24, Literal(float(1)), Literal(float(1.0)), Literal(float(2.0))),
(25, Literal(float(1.0)), Literal(float(1.0)), Literal(float(2.0))),
-
- (26, Literal(1, datatype=XSD.integer), Literal(1, datatype=XSD.integer), Literal(2, datatype=XSD.integer)),
- (27, Literal(1, datatype=XSD.integer), Literal("1", datatype=XSD.integer), Literal("2", datatype=XSD.integer)),
- (28, Literal("1", datatype=XSD.integer), Literal("1", datatype=XSD.integer), Literal("2", datatype=XSD.integer)),
- (29, Literal("1"), Literal("1", datatype=XSD.integer), Literal("11", datatype=XSD.string)),
- (30, Literal(1), Literal("1", datatype=XSD.integer), Literal("2", datatype=XSD.integer)),
- (31, Literal(Decimal(1), datatype=XSD.decimal), Literal(Decimal(1), datatype=XSD.decimal), Literal(Decimal(2), datatype=XSD.decimal)),
- (32, Literal(Decimal(1)), Literal(Decimal(1), datatype=XSD.decimal), Literal(Decimal(2), datatype=XSD.decimal)),
- (33, Literal(float(1)), Literal(float(1), datatype=XSD.float), Literal(float(2), datatype=XSD.float)),
- (34, Literal(float(1), datatype=XSD.float), Literal(float(1), datatype=XSD.float), Literal(float(2), datatype=XSD.float)),
-
+ (
+ 26,
+ Literal(1, datatype=XSD.integer),
+ Literal(1, datatype=XSD.integer),
+ Literal(2, datatype=XSD.integer),
+ ),
+ (
+ 27,
+ Literal(1, datatype=XSD.integer),
+ Literal("1", datatype=XSD.integer),
+ Literal("2", datatype=XSD.integer),
+ ),
+ (
+ 28,
+ Literal("1", datatype=XSD.integer),
+ Literal("1", datatype=XSD.integer),
+ Literal("2", datatype=XSD.integer),
+ ),
+ (
+ 29,
+ Literal("1"),
+ Literal("1", datatype=XSD.integer),
+ Literal("11", datatype=XSD.string),
+ ),
+ (
+ 30,
+ Literal(1),
+ Literal("1", datatype=XSD.integer),
+ Literal("2", datatype=XSD.integer),
+ ),
+ (
+ 31,
+ Literal(Decimal(1), datatype=XSD.decimal),
+ Literal(Decimal(1), datatype=XSD.decimal),
+ Literal(Decimal(2), datatype=XSD.decimal),
+ ),
+ (
+ 32,
+ Literal(Decimal(1)),
+ Literal(Decimal(1), datatype=XSD.decimal),
+ Literal(Decimal(2), datatype=XSD.decimal),
+ ),
+ (
+ 33,
+ Literal(float(1)),
+ Literal(float(1), datatype=XSD.float),
+ Literal(float(2), datatype=XSD.float),
+ ),
+ (
+ 34,
+ Literal(float(1), datatype=XSD.float),
+ Literal(float(1), datatype=XSD.float),
+ Literal(float(2), datatype=XSD.float),
+ ),
(35, Literal(1), 1, Literal(2)),
(36, Literal(1), 1.0, Literal(2, datatype=XSD.decimal)),
(37, Literal(1.0), 1, Literal(2, datatype=XSD.decimal)),
@@ -173,14 +209,42 @@ class TestLiteral(unittest.TestCase):
(40, Literal(Decimal(1.0)), Decimal(1.0), Literal(Decimal(2.0))),
(41, Literal(float(1.0)), float(1), Literal(float(2.0))),
(42, Literal(float(1.0)), float(1.0), Literal(float(2.0))),
-
- (43, Literal(1, datatype=XSD.integer), "+1.1", Literal("1+1.1", datatype=XSD.string)),
- (44, Literal(1, datatype=XSD.integer), Literal("+1.1", datatype=XSD.string), Literal("1+1.1", datatype=XSD.string)),
- (45, Literal(Decimal(1.0), datatype=XSD.integer), Literal(u"1", datatype=XSD.string), Literal("11", datatype=XSD.string)),
- (46, Literal(1.1, datatype=XSD.integer), Literal("1", datatype=XSD.string), Literal("1.11", datatype=XSD.string)),
-
- (47, Literal(1, datatype=XSD.integer), None, Literal(1, datatype=XSD.integer)),
- (48, Literal("1", datatype=XSD.string), None, Literal("1", datatype=XSD.string)),
+ (
+ 43,
+ Literal(1, datatype=XSD.integer),
+ "+1.1",
+ Literal("1+1.1", datatype=XSD.string),
+ ),
+ (
+ 44,
+ Literal(1, datatype=XSD.integer),
+ Literal("+1.1", datatype=XSD.string),
+ Literal("1+1.1", datatype=XSD.string),
+ ),
+ (
+ 45,
+ Literal(Decimal(1.0), datatype=XSD.integer),
+ Literal(u"1", datatype=XSD.string),
+ Literal("11", datatype=XSD.string),
+ ),
+ (
+ 46,
+ Literal(1.1, datatype=XSD.integer),
+ Literal("1", datatype=XSD.string),
+ Literal("1.11", datatype=XSD.string),
+ ),
+ (
+ 47,
+ Literal(1, datatype=XSD.integer),
+ None,
+ Literal(1, datatype=XSD.integer),
+ ),
+ (
+ 48,
+ Literal("1", datatype=XSD.string),
+ None,
+ Literal("1", datatype=XSD.string),
+ ),
]
for case in cases:
@@ -196,22 +260,26 @@ class TestLiteral(unittest.TestCase):
if not case_passed:
print(case[1], case[2])
print("expected: " + case[3] + ", " + case[3].datatype)
- print("actual: " + (case[1] + case[2]) + ", " + (case[1] + case[2]).datatype)
+ print(
+ "actual: "
+ + (case[1] + case[2])
+ + ", "
+ + (case[1] + case[2]).datatype
+ )
self.assertTrue(case_passed, "Case " + str(case[0]) + " failed")
class TestValidityFunctions(unittest.TestCase):
-
def test_is_valid_unicode(self):
testcase_list = (
(None, True),
(1, True),
- (['foo'], True),
- ({'foo': b'bar'}, True),
- ('foo', True),
- (b'foo\x00', True),
- (b'foo\xf3\x02', False)
+ (["foo"], True),
+ ({"foo": b"bar"}, True),
+ ("foo", True),
+ (b"foo\x00", True),
+ (b"foo\xf3\x02", False),
)
for val, expected in testcase_list:
self.assertEqual(_is_valid_unicode(val), expected)
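
The reflowed `cases` table above is easier to scan after blackening: each
tuple is (case number, left operand, right operand, expected sum). As a
standalone illustration of the Literal semantics it exercises (mirroring
case 4 in the table; nothing here beyond what the test asserts):

    from rdflib import Literal
    from rdflib.namespace import XSD

    # Typed literals expose a converted Python value...
    lit = Literal("2001-01-01T00:00:00Z", datatype=XSD.dateTime)
    print(type(lit.value))  # <class 'datetime.datetime'>

    # ...and addition is datatype-aware: int + float promotes, and
    # case 4 above expects the result "2.1"^^xsd:decimal.
    print(Literal(1) + Literal(1.1))
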
diff --git a/test/test_trig.py b/test/test_trig.py
index 78f257ea..90321c5c 100644
--- a/test/test_trig.py
+++ b/test/test_trig.py
@@ -4,85 +4,86 @@ import re
from nose import SkipTest
-TRIPLE = (rdflib.URIRef("http://example.com/s"),
- rdflib.RDFS.label,
- rdflib.Literal("example 1"))
+TRIPLE = (
+ rdflib.URIRef("http://example.com/s"),
+ rdflib.RDFS.label,
+ rdflib.Literal("example 1"),
+)
class TestTrig(unittest.TestCase):
-
def testEmpty(self):
g = rdflib.Graph()
- s = g.serialize(format='trig')
+ s = g.serialize(format="trig")
self.assertTrue(s is not None)
def testRepeatTriples(self):
g = rdflib.ConjunctiveGraph()
- g.get_context('urn:a').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:2'),
- rdflib.URIRef('urn:3')))
+ g.get_context("urn:a").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:2"), rdflib.URIRef("urn:3"))
+ )
- g.get_context('urn:b').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:2'),
- rdflib.URIRef('urn:3')))
+ g.get_context("urn:b").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:2"), rdflib.URIRef("urn:3"))
+ )
- self.assertEqual(len(g.get_context('urn:a')), 1)
- self.assertEqual(len(g.get_context('urn:b')), 1)
+ self.assertEqual(len(g.get_context("urn:a")), 1)
+ self.assertEqual(len(g.get_context("urn:b")), 1)
- s = g.serialize(format='trig')
- self.assertTrue('{}'.encode("latin-1") not in s) # no empty graphs!
+ s = g.serialize(format="trig")
+ self.assertTrue("{}".encode("latin-1") not in s) # no empty graphs!
def testSameSubject(self):
g = rdflib.ConjunctiveGraph()
- g.get_context('urn:a').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:p1'),
- rdflib.URIRef('urn:o1')))
+ g.get_context("urn:a").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:p1"), rdflib.URIRef("urn:o1"))
+ )
- g.get_context('urn:b').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:p2'),
- rdflib.URIRef('urn:o2')))
+ g.get_context("urn:b").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:p2"), rdflib.URIRef("urn:o2"))
+ )
- self.assertEqual(len(g.get_context('urn:a')), 1)
- self.assertEqual(len(g.get_context('urn:b')), 1)
+ self.assertEqual(len(g.get_context("urn:a")), 1)
+ self.assertEqual(len(g.get_context("urn:b")), 1)
- s = g.serialize(format='trig')
+ s = g.serialize(format="trig")
self.assertEqual(len(re.findall("p1".encode("latin-1"), s)), 1)
self.assertEqual(len(re.findall("p2".encode("latin-1"), s)), 1)
- self.assertTrue('{}'.encode("latin-1") not in s) # no empty graphs!
+ self.assertTrue("{}".encode("latin-1") not in s) # no empty graphs!
def testRememberNamespace(self):
g = rdflib.ConjunctiveGraph()
g.add(TRIPLE + (rdflib.URIRef("http://example.com/graph1"),))
# In 4.2.0 the first serialization would fail to include the
# prefix for the graph but later serialize() calls would work.
- first_out = g.serialize(format='trig')
- second_out = g.serialize(format='trig')
- self.assertTrue(b'@prefix ns1: <http://example.com/> .' in second_out)
- self.assertTrue(b'@prefix ns1: <http://example.com/> .' in first_out)
+ first_out = g.serialize(format="trig")
+ second_out = g.serialize(format="trig")
+ self.assertTrue(b"@prefix ns1: <http://example.com/> ." in second_out)
+ self.assertTrue(b"@prefix ns1: <http://example.com/> ." in first_out)
def testGraphQnameSyntax(self):
g = rdflib.ConjunctiveGraph()
g.add(TRIPLE + (rdflib.URIRef("http://example.com/graph1"),))
- out = g.serialize(format='trig')
- self.assertTrue(b'ns1:graph1 {' in out)
+ out = g.serialize(format="trig")
+ self.assertTrue(b"ns1:graph1 {" in out)
def testGraphUriSyntax(self):
g = rdflib.ConjunctiveGraph()
# getQName will not abbreviate this, so it should serialize as
# a '<...>' term.
g.add(TRIPLE + (rdflib.URIRef("http://example.com/foo."),))
- out = g.serialize(format='trig')
- self.assertTrue(b'<http://example.com/foo.> {' in out)
+ out = g.serialize(format="trig")
+ self.assertTrue(b"<http://example.com/foo.> {" in out)
def testBlankGraphIdentifier(self):
g = rdflib.ConjunctiveGraph()
g.add(TRIPLE + (rdflib.BNode(),))
- out = g.serialize(format='trig')
+ out = g.serialize(format="trig")
graph_label_line = out.splitlines()[-4]
- self.assertTrue(re.match(br'^_:[a-zA-Z0-9]+ \{', graph_label_line))
+ self.assertTrue(re.match(br"^_:[a-zA-Z0-9]+ \{", graph_label_line))
def testGraphParsing(self):
# should parse into single default graph context
@@ -90,7 +91,7 @@ class TestTrig(unittest.TestCase):
<http://example.com/thing#thing_a> <http://example.com/knows> <http://example.com/thing#thing_b> .
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
+ g.parse(data=data, format="trig")
self.assertEqual(len(list(g.contexts())), 1)
# should parse into single default graph context
@@ -100,7 +101,7 @@ class TestTrig(unittest.TestCase):
{ <http://example.com/thing#thing_c> <http://example.com/knows> <http://example.com/thing#thing_d> . }
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
+ g.parse(data=data, format="trig")
self.assertEqual(len(list(g.contexts())), 1)
# should parse into 2 contexts, one default, one named
@@ -114,12 +115,12 @@ class TestTrig(unittest.TestCase):
}
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
+ g.parse(data=data, format="trig")
self.assertEqual(len(list(g.contexts())), 2)
def testRoundTrips(self):
- raise SkipTest('skipped until 5.0')
+ raise SkipTest("skipped until 5.0")
data = """
<http://example.com/thing#thing_a> <http://example.com/knows> <http://example.com/thing#thing_b> .
@@ -132,17 +133,17 @@ class TestTrig(unittest.TestCase):
"""
g = rdflib.ConjunctiveGraph()
for i in range(5):
- g.parse(data=data, format='trig')
- data = g.serialize(format='trig')
+ g.parse(data=data, format="trig")
+ data = g.serialize(format="trig")
# output should only contain 1 mention of each resource/graph name
- self.assertEqual(data.count('thing_a'), 1)
- self.assertEqual(data.count('thing_b'), 1)
- self.assertEqual(data.count('thing_c'), 1)
- self.assertEqual(data.count('thing_d'), 1)
- self.assertEqual(data.count('thing_e'), 1)
- self.assertEqual(data.count('thing_f'), 1)
- self.assertEqual(data.count('graph_a'), 1)
+ self.assertEqual(data.count("thing_a"), 1)
+ self.assertEqual(data.count("thing_b"), 1)
+ self.assertEqual(data.count("thing_c"), 1)
+ self.assertEqual(data.count("thing_d"), 1)
+ self.assertEqual(data.count("thing_e"), 1)
+ self.assertEqual(data.count("thing_f"), 1)
+ self.assertEqual(data.count("graph_a"), 1)
def testDefaultGraphSerializesWithoutName(self):
data = """
@@ -151,10 +152,10 @@ class TestTrig(unittest.TestCase):
{ <http://example.com/thing#thing_c> <http://example.com/knows> <http://example.com/thing#thing_d> . }
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
- data = g.serialize(format='trig')
+ g.parse(data=data, format="trig")
+ data = g.serialize(format="trig")
- self.assertTrue('None'.encode("latin-1") not in data)
+ self.assertTrue("None".encode("latin-1") not in data)
def testPrefixes(self):
@@ -171,9 +172,9 @@ class TestTrig(unittest.TestCase):
"""
cg = rdflib.ConjunctiveGraph()
- cg.parse(data=data, format='trig')
- data = cg.serialize(format='trig')
+ cg.parse(data=data, format="trig")
+ data = cg.serialize(format="trig")
- self.assert_('ns2: <http://ex.org/docs/'.encode("latin-1") in data, data)
- self.assert_('<ns2:document1>'.encode("latin-1") not in data, data)
- self.assert_('ns2:document1'.encode("latin-1") in data, data)
+ self.assert_("ns2: <http://ex.org/docs/".encode("latin-1") in data, data)
+ self.assert_("<ns2:document1>".encode("latin-1") not in data, data)
+ self.assert_("ns2:document1".encode("latin-1") in data, data)
diff --git a/test/test_trig_w3c.py b/test/test_trig_w3c.py
index bb8588e0..d59a2f08 100644
--- a/test/test_trig_w3c.py
+++ b/test/test_trig_w3c.py
@@ -16,15 +16,15 @@ def trig(test):
g = ConjunctiveGraph()
try:
- base = 'http://www.w3.org/2013/TriGTests/' + split_uri(test.action)[1]
+ base = "http://www.w3.org/2013/TriGTests/" + split_uri(test.action)[1]
- g.parse(test.action, publicID=base, format='trig')
+ g.parse(test.action, publicID=base, format="trig")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
if test.result: # eval test
res = ConjunctiveGraph()
- res.parse(test.result, format='nquads')
+ res.parse(test.result, format="nquads")
if verbose:
@@ -32,13 +32,13 @@ def trig(test):
if not first and not second:
return
- print('===============================')
- print('TriG')
- print(g.serialize(format='nquads'))
- print('===============================')
- print('NQuads')
- print(res.serialize(format='nquads'))
- print('===============================')
+ print("===============================")
+ print("TriG")
+ print(g.serialize(format="nquads"))
+ print("===============================")
+ print("NQuads")
+ print(res.serialize(format="nquads"))
+ print("===============================")
print("Diff:")
# print "%d triples in both"%len(both)
@@ -50,9 +50,9 @@ def trig(test):
print("NQuads Only")
for t in second:
print(t)
- raise Exception('Graphs do not match!')
+ raise Exception("Graphs do not match!")
- assert isomorphic(g, res), 'graphs must be the same'
+ assert isomorphic(g, res), "graphs must be the same"
except:
if test.syntax:
@@ -63,12 +63,12 @@ testers = {
RDFT.TestTrigPositiveSyntax: trig,
RDFT.TestTrigNegativeSyntax: trig,
RDFT.TestTrigEval: trig,
- RDFT.TestTrigNegativeEval: trig
+ RDFT.TestTrigNegativeEval: trig,
}
def test_trig(tests=None):
- for t in nose_tests(testers, 'test/w3c/trig/manifest.ttl'):
+ for t in nose_tests(testers, "test/w3c/trig/manifest.ttl"):
if tests:
for test in tests:
if test in t[1].uri:
@@ -79,7 +79,7 @@ def test_trig(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_trig, 'rdflib_trig')
+ nose_tst_earl_report(test_trig, "rdflib_trig")
diff --git a/test/test_trix_parse.py b/test/test_trix_parse.py
index 1b0f9fb9..290ce0b6 100644
--- a/test/test_trix_parse.py
+++ b/test/test_trix_parse.py
@@ -6,7 +6,6 @@ import unittest
class TestTrixParse(unittest.TestCase):
-
def setUp(self):
pass
@@ -45,5 +44,5 @@ class TestTrixParse(unittest.TestCase):
# print "Parsed %d triples"%len(g)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_trix_serialize.py b/test/test_trix_serialize.py
index 3330234b..4fe78a18 100644
--- a/test/test_trix_serialize.py
+++ b/test/test_trix_serialize.py
@@ -9,7 +9,6 @@ from io import BytesIO
class TestTrixSerialize(unittest.TestCase):
-
def setUp(self):
pass
@@ -18,17 +17,17 @@ class TestTrixSerialize(unittest.TestCase):
def testSerialize(self):
- s1 = URIRef('store:1')
- r1 = URIRef('resource:1')
- r2 = URIRef('resource:2')
+ s1 = URIRef("store:1")
+ r1 = URIRef("resource:1")
+ r2 = URIRef("resource:2")
- label = URIRef('predicate:label')
+ label = URIRef("predicate:label")
g1 = Graph(identifier=s1)
g1.add((r1, label, Literal("label 1", lang="en")))
g1.add((r1, label, Literal("label 2")))
- s2 = URIRef('store:2')
+ s2 = URIRef("store:2")
g2 = Graph(identifier=s2)
g2.add((r2, label, Literal("label 3")))
@@ -37,13 +36,13 @@ class TestTrixSerialize(unittest.TestCase):
g.addN([(s, p, o, g1)])
for s, p, o in g2.triples((None, None, None)):
g.addN([(s, p, o, g2)])
- r3 = URIRef('resource:3')
+ r3 = URIRef("resource:3")
g.add((r3, label, Literal(4)))
- r = g.serialize(format='trix')
+ r = g.serialize(format="trix")
g3 = ConjunctiveGraph()
- g3.parse(BytesIO(r), format='trix')
+ g3.parse(BytesIO(r), format="trix")
for q in g3.quads((None, None, None)):
# TODO: Fix once getGraph/getContext is in conjunctive graph
@@ -87,12 +86,10 @@ class TestTrixSerialize(unittest.TestCase):
graph = ConjunctiveGraph()
graph.bind(None, "http://defaultnamespace")
- sg = graph.serialize(format='trix').decode('UTF-8')
- self.assertTrue(
- 'xmlns="http://defaultnamespace"' not in sg, sg)
- self.assertTrue(
- 'xmlns="http://www.w3.org/2004/03/trix/trix-1/' in sg, sg)
+ sg = graph.serialize(format="trix").decode("UTF-8")
+ self.assertTrue('xmlns="http://defaultnamespace"' not in sg, sg)
+ self.assertTrue('xmlns="http://www.w3.org/2004/03/trix/trix-1/' in sg, sg)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
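
The TriX round trip in testSerialize above works because parse() accepts a
file-like object. Reduced to its essentials (illustrative terms; bytes
output as in rdflib of this vintage):

    from io import BytesIO
    from rdflib import ConjunctiveGraph, URIRef, Literal

    g = ConjunctiveGraph()
    g.add((URIRef("resource:1"), URIRef("predicate:label"), Literal("label 1")))

    data = g.serialize(format="trix")        # serialize to bytes
    g2 = ConjunctiveGraph()
    g2.parse(BytesIO(data), format="trix")   # parse back from a buffer
    assert len(g2) == len(g)                 # triples survive the trip
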
diff --git a/test/test_tsvresults.py b/test/test_tsvresults.py
index 36d8e7fa..5c4d12d0 100644
--- a/test/test_tsvresults.py
+++ b/test/test_tsvresults.py
@@ -4,7 +4,6 @@ from rdflib.plugins.sparql.results.tsvresults import TSVResultParser
class TestTSVResults(unittest.TestCase):
-
def test_empty_tsvresults_bindings(self):
# check that optional bindings are ordered properly
source = """?s\t?p\t?o
diff --git a/test/test_turtle_serialize.py b/test/test_turtle_serialize.py
index c34c1d79..155cdffd 100644
--- a/test/test_turtle_serialize.py
+++ b/test/test_turtle_serialize.py
@@ -12,7 +12,7 @@ def testTurtleFinalDot():
u = URIRef("http://ex.org/bob.")
g.bind("ns", "http://ex.org/")
g.add((u, u, u))
- s = g.serialize(format='turtle')
+ s = g.serialize(format="turtle")
assert "ns:bob.".encode("latin-1") not in s
@@ -50,18 +50,23 @@ def testUnicodeEscaping():
assert len(triples) == 3
print(triples)
# Now check that was decoded into python values properly
- assert triples[0][2] == URIRef(u'http://example.com/aaa\xf3bbbb')
- assert triples[1][2] == URIRef(u'http://example.com/zzz\U00100000zzz')
- assert triples[2][2] == URIRef(u'http://example.com/aaa\xf3bbb')
+ assert triples[0][2] == URIRef(u"http://example.com/aaa\xf3bbbb")
+ assert triples[1][2] == URIRef(u"http://example.com/zzz\U00100000zzz")
+ assert triples[2][2] == URIRef(u"http://example.com/aaa\xf3bbb")
def test_turtle_valid_list():
- NS = Namespace('http://example.org/ns/')
+ NS = Namespace("http://example.org/ns/")
g = Graph()
- g.parse(data="""
+ g.parse(
+ data="""
@prefix : <{0}> .
:s :p (""), (0), (false) .
- """.format(NS), format='turtle')
+ """.format(
+ NS
+ ),
+ format="turtle",
+ )
turtle_serializer = TurtleSerializer(g)
@@ -70,24 +75,30 @@ def test_turtle_valid_list():
def test_turtle_namespace():
- graph = Graph()
- graph.bind('OBO', 'http://purl.obolibrary.org/obo/')
- graph.bind('GENO', 'http://purl.obolibrary.org/obo/GENO_')
- graph.bind('RO', 'http://purl.obolibrary.org/obo/RO_')
- graph.bind('RO_has_phenotype',
- 'http://purl.obolibrary.org/obo/RO_0002200')
- graph.add((URIRef('http://example.org'),
- URIRef('http://purl.obolibrary.org/obo/RO_0002200'),
- URIRef('http://purl.obolibrary.org/obo/GENO_0000385')))
- output = [val for val in
- graph.serialize(format='turtle').decode().splitlines()
- if not val.startswith('@prefix')]
- output = ' '.join(output)
- assert 'RO_has_phenotype:' in output
- assert 'GENO:0000385' in output
+ graph = Graph()
+ graph.bind("OBO", "http://purl.obolibrary.org/obo/")
+ graph.bind("GENO", "http://purl.obolibrary.org/obo/GENO_")
+ graph.bind("RO", "http://purl.obolibrary.org/obo/RO_")
+ graph.bind("RO_has_phenotype", "http://purl.obolibrary.org/obo/RO_0002200")
+ graph.add(
+ (
+ URIRef("http://example.org"),
+ URIRef("http://purl.obolibrary.org/obo/RO_0002200"),
+ URIRef("http://purl.obolibrary.org/obo/GENO_0000385"),
+ )
+ )
+ output = [
+ val
+ for val in graph.serialize(format="turtle").decode().splitlines()
+ if not val.startswith("@prefix")
+ ]
+ output = " ".join(output)
+ assert "RO_has_phenotype:" in output
+ assert "GENO:0000385" in output
if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
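
For the namespace test above, the behaviour under test is prefix
selection when several bound namespaces match an IRI; the test asserts
the more specific GENO_ binding wins, so the object serializes as
GENO:0000385. A self-contained sketch using the same IRIs:

    from rdflib import Graph, URIRef

    g = Graph()
    g.bind("OBO", "http://purl.obolibrary.org/obo/")
    g.bind("GENO", "http://purl.obolibrary.org/obo/GENO_")
    g.add(
        (
            URIRef("http://example.org"),
            URIRef("http://purl.obolibrary.org/obo/RO_0002200"),
            URIRef("http://purl.obolibrary.org/obo/GENO_0000385"),
        )
    )
    # Expect GENO:0000385 in the output rather than OBO:GENO_0000385.
    print(g.serialize(format="turtle").decode())
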
diff --git a/test/test_turtle_sort_issue613.py b/test/test_turtle_sort_issue613.py
index f81cba33..a26ede28 100644
--- a/test/test_turtle_sort_issue613.py
+++ b/test/test_turtle_sort_issue613.py
@@ -17,8 +17,8 @@ https://github.com/RDFLib/rdflib/issues/676
def test_sort_dates():
g = rdflib.Graph()
- y = '''@prefix ex: <http://ex.org> .
-ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, "2016-01-01T00:00:00Z"^^<http://www.w3.org/2001/XMLSchema#dateTime> . '''
+ y = """@prefix ex: <http://ex.org> .
+ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, "2016-01-01T00:00:00Z"^^<http://www.w3.org/2001/XMLSchema#dateTime> . """
p = g.parse(data=y, format="turtle")
p.serialize(format="turtle")
@@ -27,14 +27,14 @@ ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, "2
def test_sort_docfrag():
g = rdflib.Graph()
- y = '''@prefix ex: <http://ex.org> .
+ y = """@prefix ex: <http://ex.org> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
-ex:X ex:p "<h1>hi</h1>"^^rdf:HTML, "<h1>ho</h1>"^^rdf:HTML . '''
+ex:X ex:p "<h1>hi</h1>"^^rdf:HTML, "<h1>ho</h1>"^^rdf:HTML . """
p = g.parse(data=y, format="turtle")
p.serialize(format="turtle")
-if __name__ == '__main__':
+if __name__ == "__main__":
test_sort_docfrag()
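
Context for these two tests: they guard the Turtle serializer against
TypeError when it sorts literals whose Python values are not mutually
orderable (offset-aware vs. naive datetimes in #613, HTML document
fragments in #676). The guarded operation is just parse-then-serialize:

    import rdflib

    data = (
        '@prefix ex: <http://ex.org> . '
        'ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, '
        '"2016-01-01T00:00:00Z"^^<http://www.w3.org/2001/XMLSchema#dateTime> .'
    )
    g = rdflib.Graph()
    g.parse(data=data, format="turtle")
    g.serialize(format="turtle")  # must not raise while sorting objects
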
diff --git a/test/test_turtle_w3c.py b/test/test_turtle_w3c.py
index 469ed023..b89ce66b 100644
--- a/test/test_turtle_w3c.py
+++ b/test/test_turtle_w3c.py
@@ -15,15 +15,15 @@ def turtle(test):
g = Graph()
try:
- base = 'http://www.w3.org/2013/TurtleTests/' + split_uri(test.action)[1]
+ base = "http://www.w3.org/2013/TurtleTests/" + split_uri(test.action)[1]
- g.parse(test.action, publicID=base, format='turtle')
+ g.parse(test.action, publicID=base, format="turtle")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
if test.result: # eval test
res = Graph()
- res.parse(test.result, format='nt')
+ res.parse(test.result, format="nt")
if verbose:
both, first, second = graph_diff(g, res)
@@ -39,9 +39,9 @@ def turtle(test):
print("NT Only")
for t in second:
print(t)
- raise Exception('Graphs do not match!')
+ raise Exception("Graphs do not match!")
- assert isomorphic(g, res), 'graphs must be the same'
+ assert isomorphic(g, res), "graphs must be the same"
except:
if test.syntax:
@@ -52,13 +52,12 @@ testers = {
RDFT.TestTurtlePositiveSyntax: turtle,
RDFT.TestTurtleNegativeSyntax: turtle,
RDFT.TestTurtleEval: turtle,
- RDFT.TestTurtleNegativeEval: turtle
+ RDFT.TestTurtleNegativeEval: turtle,
}
def test_turtle(tests=None):
- for t in nose_tests(testers,
- 'test/w3c/turtle/manifest.ttl'):
+ for t in nose_tests(testers, "test/w3c/turtle/manifest.ttl"):
if tests:
for test in tests:
if test in t[1].uri:
@@ -69,8 +68,8 @@ def test_turtle(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_turtle, 'rdflib_turtle')
+ nose_tst_earl_report(test_turtle, "rdflib_turtle")
diff --git a/test/test_util.py b/test/test_util.py
index 4184b659..89890c8d 100644
--- a/test/test_util.py
+++ b/test/test_util.py
@@ -54,11 +54,13 @@ n3source = """\
class TestUtilMisc(unittest.TestCase):
def setUp(self):
- self.x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ self.x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
def test_util_list2set(self):
- base = [Literal('foo'), self.x]
+ base = [Literal("foo"), self.x]
r = util.list2set(base + base)
self.assertTrue(r == base)
@@ -75,10 +77,11 @@ class TestUtilMisc(unittest.TestCase):
class TestUtilDateTime(unittest.TestCase):
-
def setUp(self):
- self.x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ self.x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
def test_util_date_time_tisnoneandnotz(self):
t = None
@@ -115,8 +118,10 @@ class TestUtilDateTime(unittest.TestCase):
def ablocaltime(t):
from time import gmtime
+
res = gmtime(t)
return res
+
util.localtime = ablocaltime
res = util.date_time(t, local_time_zone=True)
self.assertTrue(res is not t)
@@ -124,8 +129,10 @@ class TestUtilDateTime(unittest.TestCase):
class TestUtilTermConvert(unittest.TestCase):
def setUp(self):
- self.x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ self.x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
def test_util_to_term_sisNone(self):
s = None
@@ -145,12 +152,12 @@ class TestUtilTermConvert(unittest.TestCase):
self.assertEqual(str(res), s[1:-1])
def test_util_to_term_sisbnode(self):
- s = '_http%23%4F%4Fexample%33com'
+ s = "_http%23%4F%4Fexample%33com"
res = util.to_term(s)
self.assertTrue(isinstance(res, BNode))
def test_util_to_term_sisunknown(self):
- s = 'http://example.com'
+ s = "http://example.com"
self.assertRaises(Exception, util.to_term, s)
def test_util_to_term_sisnotstr(self):
@@ -185,7 +192,7 @@ class TestUtilTermConvert(unittest.TestCase):
self.assertTrue(isinstance(res, Literal))
def test_util_from_n3_expecturiref(self):
- s = '<http://example.org/schema>'
+ s = "<http://example.org/schema>"
res = util.from_n3(s, default=None, backend=None)
self.assertTrue(isinstance(res, URIRef))
@@ -198,89 +205,99 @@ class TestUtilTermConvert(unittest.TestCase):
s = '"michel"@fr^^xsd:fr'
res = util.from_n3(s, default=None, backend=None)
self.assertTrue(isinstance(res, Literal))
- self.assertEqual(res, Literal('michel',
- datatype=XSD['fr']))
+ self.assertEqual(res, Literal("michel", datatype=XSD["fr"]))
def test_util_from_n3_expectliteralanddtype(self):
s = '"true"^^xsd:boolean'
res = util.from_n3(s, default=None, backend=None)
- self.assertTrue(res.eq(Literal('true', datatype=XSD['boolean'])))
+ self.assertTrue(res.eq(Literal("true", datatype=XSD["boolean"])))
def test_util_from_n3_expectliteralwithdatatypefromint(self):
- s = '42'
+ s = "42"
res = util.from_n3(s)
self.assertEqual(res, Literal(42))
def test_util_from_n3_expectliteralwithdatatypefrombool(self):
- s = 'true'
+ s = "true"
res = util.from_n3(s)
self.assertEqual(res, Literal(True))
- s = 'false'
+ s = "false"
res = util.from_n3(s)
self.assertEqual(res, Literal(False))
def test_util_from_n3_expectliteralmultiline(self):
s = '"""multi\nline\nstring"""@en'
res = util.from_n3(s, default=None, backend=None)
- self.assertTrue(res, Literal('multi\nline\nstring', lang='en'))
+ self.assertTrue(res, Literal("multi\nline\nstring", lang="en"))
def test_util_from_n3_expectliteralwithescapedquote(self):
s = '"\\""'
res = util.from_n3(s, default=None, backend=None)
- self.assertTrue(res, Literal('\\"', lang='en'))
+ self.assertTrue(res, Literal('\\"', lang="en"))
def test_util_from_n3_expectliteralwithtrailingbackslash(self):
s = '"trailing\\\\"^^<http://www.w3.org/2001/XMLSchema#string>'
res = util.from_n3(s)
- self.assertTrue(res, Literal('trailing\\', datatype=XSD['string']))
+ self.assertTrue(res, Literal("trailing\\", datatype=XSD["string"]))
self.assertTrue(res.n3(), s)
def test_util_from_n3_expectpartialidempotencewithn3(self):
- for n3 in ('<http://ex.com/foo>',
- '"foo"@de',
- u'<http://ex.com/漢字>',
- u'<http://ex.com/a#あ>',
- # '"\\""', # exception as '\\"' --> '"' by orig parser as well
- '"""multi\n"line"\nstring"""@en'):
- self.assertEqual(util.from_n3(n3).n3(), n3,
- 'from_n3(%(n3e)r).n3() != %(n3e)r' % {'n3e': n3})
+ for n3 in (
+ "<http://ex.com/foo>",
+ '"foo"@de',
+ u"<http://ex.com/漢字>",
+ u"<http://ex.com/a#あ>",
+ # '"\\""', # exception as '\\"' --> '"' by orig parser as well
+ '"""multi\n"line"\nstring"""@en',
+ ):
+ self.assertEqual(
+ util.from_n3(n3).n3(),
+ n3,
+ "from_n3(%(n3e)r).n3() != %(n3e)r" % {"n3e": n3},
+ )
def test_util_from_n3_expectsameasn3parser(self):
def parse_n3(term_n3):
- ''' Disclaimer: Quick and dirty hack using the n3 parser. '''
- prepstr = ("@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n"
- "<urn:no_use> <urn:no_use> %s.\n" % term_n3)
+ """ Disclaimer: Quick and dirty hack using the n3 parser. """
+ prepstr = (
+ "@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n"
+ "<urn:no_use> <urn:no_use> %s.\n" % term_n3
+ )
g = ConjunctiveGraph()
- g.parse(data=prepstr, format='n3')
+ g.parse(data=prepstr, format="n3")
return [t for t in g.triples((None, None, None))][0][2]
for n3 in ( # "michel", # won't parse in original parser
# "_:michel", # BNodes won't be the same
'"michel"',
- '<http://example.org/schema>',
+ "<http://example.org/schema>",
'"michel"@fr',
# '"michel"@fr^^xsd:fr', # FIXME: invalid n3, orig parser will prefer datatype
# '"true"^^xsd:boolean', # FIXME: orig parser will expand xsd prefix
- '42',
- 'true',
- 'false',
+ "42",
+ "true",
+ "false",
'"""multi\nline\nstring"""@en',
- '<http://ex.com/foo>',
+ "<http://ex.com/foo>",
'"foo"@de',
'"\\""@en',
- '"""multi\n"line"\nstring"""@en'):
+ '"""multi\n"line"\nstring"""@en',
+ ):
res, exp = util.from_n3(n3), parse_n3(n3)
- self.assertEqual(res, exp,
- 'from_n3(%(n3e)r): %(res)r != parser.notation3: %(exp)r' % {
- 'res': res, 'exp': exp, 'n3e': n3})
+ self.assertEqual(
+ res,
+ exp,
+ "from_n3(%(n3e)r): %(res)r != parser.notation3: %(exp)r"
+ % {"res": res, "exp": exp, "n3e": n3},
+ )
def test_util_from_n3_expectquotedgraph(self):
- s = '{<http://example.com/schema>}'
+ s = "{<http://example.com/schema>}"
res = util.from_n3(s, default=None, backend="IOMemory")
self.assertTrue(isinstance(res, QuotedGraph))
def test_util_from_n3_expectgraph(self):
- s = '[<http://example.com/schema>]'
+ s = "[<http://example.com/schema>]"
res = util.from_n3(s, default=None, backend="IOMemory")
self.assertTrue(isinstance(res, Graph))
@@ -317,35 +334,17 @@ class TestUtilCheckers(unittest.TestCase):
def test_util_check_statement(self):
c = "http://example.com"
- self.assertRaises(
- SubjectTypeError,
- util.check_statement,
- (c, self.p, self.o))
- self.assertRaises(
- PredicateTypeError,
- util.check_statement,
- (self.s, c, self.o))
- self.assertRaises(
- ObjectTypeError,
- util.check_statement,
- (self.s, self.p, c))
+ self.assertRaises(SubjectTypeError, util.check_statement, (c, self.p, self.o))
+ self.assertRaises(PredicateTypeError, util.check_statement, (self.s, c, self.o))
+ self.assertRaises(ObjectTypeError, util.check_statement, (self.s, self.p, c))
res = util.check_statement((self.s, self.p, self.o))
self.assertTrue(res == None)
def test_util_check_pattern(self):
c = "http://example.com"
- self.assertRaises(
- SubjectTypeError,
- util.check_pattern,
- (c, self.p, self.o))
- self.assertRaises(
- PredicateTypeError,
- util.check_pattern,
- (self.s, c, self.o))
- self.assertRaises(
- ObjectTypeError,
- util.check_pattern,
- (self.s, self.p, c))
+ self.assertRaises(SubjectTypeError, util.check_pattern, (c, self.p, self.o))
+ self.assertRaises(PredicateTypeError, util.check_pattern, (self.s, c, self.o))
+ self.assertRaises(ObjectTypeError, util.check_pattern, (self.s, self.p, c))
res = util.check_pattern((self.s, self.p, self.o))
self.assertTrue(res == None)
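
The from_n3 assertions reformatted above are the round-trip contract in
miniature: util.from_n3 maps an N3/Turtle term string to the matching
rdflib term. A compact sketch of cases the tests spell out:

    from rdflib import Literal, URIRef
    from rdflib.util import from_n3

    assert from_n3("<http://example.org/schema>") == URIRef(
        "http://example.org/schema"
    )
    assert from_n3('"michel"@fr') == Literal("michel", lang="fr")
    assert from_n3("42") == Literal(42)      # bare numerals become typed
    assert from_n3("true") == Literal(True)  # likewise booleans
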
diff --git a/test/test_wide_python.py b/test/test_wide_python.py
index feef4519..5463e798 100644
--- a/test/test_wide_python.py
+++ b/test/test_wide_python.py
@@ -1,14 +1,13 @@
-
def test_wide_python_build():
"""This test is meant to fail on narrow python builds (common on Mac OS X).
See https://github.com/RDFLib/rdflib/issues/456 for more information.
"""
- assert len(u'\U0010FFFF') == 1, (
- 'You are using a narrow Python build!\n'
- 'This means that your Python does not properly support chars > 16bit.\n'
+ assert len(u"\U0010FFFF") == 1, (
+ "You are using a narrow Python build!\n"
+ "This means that your Python does not properly support chars > 16bit.\n"
'On your system chars like c=u"\\U0010FFFF" will have a len(c)==2.\n'
- 'As this can cause hard to debug problems with string processing\n'
- '(slicing, regexp, ...) later on, we strongly advise to use a wide\n'
- 'Python build in production systems.'
+ "As this can cause hard to debug problems with string processing\n"
+ "(slicing, regexp, ...) later on, we strongly advise to use a wide\n"
+ "Python build in production systems."
)
diff --git a/test/test_xmlliterals.py b/test/test_xmlliterals.py
index b467e82a..fcc0ddf2 100644
--- a/test/test_xmlliterals.py
+++ b/test/test_xmlliterals.py
@@ -3,24 +3,24 @@ from rdflib import RDF, Graph, Literal
def testPythonRoundtrip():
- l1 = Literal('<msg>hello</msg>', datatype=RDF.XMLLiteral)
- assert l1.value is not None, 'xml must have been parsed'
- assert l1.datatype == RDF.XMLLiteral, 'literal must have right datatype'
+ l1 = Literal("<msg>hello</msg>", datatype=RDF.XMLLiteral)
+ assert l1.value is not None, "xml must have been parsed"
+ assert l1.datatype == RDF.XMLLiteral, "literal must have right datatype"
- l2 = Literal('<msg>good morning</msg>', datatype=RDF.XMLLiteral)
- assert l2.value is not None, 'xml must have been parsed'
- assert not l1.eq(l2), 'literals must NOT be equal'
+ l2 = Literal("<msg>good morning</msg>", datatype=RDF.XMLLiteral)
+ assert l2.value is not None, "xml must have been parsed"
+ assert not l1.eq(l2), "literals must NOT be equal"
l3 = Literal(l1.value)
- assert l1.eq(l3), 'roundtripped literals must be equal'
- assert l3.datatype == RDF.XMLLiteral, 'literal must have right datatype'
+ assert l1.eq(l3), "roundtripped literals must be equal"
+ assert l3.datatype == RDF.XMLLiteral, "literal must have right datatype"
- l4 = Literal('<msg >hello</msg>', datatype=RDF.XMLLiteral)
+ l4 = Literal("<msg >hello</msg>", datatype=RDF.XMLLiteral)
assert l1 == l4
assert l1.eq(l4)
rdflib.NORMALIZE_LITERALS = False
- l4 = Literal('<msg >hello</msg>', datatype=RDF.XMLLiteral)
+ l4 = Literal("<msg >hello</msg>", datatype=RDF.XMLLiteral)
assert l1 != l4
assert l1.eq(l4)
rdflib.NORMALIZE_LITERALS = True
@@ -49,9 +49,13 @@ def testRDFXMLParse():
def graph():
g = rdflib.Graph()
- g.add((rdflib.URIRef('http://example.org/a'),
- rdflib.URIRef('http://example.org/p'),
- rdflib.Literal('<msg>hei</hei>', datatype=RDF.XMLLiteral)))
+ g.add(
+ (
+ rdflib.URIRef("http://example.org/a"),
+ rdflib.URIRef("http://example.org/p"),
+ rdflib.Literal("<msg>hei</hei>", datatype=RDF.XMLLiteral),
+ )
+ )
return g
@@ -65,20 +69,20 @@ def roundtrip(fmt):
def testRoundtrip():
- roundtrip('xml')
- roundtrip('n3')
- roundtrip('nt')
+ roundtrip("xml")
+ roundtrip("n3")
+ roundtrip("nt")
def testHTML():
- l1 = Literal('<msg>hello</msg>', datatype=RDF.XMLLiteral)
- assert l1.value is not None, 'xml must have been parsed'
- assert l1.datatype == RDF.XMLLiteral, 'literal must have right datatype'
+ l1 = Literal("<msg>hello</msg>", datatype=RDF.XMLLiteral)
+ assert l1.value is not None, "xml must have been parsed"
+ assert l1.datatype == RDF.XMLLiteral, "literal must have right datatype"
- l2 = Literal('<msg>hello</msg>', datatype=RDF.HTML)
- assert l2.value is not None, 'xml must have been parsed'
- assert l2.datatype == RDF.HTML, 'literal must have right datatype'
+ l2 = Literal("<msg>hello</msg>", datatype=RDF.HTML)
+ assert l2.value is not None, "xml must have been parsed"
+ assert l2.datatype == RDF.HTML, "literal must have right datatype"
assert l1 != l2
assert not l1.eq(l2)
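
A note on the eq()/== distinction these XML-literal tests rely on: ==
compares lexical forms (normalised at construction time when
rdflib.NORMALIZE_LITERALS is on), while eq() compares parsed values. A
minimal sketch mirroring the l1/l4 pair above:

    from rdflib import RDF, Literal

    l1 = Literal("<msg>hello</msg>", datatype=RDF.XMLLiteral)
    l4 = Literal("<msg >hello</msg>", datatype=RDF.XMLLiteral)

    # Value equality holds regardless of the extra space: both parse to
    # the same DOM fragment.
    assert l1.eq(l4)
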
diff --git a/test/testutils.py b/test/testutils.py
index 20b060d3..03366cfb 100644
--- a/test/testutils.py
+++ b/test/testutils.py
@@ -65,6 +65,7 @@ def _parse_or_report(verbose, graph, *args, **kwargs):
def nose_tst_earl_report(generator, earl_report_name=None):
from optparse import OptionParser
+
p = OptionParser()
(options, args) = p.parse_args()
@@ -74,7 +75,7 @@ def nose_tst_earl_report(generator, earl_report_name=None):
for t in generator(args):
tests += 1
- print('Running ', t[1].uri)
+ print("Running ", t[1].uri)
try:
t[0](t[1])
add_test(t[1].uri, "passed")
@@ -93,11 +94,16 @@ def nose_tst_earl_report(generator, earl_report_name=None):
print_exc()
sys.stderr.write("%s\n" % t[1].uri)
- print("Ran %d tests, %d skipped, %d failed. "%(tests, skip, tests-skip-success))
+ print(
+ "Ran %d tests, %d skipped, %d failed. " % (tests, skip, tests - skip - success)
+ )
if earl_report_name:
now = isodate.datetime_isoformat(datetime.datetime.utcnow())
- earl_report = 'test_reports/%s-%s.ttl' % (earl_report_name, now.replace(":", ""))
+ earl_report = "test_reports/%s-%s.ttl" % (
+ earl_report_name,
+ now.replace(":", ""),
+ )
- report.serialize(earl_report, format='n3')
- report.serialize('test_reports/%s-latest.ttl'%earl_report_name, format='n3')
+ report.serialize(earl_report, format="n3")
+ report.serialize("test_reports/%s-latest.ttl" % earl_report_name, format="n3")
print("Wrote EARL-report to '%s'" % earl_report)
diff --git a/test/triple_store.py b/test/triple_store.py
index b9c5221a..f37bea33 100644
--- a/test/triple_store.py
+++ b/test/triple_store.py
@@ -6,8 +6,8 @@ from rdflib.graph import Graph
class GraphTest(unittest.TestCase):
- backend = 'default'
- path = 'store'
+ backend = "default"
+ path = "store"
def setUp(self):
self.store = Graph(store=self.backend)
diff --git a/test/type_check.py b/test/type_check.py
index 605f0916..19329a39 100644
--- a/test/type_check.py
+++ b/test/type_check.py
@@ -10,9 +10,11 @@ foo = URIRef("foo")
class TypeCheckCase(unittest.TestCase):
- unstable = True # TODO: until we decide if we want to add type checking back to rdflib
- backend = 'default'
- path = 'store'
+ unstable = (
+ True # TODO: until we decide if we want to add type checking back to rdflib
+ )
+ backend = "default"
+ path = "store"
def setUp(self):
self.store = Graph(backend=self.backend)
@@ -22,13 +24,10 @@ class TypeCheckCase(unittest.TestCase):
self.store.close()
def testSubjectTypeCheck(self):
- self.assertRaises(SubjectTypeError,
- self.store.add, (None, foo, foo))
+ self.assertRaises(SubjectTypeError, self.store.add, (None, foo, foo))
def testPredicateTypeCheck(self):
- self.assertRaises(PredicateTypeError,
- self.store.add, (foo, None, foo))
+ self.assertRaises(PredicateTypeError, self.store.add, (foo, None, foo))
def testObjectTypeCheck(self):
- self.assertRaises(ObjectTypeError,
- self.store.add, (foo, foo, None))
+ self.assertRaises(ObjectTypeError, self.store.add, (foo, foo, None))