author     Nicholas Car <nicholas.car@surroundaustralia.com>  2020-05-16 21:00:24 +1000
committer  Nicholas Car <nicholas.car@surroundaustralia.com>  2020-05-16 21:00:24 +1000
commit     0be6f6039479ce29cf71b11e76be54e186130036 (patch)
tree       897d208525a7e8952fb496151db074d49dcdeb3a /rdflib/plugins
parent     2a8d70824e1b4caf0c606074a44ac3a15fa72718 (diff)
download   rdflib-0be6f6039479ce29cf71b11e76be54e186130036.tar.gz
blacked all python files
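
The change is the output of the black autoformatter; no behaviour is altered. The exact invocation is not recorded in the commit, but as a sketch (assuming black 19.10b0, the release current in May 2020, is installed) the same result is reproduced by running "black rdflib" from the repository root, or through black's Python API on a fragment taken from the first hunk below:

    import black

    # black normalizes string quotes, among other things
    src = "__all__ = ['Memory', 'IOMemory']\n"
    print(black.format_str(src, mode=black.FileMode()), end="")
    # prints: __all__ = ["Memory", "IOMemory"]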
Diffstat (limited to 'rdflib/plugins')
-rw-r--r--  rdflib/plugins/memory.py | 64
-rwxr-xr-x  rdflib/plugins/parsers/notation3.py | 871
-rw-r--r--  rdflib/plugins/parsers/nquads.py | 16
-rw-r--r--  rdflib/plugins/parsers/nt.py | 2
-rw-r--r--  rdflib/plugins/parsers/ntriples.py | 53
-rw-r--r--  rdflib/plugins/parsers/rdfxml.py | 151
-rw-r--r--  rdflib/plugins/parsers/trig.py | 43
-rw-r--r--  rdflib/plugins/parsers/trix.py | 60
-rw-r--r--  rdflib/plugins/serializers/n3.py | 28
-rw-r--r--  rdflib/plugins/serializers/nquads.py | 32
-rw-r--r--  rdflib/plugins/serializers/nt.py | 34
-rw-r--r--  rdflib/plugins/serializers/rdfxml.py | 73
-rw-r--r--  rdflib/plugins/serializers/trig.py | 21
-rw-r--r--  rdflib/plugins/serializers/trix.py | 39
-rw-r--r--  rdflib/plugins/serializers/turtle.py | 104
-rw-r--r--  rdflib/plugins/serializers/xmlwriter.py | 19
-rw-r--r--  rdflib/plugins/sleepycat.py | 133
-rw-r--r--  rdflib/plugins/sparql/__init__.py | 2
-rw-r--r--  rdflib/plugins/sparql/aggregates.py | 11
-rw-r--r--  rdflib/plugins/sparql/algebra.py | 240
-rw-r--r--  rdflib/plugins/sparql/datatypes.py | 78
-rw-r--r--  rdflib/plugins/sparql/evaluate.py | 172
-rw-r--r--  rdflib/plugins/sparql/evalutils.py | 15
-rw-r--r--  rdflib/plugins/sparql/operators.py | 190
-rw-r--r--  rdflib/plugins/sparql/parser.py | 1123
-rw-r--r--  rdflib/plugins/sparql/parserutils.py | 24
-rw-r--r--  rdflib/plugins/sparql/processor.py | 13
-rw-r--r--  rdflib/plugins/sparql/results/csvresults.py | 24
-rw-r--r--  rdflib/plugins/sparql/results/graph.py | 10
-rw-r--r--  rdflib/plugins/sparql/results/jsonresults.py | 59
-rw-r--r--  rdflib/plugins/sparql/results/rdfresults.py | 25
-rw-r--r--  rdflib/plugins/sparql/results/tsvresults.py | 51
-rw-r--r--  rdflib/plugins/sparql/results/txtresults.py | 16
-rw-r--r--  rdflib/plugins/sparql/results/xmlresults.py | 150
-rw-r--r--  rdflib/plugins/sparql/sparql.py | 73
-rw-r--r--  rdflib/plugins/sparql/update.py | 37
-rw-r--r--  rdflib/plugins/stores/auditable.py | 62
-rw-r--r--  rdflib/plugins/stores/concurrent.py | 11
-rw-r--r--  rdflib/plugins/stores/regexmatching.py | 70
-rw-r--r--  rdflib/plugins/stores/sparqlconnector.py | 65
-rw-r--r--  rdflib/plugins/stores/sparqlstore.py | 253
41 files changed, 2638 insertions, 1879 deletions
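
The hunks that follow are mechanical: black normalizes single quotes to double quotes, replaces backslash line continuations with parenthesized expressions, rewraps long argument lists one per line, pads inline comments to two leading spaces, and spaces out ':' in slices whose bounds are expressions. A hand-written illustration of the slice rule (not taken from this commit):

    # before black
    ch = argstr[j:j + 1]
    # after black: complex slice bounds are treated like binary operators (PEP 8)
    ch = argstr[j : j + 1]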
diff --git a/rdflib/plugins/memory.py b/rdflib/plugins/memory.py
index 1c1161e9..c102799f 100644
--- a/rdflib/plugins/memory.py
+++ b/rdflib/plugins/memory.py
@@ -6,7 +6,7 @@ import random
from rdflib.store import Store, NO_STORE, VALID_STORE
-__all__ = ['Memory', 'IOMemory']
+__all__ = ["Memory", "IOMemory"]
ANY = Any = None
@@ -96,14 +96,12 @@ class Memory(Store):
if predicate in subjectDictionary:
if object != ANY: # subject+predicate+object is given
if object in subjectDictionary[predicate]:
- yield (subject, predicate, object), \
- self.__contexts()
+ yield (subject, predicate, object), self.__contexts()
else: # given object not found
pass
else: # subject+predicate is given, object unbound
for o in subjectDictionary[predicate].keys():
- yield (subject, predicate, o), \
- self.__contexts()
+ yield (subject, predicate, o), self.__contexts()
else: # given predicate not found
pass
else: # subject given, predicate unbound
@@ -196,6 +194,7 @@ class IOMemory(Store):
slow.
"""
+
context_aware = True
formula_aware = True
graph_aware = True
@@ -222,11 +221,12 @@ class IOMemory(Store):
self.__obj2int = {None: None} # maps objects to integer keys
# Indexes for each triple part, and a list of contexts for each triple
- self.__subjectIndex = {} # key: sid val: set(enctriples)
+ self.__subjectIndex = {} # key: sid val: set(enctriples)
self.__predicateIndex = {} # key: pid val: set(enctriples)
- self.__objectIndex = {} # key: oid val: set(enctriples)
- self.__tripleContexts = {
- } # key: enctriple val: {cid1: quoted, cid2: quoted ...}
+ self.__objectIndex = {} # key: oid val: set(enctriples)
+ self.__tripleContexts = (
+ {}
+ ) # key: enctriple val: {cid1: quoted, cid2: quoted ...}
self.__contextTriples = {None: set()} # key: cid val: set(enctriples)
# all contexts used in store (unencoded)
@@ -294,16 +294,20 @@ class IOMemory(Store):
del self.__tripleContexts[enctriple]
- if not req_cid is None and \
- req_cid in self.__contextTriples and \
- len(self.__contextTriples[req_cid]) == 0:
+ if (
+ not req_cid is None
+ and req_cid in self.__contextTriples
+ and len(self.__contextTriples[req_cid]) == 0
+ ):
# all triples are removed out of this context
# and it's not the default context so delete it
del self.__contextTriples[req_cid]
- if triplepat == (None, None, None) and \
- context in self.__all_contexts and \
- not self.graph_aware:
+ if (
+ triplepat == (None, None, None)
+ and context in self.__all_contexts
+ and not self.graph_aware
+ ):
# remove the whole context
self.__all_contexts.remove(context)
@@ -322,9 +326,11 @@ class IOMemory(Store):
# optimize "triple in graph" case (all parts given)
if sid is not None and pid is not None and oid is not None:
- if sid in self.__subjectIndex and \
- enctriple in self.__subjectIndex[sid] and \
- self.__tripleHasContext(enctriple, cid):
+ if (
+ sid in self.__subjectIndex
+ and enctriple in self.__subjectIndex[sid]
+ and self.__tripleHasContext(enctriple, cid)
+ ):
return ((triplein, self.__contexts(enctriple)) for i in [0])
else:
return self.__emptygen()
@@ -353,9 +359,11 @@ class IOMemory(Store):
else:
enctriples = sets[0].copy()
- return ((self.__decodeTriple(enctriple), self.__contexts(enctriple))
- for enctriple in enctriples
- if self.__tripleHasContext(enctriple, cid))
+ return (
+ (self.__decodeTriple(enctriple), self.__contexts(enctriple))
+ for enctriple in enctriples
+ if self.__tripleHasContext(enctriple, cid)
+ )
def contexts(self, triple=None):
if triple is None or triple == (None, None, None):
@@ -402,8 +410,7 @@ class IOMemory(Store):
if enctriple not in self.__tripleContexts:
# triple exists with default ctx info
# start with a copy of the default ctx info
- self.__tripleContexts[
- enctriple] = self.__defaultContexts.copy()
+ self.__tripleContexts[enctriple] = self.__defaultContexts.copy()
self.__tripleContexts[enctriple][cid] = quoted
if not quoted:
@@ -446,12 +453,11 @@ class IOMemory(Store):
def __tripleHasContext(self, enctriple, cid):
"""return True iff the triple exists in the given context"""
ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts)
- return (cid in ctxs)
+ return cid in ctxs
def __removeTripleContext(self, enctriple, cid):
"""remove the context from the triple"""
- ctxs = self.__tripleContexts.get(
- enctriple, self.__defaultContexts).copy()
+ ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts).copy()
del ctxs[cid]
if ctxs == self.__defaultContexts:
del self.__tripleContexts[enctriple]
@@ -491,7 +497,11 @@ class IOMemory(Store):
def __contexts(self, enctriple):
"""return a generator for all the non-quoted contexts
(unencoded) the encoded triple appears in"""
- return (self.__int2obj.get(cid) for cid in self.__getTripleContexts(enctriple, skipQuoted=True) if cid is not None)
+ return (
+ self.__int2obj.get(cid)
+ for cid in self.__getTripleContexts(enctriple, skipQuoted=True)
+ if cid is not None
+ )
def __emptygen(self):
"""return an empty generator"""
diff --git a/rdflib/plugins/parsers/notation3.py b/rdflib/plugins/parsers/notation3.py
index 44a25adc..3bc2169f 100755
--- a/rdflib/plugins/parsers/notation3.py
+++ b/rdflib/plugins/parsers/notation3.py
@@ -48,9 +48,17 @@ from rdflib.graph import QuotedGraph, ConjunctiveGraph, Graph
from rdflib.compat import long_type
from rdflib.compat import narrow_build
-__all__ = ['BadSyntax', 'N3Parser', 'TurtleParser',
- "splitFragP", "join", "base",
- "runNamespace", "uniqueURI", "hexify"]
+__all__ = [
+ "BadSyntax",
+ "N3Parser",
+ "TurtleParser",
+ "splitFragP",
+ "join",
+ "base",
+ "runNamespace",
+ "uniqueURI",
+ "hexify",
+]
from rdflib.parser import Parser
@@ -74,7 +82,7 @@ def splitFragP(uriref, punct=0):
if i >= 0:
return uriref[:i], uriref[i:]
else:
- return uriref, ''
+ return uriref, ""
def join(here, there):
@@ -112,65 +120,67 @@ def join(here, there):
u'http://example.org/#Andr\\xe9'
"""
-# assert(here.find("#") < 0), \
-# "Base may not contain hash: '%s'" % here # why must caller splitFrag?
+ # assert(here.find("#") < 0), \
+ # "Base may not contain hash: '%s'" % here # why must caller splitFrag?
- slashl = there.find('/')
- colonl = there.find(':')
+ slashl = there.find("/")
+ colonl = there.find(":")
# join(base, 'foo:/') -- absolute
if colonl >= 0 and (slashl < 0 or colonl < slashl):
return there
- bcolonl = here.find(':')
- assert(bcolonl >= 0), \
- "Base uri '%s' is not absolute" % here # else it's not absolute
+ bcolonl = here.find(":")
+ assert bcolonl >= 0, (
+ "Base uri '%s' is not absolute" % here
+ ) # else it's not absolute
path, frag = splitFragP(there)
if not path:
return here + frag
# join('mid:foo@example', '../foo') bzzt
- if here[bcolonl + 1:bcolonl + 2] != '/':
+ if here[bcolonl + 1 : bcolonl + 2] != "/":
raise ValueError(
- ("Base <%s> has no slash after "
- "colon - with relative '%s'.") % (here, there))
+ ("Base <%s> has no slash after " "colon - with relative '%s'.")
+ % (here, there)
+ )
- if here[bcolonl + 1:bcolonl + 3] == '//':
- bpath = here.find('/', bcolonl + 3)
+ if here[bcolonl + 1 : bcolonl + 3] == "//":
+ bpath = here.find("/", bcolonl + 3)
else:
bpath = bcolonl + 1
# join('http://xyz', 'foo')
if bpath < 0:
bpath = len(here)
- here = here + '/'
+ here = here + "/"
# join('http://xyz/', '//abc') => 'http://abc'
- if there[:2] == '//':
- return here[:bcolonl + 1] + there
+ if there[:2] == "//":
+ return here[: bcolonl + 1] + there
# join('http://xyz/', '/abc') => 'http://xyz/abc'
- if there[:1] == '/':
+ if there[:1] == "/":
return here[:bpath] + there
- slashr = here.rfind('/')
+ slashr = here.rfind("/")
while 1:
- if path[:2] == './':
+ if path[:2] == "./":
path = path[2:]
- if path == '.':
- path = ''
- elif path[:3] == '../' or path == '..':
+ if path == ".":
+ path = ""
+ elif path[:3] == "../" or path == "..":
path = path[3:]
- i = here.rfind('/', bpath, slashr)
+ i = here.rfind("/", bpath, slashr)
if i >= 0:
- here = here[:i + 1]
+ here = here[: i + 1]
slashr = i
else:
break
- return here[:slashr + 1] + path + frag
+ return here[: slashr + 1] + path + frag
def base():
@@ -190,7 +200,7 @@ def _fixslash(s):
""" Fix windowslike filename to unixlike - (#ifdef WINDOWS)"""
s = s.replace("\\", "/")
if s[0] != "/" and s[1] == ":":
- s = s[2:] # @@@ Hack when drive letter present
+ s = s[2:] # @@@ Hack when drive letter present
return s
@@ -211,7 +221,7 @@ ANONYMOUS = 3
XMLLITERAL = 25
Logic_NS = "http://www.w3.org/2000/10/swap/log#"
-NODE_MERGE_URI = Logic_NS + "is" # Pseudo-property indicating node merging
+NODE_MERGE_URI = Logic_NS + "is" # Pseudo-property indicating node merging
forSomeSym = Logic_NS + "forSome"
forAllSym = Logic_NS + "forAll"
@@ -222,7 +232,7 @@ DAML_sameAs_URI = OWL_NS + "sameAs"
parsesTo_URI = Logic_NS + "parsesTo"
RDF_spec = "http://www.w3.org/TR/REC-rdf-syntax/"
-List_NS = RDF_NS_URI # From 20030808
+List_NS = RDF_NS_URI # From 20030808
_Old_Logic_NS = "http://www.w3.org/2000/10/swap/log.n3#"
N3_first = (SYMBOL, List_NS + "first")
@@ -238,12 +248,13 @@ runNamespaceValue = None
def runNamespace():
"Return a URI suitable as a namespace for run-local objects"
- # @@@ include hostname (privacy?) (hash it?)
+ # @@@ include hostname (privacy?) (hash it?)
global runNamespaceValue
if runNamespaceValue is None:
- runNamespaceValue = join(base(), _unique_id()) + '#'
+ runNamespaceValue = join(base(), _unique_id()) + "#"
return runNamespaceValue
+
nextu = 0
@@ -262,20 +273,21 @@ chatty_flag = 50
def BecauseOfData(*args, **kargs):
- # print args, kargs
+ # print args, kargs
pass
def becauseSubexpression(*args, **kargs):
- # print args, kargs
+ # print args, kargs
pass
+
N3_forSome_URI = forSomeSym
N3_forAll_URI = forAllSym
# Magic resources we know about
-ADDED_HASH = "#" # Stop where we use this in case we want to remove it!
+ADDED_HASH = "#" # Stop where we use this in case we want to remove it!
# This is the hash on namespace URIs
RDF_type = (SYMBOL, RDF_type_URI)
@@ -289,20 +301,20 @@ DOUBLE_DATATYPE = _XSD_PFX + "double"
FLOAT_DATATYPE = _XSD_PFX + "float"
INTEGER_DATATYPE = _XSD_PFX + "integer"
-option_noregen = 0 # If set, do not regenerate genids on output
+option_noregen = 0 # If set, do not regenerate genids on output
# @@ I18n - the notname chars need extending for well known unicode non-text
# characters. The XML spec switched to assuming unknown things were name
# characaters.
# _namechars = string.lowercase + string.uppercase + string.digits + '_-'
-_notQNameChars = \
- "\t\r\n !\"#$&'()*,+/;<=>?@[\\]^`{|}~" # else valid qname :-/
+_notQNameChars = "\t\r\n !\"#$&'()*,+/;<=>?@[\\]^`{|}~" # else valid qname :-/
_notKeywordsChars = _notQNameChars + "."
-_notNameChars = _notQNameChars + ":" # Assume anything else valid name :-/
-_rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
+_notNameChars = _notQNameChars + ":" # Assume anything else valid name :-/
+_rdfns = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+
+hexChars = "ABCDEFabcdef0123456789"
+escapeChars = "(_~.-!$&'()*+,;=/?#@%)" # valid for \ escapes in localnames
-hexChars = 'ABCDEFabcdef0123456789'
-escapeChars = "(_~.-!$&'()*+,;=/?#@%)" # valid for \ escapes in localnames
def unicodeExpand(m):
try:
@@ -310,81 +322,90 @@ def unicodeExpand(m):
except:
raise Exception("Invalid unicode code point: " + m.group(1))
+
if narrow_build:
+
def unicodeExpand(m):
try:
return chr(int(m.group(1), 16))
except ValueError:
warnings.warn(
- 'Encountered a unicode char > 0xFFFF in a narrow python build. '
- 'Trying to degrade gracefully, but this can cause problems '
- 'later when working with the string:\n%s' % m.group(0))
- return codecs.decode(m.group(0), 'unicode_escape')
+ "Encountered a unicode char > 0xFFFF in a narrow python build. "
+ "Trying to degrade gracefully, but this can cause problems "
+ "later when working with the string:\n%s" % m.group(0)
+ )
+ return codecs.decode(m.group(0), "unicode_escape")
-unicodeEscape4 = re.compile(
- r'\\u([0-9a-fA-F]{4})')
-unicodeEscape8 = re.compile(
- r'\\U([0-9a-fA-F]{8})')
+unicodeEscape4 = re.compile(r"\\u([0-9a-fA-F]{4})")
+unicodeEscape8 = re.compile(r"\\U([0-9a-fA-F]{8})")
-N3CommentCharacter = "#" # For unix script # ! compatabilty
+N3CommentCharacter = "#" # For unix script # ! compatabilty
########################################## Parse string to sink
#
# Regular expressions:
-eol = re.compile(
- r'[ \t]*(#[^\n]*)?\r?\n') # end of line, poss. w/comment
-eof = re.compile(
- r'[ \t]*(#[^\n]*)?$') # end of file, poss. w/comment
-ws = re.compile(r'[ \t]*') # Whitespace not including NL
-signed_integer = re.compile(r'[-+]?[0-9]+') # integer
-integer_syntax = re.compile(r'[-+]?[0-9]+')
-decimal_syntax = re.compile(r'[-+]?[0-9]*\.[0-9]+')
-exponent_syntax = re.compile(r'[-+]?(?:[0-9]+\.[0-9]*|\.[0-9]+|[0-9]+)(?:e|E)[-+]?[0-9]+')
-digitstring = re.compile(r'[0-9]+') # Unsigned integer
+eol = re.compile(r"[ \t]*(#[^\n]*)?\r?\n") # end of line, poss. w/comment
+eof = re.compile(r"[ \t]*(#[^\n]*)?$") # end of file, poss. w/comment
+ws = re.compile(r"[ \t]*") # Whitespace not including NL
+signed_integer = re.compile(r"[-+]?[0-9]+") # integer
+integer_syntax = re.compile(r"[-+]?[0-9]+")
+decimal_syntax = re.compile(r"[-+]?[0-9]*\.[0-9]+")
+exponent_syntax = re.compile(
+ r"[-+]?(?:[0-9]+\.[0-9]*|\.[0-9]+|[0-9]+)(?:e|E)[-+]?[0-9]+"
+)
+digitstring = re.compile(r"[0-9]+") # Unsigned integer
interesting = re.compile(r"""[\\\r\n\"\']""")
-langcode = re.compile(r'[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*')
+langcode = re.compile(r"[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*")
class SinkParser:
- def __init__(self, store, openFormula=None, thisDoc="", baseURI=None,
- genPrefix="", why=None, turtle=False):
+ def __init__(
+ self,
+ store,
+ openFormula=None,
+ thisDoc="",
+ baseURI=None,
+ genPrefix="",
+ why=None,
+ turtle=False,
+ ):
""" note: namespace names should *not* end in # ;
the # will get added during qname processing """
self._bindings = {}
if thisDoc != "":
- assert ':' in thisDoc, "Document URI not absolute: <%s>" % thisDoc
- self._bindings[""] = thisDoc + "#" # default
+ assert ":" in thisDoc, "Document URI not absolute: <%s>" % thisDoc
+ self._bindings[""] = thisDoc + "#" # default
self._store = store
if genPrefix:
store.setGenPrefix(genPrefix) # pass it on
self._thisDoc = thisDoc
- self.lines = 0 # for error handling
- self.startOfLine = 0 # For calculating character number
+ self.lines = 0 # for error handling
+ self.startOfLine = 0 # For calculating character number
self._genPrefix = genPrefix
- self.keywords = ['a', 'this', 'bind', 'has', 'is', 'of',
- 'true', 'false']
- self.keywordsSet = 0 # Then only can others be considerd qnames
+ self.keywords = ["a", "this", "bind", "has", "is", "of", "true", "false"]
+ self.keywordsSet = 0 # Then only can others be considerd qnames
self._anonymousNodes = {}
- # Dict of anon nodes already declared ln: Term
+ # Dict of anon nodes already declared ln: Term
self._variables = {}
self._parentVariables = {}
- self._reason = why # Why the parser was asked to parse this
+ self._reason = why # Why the parser was asked to parse this
- self.turtle = turtle # raise exception when encountering N3 extensions
+ self.turtle = turtle # raise exception when encountering N3 extensions
# Turtle allows single or double quotes around strings, whereas N3
# only allows double quotes.
self.string_delimiters = ('"', "'") if turtle else ('"',)
- self._reason2 = None # Why these triples
- # was: diag.tracking
+ self._reason2 = None # Why these triples
+ # was: diag.tracking
if tracking:
self._reason2 = BecauseOfData(
- store.newSymbol(thisDoc), because=self._reason)
+ store.newSymbol(thisDoc), because=self._reason
+ )
if baseURI:
self._baseURI = baseURI
@@ -394,7 +415,7 @@ class SinkParser:
else:
self._baseURI = None
- assert not self._baseURI or ':' in self._baseURI
+ assert not self._baseURI or ":" in self._baseURI
if not self._genPrefix:
if self._thisDoc:
@@ -424,21 +445,20 @@ class SinkParser:
_L1C1. It used to be used only for tracking, but for tests in general
it makes the canonical ordering of bnodes repeatable."""
- return "%s_L%iC%i" % (self._genPrefix, self.lines,
- i - self.startOfLine + 1)
+ return "%s_L%iC%i" % (self._genPrefix, self.lines, i - self.startOfLine + 1)
def formula(self):
return self._formula
def loadStream(self, stream):
- return self.loadBuf(stream.read()) # Not ideal
+ return self.loadBuf(stream.read()) # Not ideal
def loadBuf(self, buf):
"""Parses a buffer and returns its top level formula"""
self.startDoc()
self.feed(buf)
- return self.endDoc() # self._formula
+ return self.endDoc() # self._formula
def feed(self, octets):
"""Feed an octet stream tothe parser
@@ -450,9 +470,9 @@ class SinkParser:
parser, it should be straightforward to recover."""
if not isinstance(octets, str):
- s = octets.decode('utf-8')
- # NB already decoded, so \ufeff
- if len(s) > 0 and s[0] == codecs.BOM_UTF8.decode('utf-8'):
+ s = octets.decode("utf-8")
+ # NB already decoded, so \ufeff
+ if len(s) > 0 and s[0] == codecs.BOM_UTF8.decode("utf-8"):
s = s[1:]
else:
s = octets
@@ -465,15 +485,14 @@ class SinkParser:
i = self.directiveOrStatement(s, j)
if i < 0:
- #print("# next char: %s" % s[j])
- self.BadSyntax(s, j,
- "expected directive or statement")
+ # print("# next char: %s" % s[j])
+ self.BadSyntax(s, j, "expected directive or statement")
def directiveOrStatement(self, argstr, h):
i = self.skipSpace(argstr, h)
if i < 0:
- return i # EOF
+ return i # EOF
if self.turtle:
j = self.sparqlDirective(argstr, i)
@@ -490,8 +509,8 @@ class SinkParser:
return j
- # @@I18N
- # _namechars = string.lowercase + string.uppercase + string.digits + '_-'
+ # @@I18N
+ # _namechars = string.lowercase + string.uppercase + string.digits + '_-'
def tok(self, tok, argstr, i, colon=False):
"""Check for keyword. Space must have been stripped on entry and
@@ -502,15 +521,17 @@ class SinkParser:
"""
assert tok[0] not in _notNameChars # not for punctuation
- if argstr[i:i + 1] == "@":
+ if argstr[i : i + 1] == "@":
i = i + 1
else:
if tok not in self.keywords:
return -1 # No, this has neither keywords declaration nor "@"
- if (argstr[i:i + len(tok)] == tok
- and ( argstr[i + len(tok)] in _notKeywordsChars)
- or (colon and argstr[i+len(tok)] == ':')):
+ if (
+ argstr[i : i + len(tok)] == tok
+ and (argstr[i + len(tok)] in _notKeywordsChars)
+ or (colon and argstr[i + len(tok)] == ":")
+ ):
i = i + len(tok)
return i
else:
@@ -524,109 +545,114 @@ class SinkParser:
assert tok[0] not in _notNameChars # not for punctuation
- if (argstr[i:i + len(tok)].lower() == tok.lower()
- and (argstr[i + len(tok)] in _notQNameChars)):
+ if argstr[i : i + len(tok)].lower() == tok.lower() and (
+ argstr[i + len(tok)] in _notQNameChars
+ ):
i = i + len(tok)
return i
else:
return -1
-
def directive(self, argstr, i):
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
res = []
- j = self.tok('bind', argstr, i) # implied "#". Obsolete.
+ j = self.tok("bind", argstr, i) # implied "#". Obsolete.
if j > 0:
- self.BadSyntax(argstr, i,
- "keyword bind is obsolete: use @prefix")
+ self.BadSyntax(argstr, i, "keyword bind is obsolete: use @prefix")
- j = self.tok('keywords', argstr, i)
+ j = self.tok("keywords", argstr, i)
if j > 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'keywords' when in Turtle mode.")
i = self.commaSeparatedList(argstr, j, res, self.bareWord)
if i < 0:
- self.BadSyntax(argstr, i,
- "'@keywords' needs comma separated list of words")
+ self.BadSyntax(
+ argstr, i, "'@keywords' needs comma separated list of words"
+ )
self.setKeywords(res[:])
return i
- j = self.tok('forAll', argstr, i)
+ j = self.tok("forAll", argstr, i)
if j > 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'forAll' when in Turtle mode.")
i = self.commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
- self.BadSyntax(argstr, i,
- "Bad variable list after @forAll")
+ self.BadSyntax(argstr, i, "Bad variable list after @forAll")
for x in res:
- # self._context.declareUniversal(x)
+ # self._context.declareUniversal(x)
if x not in self._variables or x in self._parentVariables:
self._variables[x] = self._context.newUniversal(x)
return i
- j = self.tok('forSome', argstr, i)
+ j = self.tok("forSome", argstr, i)
if j > 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'forSome' when in Turtle mode.")
- i = self. commaSeparatedList(argstr, j, res, self.uri_ref2)
+ i = self.commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
- self.BadSyntax(argstr, i,
- "Bad variable list after @forSome")
+ self.BadSyntax(argstr, i, "Bad variable list after @forSome")
for x in res:
self._context.declareExistential(x)
return i
- j = self.tok('prefix', argstr, i, colon=True) # no implied "#"
+ j = self.tok("prefix", argstr, i, colon=True) # no implied "#"
if j >= 0:
t = []
i = self.qname(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected qname after @prefix")
+ self.BadSyntax(argstr, j, "expected qname after @prefix")
j = self.uri_ref2(argstr, i, t)
if j < 0:
- self.BadSyntax(argstr, i,
- "expected <uriref> after @prefix _qname_")
+ self.BadSyntax(argstr, i, "expected <uriref> after @prefix _qname_")
ns = self.uriOf(t[1])
if self._baseURI:
ns = join(self._baseURI, ns)
elif ":" not in ns:
- self.BadSyntax(argstr, j,
- "With no base URI, cannot use " +
- "relative URI in @prefix <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no base URI, cannot use "
+ + "relative URI in @prefix <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._bindings[t[0][0]] = ns
self.bind(t[0][0], hexify(ns))
return j
- j = self.tok('base', argstr, i) # Added 2007/7/7
+ j = self.tok("base", argstr, i) # Added 2007/7/7
if j >= 0:
t = []
i = self.uri_ref2(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected <uri> after @base ")
+ self.BadSyntax(argstr, j, "expected <uri> after @base ")
ns = self.uriOf(t[0])
if self._baseURI:
ns = join(self._baseURI, ns)
else:
- self.BadSyntax(argstr, j,
- "With no previous base URI, cannot use " +
- "relative URI in @base <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no previous base URI, cannot use "
+ + "relative URI in @base <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._baseURI = ns
return i
- return -1 # Not a directive, could be something else.
+ return -1 # Not a directive, could be something else.
def sparqlDirective(self, argstr, i):
@@ -639,55 +665,60 @@ class SinkParser:
if j < 0:
return j # eof
- j = self.sparqlTok('PREFIX', argstr, i)
+ j = self.sparqlTok("PREFIX", argstr, i)
if j >= 0:
t = []
i = self.qname(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected qname after @prefix")
+ self.BadSyntax(argstr, j, "expected qname after @prefix")
j = self.uri_ref2(argstr, i, t)
if j < 0:
- self.BadSyntax(argstr, i,
- "expected <uriref> after @prefix _qname_")
+ self.BadSyntax(argstr, i, "expected <uriref> after @prefix _qname_")
ns = self.uriOf(t[1])
if self._baseURI:
ns = join(self._baseURI, ns)
elif ":" not in ns:
- self.BadSyntax(argstr, j,
- "With no base URI, cannot use " +
- "relative URI in @prefix <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no base URI, cannot use "
+ + "relative URI in @prefix <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._bindings[t[0][0]] = ns
self.bind(t[0][0], hexify(ns))
return j
- j = self.sparqlTok('BASE', argstr, i)
+ j = self.sparqlTok("BASE", argstr, i)
if j >= 0:
t = []
i = self.uri_ref2(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected <uri> after @base ")
+ self.BadSyntax(argstr, j, "expected <uri> after @base ")
ns = self.uriOf(t[0])
if self._baseURI:
ns = join(self._baseURI, ns)
else:
- self.BadSyntax(argstr, j,
- "With no previous base URI, cannot use " +
- "relative URI in @base <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no previous base URI, cannot use "
+ + "relative URI in @base <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._baseURI = ns
return i
- return -1 # Not a directive, could be something else.
-
+ return -1 # Not a directive, could be something else.
def bind(self, qn, uri):
- assert isinstance(
- uri, bytes), "Any unicode must be %x-encoded already"
+ assert isinstance(uri, bytes), "Any unicode must be %x-encoded already"
if qn == "":
self._store.setDefaultNamespace(uri)
else:
@@ -702,31 +733,29 @@ class SinkParser:
self.keywordsSet = 1
def startDoc(self):
- # was: self._store.startDoc()
+ # was: self._store.startDoc()
self._store.startDoc(self._formula)
def endDoc(self):
"""Signal end of document and stop parsing. returns formula"""
- self._store.endDoc(self._formula) # don't canonicalize yet
+ self._store.endDoc(self._formula) # don't canonicalize yet
return self._formula
def makeStatement(self, quadruple):
- # $$$$$$$$$$$$$$$$$$$$$
- # print "# Parser output: ", `quadruple`
+ # $$$$$$$$$$$$$$$$$$$$$
+ # print "# Parser output: ", `quadruple`
self._store.makeStatement(quadruple, why=self._reason2)
def statement(self, argstr, i):
r = []
- i = self.object(
- argstr, i, r) # Allow literal for subject - extends RDF
+ i = self.object(argstr, i, r) # Allow literal for subject - extends RDF
if i < 0:
return i
j = self.property_list(argstr, i, r[0])
if j < 0:
- self.BadSyntax(
- argstr, i, "expected propertylist")
+ self.BadSyntax(argstr, i, "expected propertylist")
return j
def subject(self, argstr, i, res):
@@ -748,77 +777,73 @@ class SinkParser:
r = []
- j = self.tok('has', argstr, i)
+ j = self.tok("has", argstr, i)
if j >= 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'has' keyword in Turtle mode")
i = self.prop(argstr, j, r)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected property after 'has'")
- res.append(('->', r[0]))
+ self.BadSyntax(argstr, j, "expected property after 'has'")
+ res.append(("->", r[0]))
return i
- j = self.tok('is', argstr, i)
+ j = self.tok("is", argstr, i)
if j >= 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'is' keyword in Turtle mode")
i = self.prop(argstr, j, r)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected <property> after 'is'")
+ self.BadSyntax(argstr, j, "expected <property> after 'is'")
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "End of file found, expected property after 'is'")
+ self.BadSyntax(
+ argstr, i, "End of file found, expected property after 'is'"
+ )
i = j
- j = self.tok('of', argstr, i)
+ j = self.tok("of", argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "expected 'of' after 'is' <prop>")
- res.append(('<-', r[0]))
+ self.BadSyntax(argstr, i, "expected 'of' after 'is' <prop>")
+ res.append(("<-", r[0]))
return j
- j = self.tok('a', argstr, i)
+ j = self.tok("a", argstr, i)
if j >= 0:
- res.append(('->', RDF_type))
+ res.append(("->", RDF_type))
return j
- if argstr[i:i + 2] == "<=":
+ if argstr[i : i + 2] == "<=":
if self.turtle:
- self.BadSyntax(argstr, i,
- "Found '<=' in Turtle mode. ")
+ self.BadSyntax(argstr, i, "Found '<=' in Turtle mode. ")
- res.append(('<-', self._store.newSymbol(Logic_NS + "implies")))
+ res.append(("<-", self._store.newSymbol(Logic_NS + "implies")))
return i + 2
- if argstr[i:i + 1] == "=":
+ if argstr[i : i + 1] == "=":
if self.turtle:
self.BadSyntax(argstr, i, "Found '=' in Turtle mode")
- if argstr[i + 1:i + 2] == ">":
- res.append(('->', self._store.newSymbol(Logic_NS + "implies")))
+ if argstr[i + 1 : i + 2] == ">":
+ res.append(("->", self._store.newSymbol(Logic_NS + "implies")))
return i + 2
- res.append(('->', DAML_sameAs))
+ res.append(("->", DAML_sameAs))
return i + 1
- if argstr[i:i + 2] == ":=":
+ if argstr[i : i + 2] == ":=":
if self.turtle:
self.BadSyntax(argstr, i, "Found ':=' in Turtle mode")
- # patch file relates two formulae, uses this @@ really?
- res.append(('->', Logic_NS + "becomes"))
+ # patch file relates two formulae, uses this @@ really?
+ res.append(("->", Logic_NS + "becomes"))
return i + 2
j = self.prop(argstr, i, r)
if j >= 0:
- res.append(('->', r[0]))
+ res.append(("->", r[0]))
return j
- if argstr[i:i + 2] == ">-" or argstr[i:i + 2] == "<-":
- self.BadSyntax(argstr, j,
- ">- ... -> syntax is obsolete.")
+ if argstr[i : i + 2] == ">-" or argstr[i : i + 2] == "<-":
+ self.BadSyntax(argstr, j, ">- ... -> syntax is obsolete.")
return -1
@@ -836,16 +861,15 @@ class SinkParser:
"""
j = self.nodeOrLiteral(argstr, i, res)
if j < 0:
- return j # nope
+ return j # nope
- while argstr[j:j + 1] in "!^": # no spaces, must follow exactly (?)
- ch = argstr[j:j + 1]
+ while argstr[j : j + 1] in "!^": # no spaces, must follow exactly (?)
+ ch = argstr[j : j + 1]
subj = res.pop()
obj = self.blankNode(uri=self.here(j))
j = self.node(argstr, j + 1, res)
if j < 0:
- self.BadSyntax(argstr, j,
- "EOF found in middle of path syntax")
+ self.BadSyntax(argstr, j, "EOF found in middle of path syntax")
pred = res.pop()
if ch == "^": # Reverse traverse
self.makeStatement((self._context, pred, obj, subj))
@@ -874,18 +898,19 @@ class SinkParser:
if j < 0:
return j # eof
i = j
- ch = argstr[i:i + 1] # Quick 1-character checks first:
+ ch = argstr[i : i + 1] # Quick 1-character checks first:
if ch == "[":
bnodeID = self.here(i)
j = self.skipSpace(argstr, i + 1)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF after '['")
+ self.BadSyntax(argstr, i, "EOF after '['")
# Hack for "is" binding name to anon node
- if argstr[j:j + 1] == "=":
+ if argstr[j : j + 1] == "=":
if self.turtle:
- self.BadSyntax(argstr, j, "Found '[=' or '[ =' when in turtle mode.")
+ self.BadSyntax(
+ argstr, j, "Found '[=' or '[ =' when in turtle mode."
+ )
i = j + 1
objs = []
j = self.objectList(argstr, i, objs)
@@ -893,33 +918,31 @@ class SinkParser:
subj = objs[0]
if len(objs) > 1:
for obj in objs:
- self.makeStatement((self._context,
- DAML_sameAs, subj, obj))
+ self.makeStatement((self._context, DAML_sameAs, subj, obj))
j = self.skipSpace(argstr, j)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF when objectList expected after [ = ")
- if argstr[j:j + 1] == ";":
+ self.BadSyntax(
+ argstr, i, "EOF when objectList expected after [ = "
+ )
+ if argstr[j : j + 1] == ";":
j = j + 1
else:
- self.BadSyntax(argstr, i,
- "objectList expected after [= ")
+ self.BadSyntax(argstr, i, "objectList expected after [= ")
if subj is None:
subj = self.blankNode(uri=bnodeID)
i = self.property_list(argstr, j, subj)
if i < 0:
- self.BadSyntax(argstr, j,
- "property_list expected")
+ self.BadSyntax(argstr, j, "property_list expected")
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF when ']' expected after [ <propertyList>")
- if argstr[j:j + 1] != "]":
- self.BadSyntax(argstr, j,
- "']' expected")
+ self.BadSyntax(
+ argstr, i, "EOF when ']' expected after [ <propertyList>"
+ )
+ if argstr[j : j + 1] != "]":
+ self.BadSyntax(argstr, j, "']' expected")
res.append(subj)
return j + 1
@@ -927,8 +950,8 @@ class SinkParser:
# if self.turtle:
# self.BadSyntax(argstr, i,
# "found '{' while in Turtle mode, Formulas not supported!")
- ch2 = argstr[i + 1:i + 2]
- if ch2 == '$':
+ ch2 = argstr[i + 1 : i + 2]
+ if ch2 == "$":
# a set
i += 1
j = i + 1
@@ -937,27 +960,23 @@ class SinkParser:
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(argstr, i,
- "needed '$}', found end.")
- if argstr[i:i + 2] == '$}':
+ self.BadSyntax(argstr, i, "needed '$}', found end.")
+ if argstr[i : i + 2] == "$}":
j = i + 2
break
if not first_run:
- if argstr[i:i + 1] == ',':
+ if argstr[i : i + 1] == ",":
i += 1
else:
- self.BadSyntax(
- argstr, i, "expected: ','")
+ self.BadSyntax(argstr, i, "expected: ','")
else:
first_run = False
item = []
- j = self.item(
- argstr, i, item) # @@@@@ should be path, was object
+ j = self.item(argstr, i, item) # @@@@@ should be path, was object
if j < 0:
- self.BadSyntax(argstr, i,
- "expected item in set or '$}'")
+ self.BadSyntax(argstr, i, "expected item in set or '$}'")
List.append(self._store.intern(item[0]))
res.append(self._store.newSet(List, self._context))
return j
@@ -980,17 +999,15 @@ class SinkParser:
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(
- argstr, i, "needed '}', found end.")
+ self.BadSyntax(argstr, i, "needed '}', found end.")
- if argstr[i:i + 1] == "}":
+ if argstr[i : i + 1] == "}":
j = i + 1
break
j = self.directiveOrStatement(argstr, i)
if j < 0:
- self.BadSyntax(
- argstr, i, "expected statement or '}'")
+ self.BadSyntax(argstr, i, "expected statement or '}'")
self._anonymousNodes = parentAnonymousNodes
self._variables = self._parentVariables
@@ -998,13 +1015,13 @@ class SinkParser:
self._context = self._parentContext
self._reason2 = reason2
self._parentContext = oldParentContext
- res.append(subj.close()) # No use until closed
+ res.append(subj.close()) # No use until closed
return j
if ch == "(":
thing_type = self._store.newList
- ch2 = argstr[i + 1:i + 2]
- if ch2 == '$':
+ ch2 = argstr[i + 1 : i + 2]
+ if ch2 == "$":
thing_type = self._store.newSet
i += 1
j = i + 1
@@ -1013,34 +1030,34 @@ class SinkParser:
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(
- argstr, i, "needed ')', found end.")
- if argstr[i:i + 1] == ')':
+ self.BadSyntax(argstr, i, "needed ')', found end.")
+ if argstr[i : i + 1] == ")":
j = i + 1
break
item = []
- j = self.item(
- argstr, i, item) # @@@@@ should be path, was object
+ j = self.item(argstr, i, item) # @@@@@ should be path, was object
if j < 0:
- self.BadSyntax(argstr, i,
- "expected item in list or ')'")
+ self.BadSyntax(argstr, i, "expected item in list or ')'")
List.append(self._store.intern(item[0]))
res.append(thing_type(List, self._context))
return j
- j = self.tok('this', argstr, i) # This context
+ j = self.tok("this", argstr, i) # This context
if j >= 0:
- self.BadSyntax(argstr, i,
- "Keyword 'this' was ancient N3. Now use " +
- "@forSome and @forAll keywords.")
-
- # booleans
- j = self.tok('true', argstr, i)
+ self.BadSyntax(
+ argstr,
+ i,
+ "Keyword 'this' was ancient N3. Now use "
+ + "@forSome and @forAll keywords.",
+ )
+
+ # booleans
+ j = self.tok("true", argstr, i)
if j >= 0:
res.append(True)
return j
- j = self.tok('false', argstr, i)
+ j = self.tok("false", argstr, i)
if j >= 0:
res.append(False)
return j
@@ -1057,23 +1074,24 @@ class SinkParser:
Leaves the terminating punctuation in the buffer
"""
while 1:
- while 1: # skip repeat ;
+ while 1: # skip repeat ;
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF found when expected verb in property list")
- if argstr[j]!=';': break
- i = j+1
+ self.BadSyntax(
+ argstr, i, "EOF found when expected verb in property list"
+ )
+ if argstr[j] != ";":
+ break
+ i = j + 1
- if argstr[j:j + 2] == ":-":
+ if argstr[j : j + 2] == ":-":
if self.turtle:
self.BadSyntax(argstr, j, "Found in ':-' in Turtle mode")
i = j + 2
res = []
j = self.node(argstr, i, res, subj)
if j < 0:
- self.BadSyntax(argstr, i,
- "bad {} or () or [] node after :- ")
+ self.BadSyntax(argstr, i, "bad {} or () or [] node after :- ")
i = j
continue
i = j
@@ -1085,20 +1103,18 @@ class SinkParser:
objs = []
i = self.objectList(argstr, j, objs)
if i < 0:
- self.BadSyntax(argstr, j,
- "objectList expected")
+ self.BadSyntax(argstr, j, "objectList expected")
for obj in objs:
dira, sym = v[0]
- if dira == '->':
+ if dira == "->":
self.makeStatement((self._context, sym, subj, obj))
else:
self.makeStatement((self._context, sym, obj, subj))
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, j,
- "EOF found in list of objects")
- if argstr[i:i + 1] != ";":
+ self.BadSyntax(argstr, j, "EOF found in list of objects")
+ if argstr[i : i + 1] != ";":
return i
i = i + 1 # skip semicolon and continue
@@ -1108,10 +1124,9 @@ class SinkParser:
"""
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(argstr, i,
- "EOF found expecting comma sep list")
+ self.BadSyntax(argstr, i, "EOF found expecting comma sep list")
if argstr[i] == ".":
- return j # empty list is OK
+ return j # empty list is OK
i = what(argstr, i, res)
if i < 0:
return -1
@@ -1120,15 +1135,14 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
- ch = argstr[j:j + 1]
+ ch = argstr[j : j + 1]
if ch != ",":
if ch != ".":
return -1
- return j # Found but not swallowed "."
+ return j # Found but not swallowed "."
i = what(argstr, j + 1, res)
if i < 0:
- self.BadSyntax(argstr, i,
- "bad list content")
+ self.BadSyntax(argstr, i, "bad list content")
def objectList(self, argstr, i, res):
i = self.object(argstr, i, res)
@@ -1137,10 +1151,9 @@ class SinkParser:
while 1:
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, j,
- "EOF found after object")
- if argstr[j:j + 1] != ",":
- return j # Found something else!
+ self.BadSyntax(argstr, j, "EOF found after object")
+ if argstr[j : j + 1] != ",":
+ return j # Found something else!
i = self.object(argstr, j + 1, res)
if i < 0:
return i
@@ -1149,14 +1162,13 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
- if argstr[j:j + 1] == ".":
- return j + 1 # skip
- if argstr[j:j + 1] == "}":
- return j # don't skip it
- if argstr[j:j + 1] == "]":
+ if argstr[j : j + 1] == ".":
+ return j + 1 # skip
+ if argstr[j : j + 1] == "}":
+ return j # don't skip it
+ if argstr[j : j + 1] == "]":
return j
- self.BadSyntax(argstr, j,
- "expected '.' or '}' or ']' at end of statement")
+ self.BadSyntax(argstr, j, "expected '.' or '}' or ']' at end of statement")
def uri_ref2(self, argstr, i, res):
"""Generate uri from n3 representation.
@@ -1182,8 +1194,7 @@ class SinkParser:
if not self.turtle and pfx == "":
ns = join(self._baseURI or "", "#")
else:
- self.BadSyntax(argstr, i,
- "Prefix \"%s:\" not bound" % (pfx))
+ self.BadSyntax(argstr, i, 'Prefix "%s:" not bound' % (pfx))
symb = self._store.newSymbol(ns + ln)
if symb in self._variables:
res.append(self._variables[symb])
@@ -1217,11 +1228,11 @@ class SinkParser:
if self._baseURI:
uref = join(self._baseURI, uref) # was: uripath.join
else:
- assert ":" in uref, \
- "With no base URI, cannot deal with relative URIs"
- if argstr[i - 1:i] == "#" and not uref[-1:] == "#":
- uref = uref + \
- "#" # She meant it! Weirdness in urlparse?
+ assert (
+ ":" in uref
+ ), "With no base URI, cannot deal with relative URIs"
+ if argstr[i - 1 : i] == "#" and not uref[-1:] == "#":
+ uref = uref + "#" # She meant it! Weirdness in urlparse?
symb = self._store.newSymbol(uref)
if symb in self._variables:
res.append(self._variables[symb])
@@ -1229,17 +1240,15 @@ class SinkParser:
res.append(symb)
return i + 1
i = i + 1
- self.BadSyntax(argstr, j,
- "unterminated URI reference")
+ self.BadSyntax(argstr, j, "unterminated URI reference")
elif self.keywordsSet:
v = []
j = self.bareWord(argstr, i, v)
if j < 0:
- return -1 # Forget varibles as a class, only in context.
+ return -1 # Forget varibles as a class, only in context.
if v[0] in self.keywords:
- self.BadSyntax(argstr, i,
- 'Keyword "%s" not allowed here.' % v[0])
+ self.BadSyntax(argstr, i, 'Keyword "%s" not allowed here.' % v[0])
res.append(self._store.newSymbol(self._bindings[""] + v[0]))
return j
else:
@@ -1253,7 +1262,7 @@ class SinkParser:
if m is None:
break
self.lines = self.lines + 1
- i = m.end() # Point to first character unmatched
+ i = m.end() # Point to first character unmatched
self.startOfLine = i
m = ws.match(argstr, i)
if m is not None:
@@ -1271,30 +1280,31 @@ class SinkParser:
if j < 0:
return -1
- if argstr[j:j + 1] != "?":
+ if argstr[j : j + 1] != "?":
return -1
j = j + 1
i = j
if argstr[j] in "0123456789-":
- self.BadSyntax(argstr, j,
- "Varible name can't start with '%s'" % argstr[j])
+ self.BadSyntax(argstr, j, "Varible name can't start with '%s'" % argstr[j])
while i < len(argstr) and argstr[i] not in _notKeywordsChars:
i = i + 1
if self._parentContext is None:
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._variables:
self._variables[varURI] = self._context.newUniversal(
- varURI, why=self._reason2)
+ varURI, why=self._reason2
+ )
res.append(self._variables[varURI])
return i
- # @@ was:
- # self.BadSyntax(argstr, j,
- # "Can't use ?xxx syntax for variable in outermost level: %s"
- # % argstr[j-1:i])
+ # @@ was:
+ # self.BadSyntax(argstr, j,
+ # "Can't use ?xxx syntax for variable in outermost level: %s"
+ # % argstr[j-1:i])
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._parentVariables:
self._parentVariables[varURI] = self._parentContext.newUniversal(
- varURI, why=self._reason2)
+ varURI, why=self._reason2
+ )
res.append(self._parentVariables[varURI])
return i
@@ -1340,16 +1350,17 @@ class SinkParser:
if argstr[i - 1] == ".": # qname cannot end with "."
ln = ln[:-1]
- if not ln: return -1
+ if not ln:
+ return -1
i -= 1
else: # First character is non-alpha
- ln = '' # Was: None - TBL (why? useful?)
+ ln = "" # Was: None - TBL (why? useful?)
- if i < len(argstr) and argstr[i] == ':':
+ if i < len(argstr) and argstr[i] == ":":
pfx = ln
# bnodes names have different rules
- if pfx == '_':
+ if pfx == "_":
allowedChars = _notNameChars
else:
allowedChars = _notQNameChars
@@ -1357,10 +1368,10 @@ class SinkParser:
i = i + 1
lastslash = False
# start = i # TODO first char .
- ln = ''
+ ln = ""
while i < len(argstr):
c = argstr[i]
- if not lastslash and c == '\\':
+ if not lastslash and c == "\\":
lastslash = True
i += 1
@@ -1368,12 +1379,25 @@ class SinkParser:
if lastslash:
if c not in escapeChars:
- raise BadSyntax(self._thisDoc, self.line, argstr, i,
- "illegal escape "+c)
- elif c=='%':
- if argstr[i+1] not in hexChars or argstr[i+2] not in hexChars:
- raise BadSyntax(self._thisDoc, self.line, argstr, i,
- "illegal hex escape "+c)
+ raise BadSyntax(
+ self._thisDoc,
+ self.line,
+ argstr,
+ i,
+ "illegal escape " + c,
+ )
+ elif c == "%":
+ if (
+ argstr[i + 1] not in hexChars
+ or argstr[i + 2] not in hexChars
+ ):
+ raise BadSyntax(
+ self._thisDoc,
+ self.line,
+ argstr,
+ i,
+ "illegal hex escape " + c,
+ )
ln = ln + c
i = i + 1
@@ -1383,22 +1407,22 @@ class SinkParser:
if lastslash:
raise BadSyntax(
- self._thisDoc, self.line, argstr, i,
- "qname cannot end with \\")
-
+ self._thisDoc, self.line, argstr, i, "qname cannot end with \\"
+ )
- if argstr[i-1]=='.':
+ if argstr[i - 1] == ".":
# localname cannot end in .
ln = ln[:-1]
- if not ln: return -1
+ if not ln:
+ return -1
i -= 1
res.append((pfx, ln))
return i
- else: # delimiter was not ":"
+ else: # delimiter was not ":"
if ln and self.keywordsSet and ln not in self.keywords:
- res.append(('', ln))
+ res.append(("", ln))
return i
return -1
@@ -1414,7 +1438,7 @@ class SinkParser:
i = j
if argstr[i] in self.string_delimiters:
- if argstr[i:i + 3] == argstr[i] * 3:
+ if argstr[i : i + 3] == argstr[i] * 3:
delim = argstr[i] * 3
else:
delim = argstr[i]
@@ -1462,7 +1486,7 @@ class SinkParser:
# return -1 ## or fall through?
if argstr[i] in self.string_delimiters:
- if argstr[i:i + 3] == argstr[i] * 3:
+ if argstr[i : i + 3] == argstr[i] * 3:
delim = argstr[i] * 3
else:
delim = argstr[i]
@@ -1471,17 +1495,20 @@ class SinkParser:
dt = None
j, s = self.strconst(argstr, i, delim)
lang = None
- if argstr[j:j + 1] == "@": # Language?
+ if argstr[j : j + 1] == "@": # Language?
m = langcode.match(argstr, j + 1)
if m is None:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "Bad language code syntax on string " +
- "literal, after @")
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "Bad language code syntax on string " + "literal, after @",
+ )
i = m.end()
- lang = argstr[j + 1:i]
+ lang = argstr[j + 1 : i]
j = i
- if argstr[j:j + 2] == "^^":
+ if argstr[j : j + 2] == "^^":
res2 = []
j = self.uri_ref2(argstr, j + 2, res2) # Read datatype URI
dt = res2[0]
@@ -1493,7 +1520,7 @@ class SinkParser:
def uriOf(self, sym):
if isinstance(sym, tuple):
return sym[1] # old system for --pipe
- # return sym.uriref() # cwm api
+ # return sym.uriref() # cwm api
return sym
def strconst(self, argstr, i, delim):
@@ -1504,35 +1531,39 @@ class SinkParser:
delim2, delim3, delim4, delim5 = delim1 * 2, delim1 * 3, delim1 * 4, delim1 * 5
j = i
- ustr = u"" # Empty unicode string
+ ustr = u"" # Empty unicode string
startline = self.lines # Remember where for error messages
while j < len(argstr):
if argstr[j] == delim1:
if delim == delim1: # done when delim is " or '
i = j + 1
return i, ustr
- if delim == delim3: # done when delim is """ or ''' and, respectively ...
- if argstr[j:j + 5] == delim5: # ... we have "" or '' before
+ if (
+ delim == delim3
+ ): # done when delim is """ or ''' and, respectively ...
+ if argstr[j : j + 5] == delim5: # ... we have "" or '' before
i = j + 5
ustr = ustr + delim2
return i, ustr
- if argstr[j:j + 4] == delim4: # ... we have " or ' before
+ if argstr[j : j + 4] == delim4: # ... we have " or ' before
i = j + 4
ustr = ustr + delim1
return i, ustr
- if argstr[j:j + 3] == delim3: # current " or ' is part of delim
+ if argstr[j : j + 3] == delim3: # current " or ' is part of delim
i = j + 3
return i, ustr
- # we are inside of the string and current char is " or '
+ # we are inside of the string and current char is " or '
j = j + 1
ustr = ustr + delim1
continue
- m = interesting.search(argstr, j) # was argstr[j:].
- # Note for pos param to work, MUST be compiled ... re bug?
+ m = interesting.search(argstr, j) # was argstr[j:].
+ # Note for pos param to work, MUST be compiled ... re bug?
assert m, "Quote expected in string at ^ in %s^%s" % (
- argstr[j - 20:j], argstr[j:j + 20]) # at least need a quote
+ argstr[j - 20 : j],
+ argstr[j : j + 20],
+ ) # at least need a quote
i = m.start()
try:
@@ -1543,12 +1574,15 @@ class SinkParser:
err = err + (" %02x" % ord(c))
streason = sys.exc_info()[1].__str__()
raise BadSyntax(
- self._thisDoc, startline, argstr, j,
- "Unicode error appending characters" +
- " %s to string, because\n\t%s"
- % (err, streason))
+ self._thisDoc,
+ startline,
+ argstr,
+ j,
+ "Unicode error appending characters"
+ + " %s to string, because\n\t%s" % (err, streason),
+ )
- # print "@@@ i = ",i, " j=",j, "m.end=", m.end()
+ # print "@@@ i = ",i, " j=",j, "m.end=", m.end()
ch = argstr[i]
if ch == delim1:
@@ -1561,8 +1595,12 @@ class SinkParser:
elif ch in "\r\n":
if delim == delim1:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "newline found in string literal")
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "newline found in string literal",
+ )
self.lines = self.lines + 1
ustr = ustr + ch
j = i + 1
@@ -1570,14 +1608,18 @@ class SinkParser:
elif ch == "\\":
j = i + 1
- ch = argstr[j:j + 1] # Will be empty if string ends
+ ch = argstr[j : j + 1] # Will be empty if string ends
if not ch:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "unterminated string literal (2)")
- k = 'abfrtvn\\"\''.find(ch)
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "unterminated string literal (2)",
+ )
+ k = "abfrtvn\\\"'".find(ch)
if k >= 0:
- uch = '\a\b\f\r\t\v\n\\"\''[k]
+ uch = "\a\b\f\r\t\v\n\\\"'"[k]
ustr = ustr + uch
j = j + 1
elif ch == "u":
@@ -1587,41 +1629,43 @@ class SinkParser:
j, ch = self.UEscape(argstr, j + 1, startline)
ustr = ustr + ch
else:
- self.BadSyntax(argstr, i,
- "bad escape")
+ self.BadSyntax(argstr, i, "bad escape")
- self.BadSyntax(argstr, i,
- "unterminated string literal")
+ self.BadSyntax(argstr, i, "unterminated string literal")
def _unicodeEscape(self, argstr, i, startline, reg, n, prefix):
- if len(argstr)<i+n:
+ if len(argstr) < i + n:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "unterminated string literal(3)")
+ self._thisDoc, startline, argstr, i, "unterminated string literal(3)"
+ )
try:
- return i+n, reg.sub(unicodeExpand, '\\'+prefix+argstr[i:i+n])
+ return i + n, reg.sub(unicodeExpand, "\\" + prefix + argstr[i : i + n])
except:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "bad string literal hex escape: "+argstr[i:i+n])
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "bad string literal hex escape: " + argstr[i : i + n],
+ )
def uEscape(self, argstr, i, startline):
- return self._unicodeEscape(argstr, i, startline, unicodeEscape4, 4, 'u')
+ return self._unicodeEscape(argstr, i, startline, unicodeEscape4, 4, "u")
def UEscape(self, argstr, i, startline):
- return self._unicodeEscape(argstr, i, startline, unicodeEscape8, 8, 'U')
+ return self._unicodeEscape(argstr, i, startline, unicodeEscape8, 8, "U")
def BadSyntax(self, argstr, i, msg):
raise BadSyntax(self._thisDoc, self.lines, argstr, i, msg)
+
# If we are going to do operators then they should generate
# [ is operator:plus of ( \1 \2 ) ]
class BadSyntax(SyntaxError):
def __init__(self, uri, lines, argstr, i, why):
- self._str = argstr.encode(
- 'utf-8') # Better go back to strings for errors
+ self._str = argstr.encode("utf-8") # Better go back to strings for errors
self._i = i
self._why = why
self.lines = lines
@@ -1641,16 +1685,21 @@ class BadSyntax(SyntaxError):
else:
post = ""
- return 'at line %i of <%s>:\nBad syntax (%s) at ^ in:\n"%s%s^%s%s"' \
- % (self.lines + 1, self._uri, self._why, pre,
- argstr[st:i], argstr[i:i + 60], post)
+ return 'at line %i of <%s>:\nBad syntax (%s) at ^ in:\n"%s%s^%s%s"' % (
+ self.lines + 1,
+ self._uri,
+ self._why,
+ pre,
+ argstr[st:i],
+ argstr[i : i + 60],
+ post,
+ )
@property
def message(self):
return str(self)
-
###############################################################################
class Formula(object):
number = 0
@@ -1663,25 +1712,24 @@ class Formula(object):
self.existentials = {}
self.universals = {}
- self.quotedgraph = QuotedGraph(
- store=parent.store, identifier=self.id())
+ self.quotedgraph = QuotedGraph(store=parent.store, identifier=self.id())
def __str__(self):
- return '_:Formula%s' % self.number
+ return "_:Formula%s" % self.number
def id(self):
- return BNode('_:Formula%s' % self.number)
+ return BNode("_:Formula%s" % self.number)
def newBlankNode(self, uri=None, why=None):
if uri is None:
self.counter += 1
- bn = BNode('f%sb%s' % (self.uuid, self.counter))
+ bn = BNode("f%sb%s" % (self.uuid, self.counter))
else:
- bn = BNode(uri.split('#').pop().replace('_', 'b'))
+ bn = BNode(uri.split("#").pop().replace("_", "b"))
return bn
def newUniversal(self, uri, why=None):
- return Variable(uri.split('#').pop())
+ return Variable(uri.split("#").pop())
def declareExistential(self, x):
self.existentials[x] = self.newBlankNode()
@@ -1691,7 +1739,7 @@ class Formula(object):
return self.quotedgraph
-r_hibyte = re.compile(r'([\x80-\xff])')
+r_hibyte = re.compile(r"([\x80-\xff])")
class RDFSink(object):
@@ -1716,9 +1764,9 @@ class RDFSink(object):
return arg.newBlankNode(uri)
elif isinstance(arg, Graph) or arg is None:
self.counter += 1
- bn = BNode('n' + str(self.counter))
+ bn = BNode("n" + str(self.counter))
else:
- bn = BNode(str(arg[0]).split('#').pop().replace('_', 'b'))
+ bn = BNode(str(arg[0]).split("#").pop().replace("_", "b"))
return bn
def newLiteral(self, s, dt, lang):
@@ -1728,18 +1776,12 @@ class RDFSink(object):
return Literal(s, lang=lang)
def newList(self, n, f):
- nil = self.newSymbol(
- 'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil'
- )
+ nil = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#nil")
if not n:
return nil
- first = self.newSymbol(
- 'http://www.w3.org/1999/02/22-rdf-syntax-ns#first'
- )
- rest = self.newSymbol(
- 'http://www.w3.org/1999/02/22-rdf-syntax-ns#rest'
- )
+ first = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#first")
+ rest = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#rest")
af = a = self.newBlankNode(f)
for ne in n[:-1]:
@@ -1755,12 +1797,12 @@ class RDFSink(object):
return set(args)
def setDefaultNamespace(self, *args):
- return ':'.join(repr(n) for n in args)
+ return ":".join(repr(n) for n in args)
def makeStatement(self, quadruple, why=None):
f, p, s, o = quadruple
- if hasattr(p, 'formula'):
+ if hasattr(p, "formula"):
raise Exception("Formula used as predicate")
s = self.normalise(f, s)
@@ -1768,14 +1810,14 @@ class RDFSink(object):
o = self.normalise(f, o)
if f == self.rootFormula:
- # print s, p, o, '.'
+ # print s, p, o, '.'
self.graph.add((s, p, o))
elif isinstance(f, Formula):
f.quotedgraph.add((s, p, o))
else:
- f.add((s,p,o))
+ f.add((s, p, o))
- # return str(quadruple)
+ # return str(quadruple)
def normalise(self, f, n):
if isinstance(n, tuple):
@@ -1791,8 +1833,8 @@ class RDFSink(object):
if isinstance(n, Decimal):
value = str(n)
- if value == '-0':
- value = '0'
+ if value == "-0":
+ value = "0"
s = Literal(value, datatype=DECIMAL_DATATYPE)
return s
@@ -1804,11 +1846,11 @@ class RDFSink(object):
if n in f.existentials:
return f.existentials[n]
- # if isinstance(n, Var):
- # if f.universals.has_key(n):
- # return f.universals[n]
- # f.universals[n] = f.newBlankNode()
- # return f.universals[n]
+ # if isinstance(n, Var):
+ # if f.universals.has_key(n):
+ # return f.universals[n]
+ # f.universals[n] = f.newBlankNode()
+ # return f.universals[n]
return n
@@ -1841,7 +1883,7 @@ def hexify(ustr):
"""
# s1=ustr.encode('utf-8')
s = ""
- for ch in ustr: # .encode('utf-8'):
+ for ch in ustr: # .encode('utf-8'):
if ord(ch) > 126 or ord(ch) < 33:
ch = "%%%02X" % ord(ch)
else:
@@ -1865,13 +1907,13 @@ class TurtleParser(Parser):
if encoding not in [None, "utf-8"]:
raise Exception(
- ("N3/Turtle files are always utf-8 encoded, ",
- "I was passed: %s") % encoding)
+ ("N3/Turtle files are always utf-8 encoded, ", "I was passed: %s")
+ % encoding
+ )
sink = RDFSink(graph)
- baseURI = graph.absolutize(
- source.getPublicId() or source.getSystemId() or "")
+ baseURI = graph.absolutize(source.getPublicId() or source.getSystemId() or "")
p = SinkParser(sink, baseURI=baseURI, turtle=turtle)
p.loadStream(source.getByteStream())
@@ -1893,38 +1935,40 @@ class N3Parser(TurtleParser):
pass
def parse(self, source, graph, encoding="utf-8"):
- # we're currently being handed a Graph, not a ConjunctiveGraph
+ # we're currently being handed a Graph, not a ConjunctiveGraph
assert graph.store.context_aware # is this implied by formula_aware
assert graph.store.formula_aware
conj_graph = ConjunctiveGraph(store=graph.store)
conj_graph.default_context = graph # TODO: CG __init__ should have a
- # default_context arg
- # TODO: update N3Processor so that it can use conj_graph as the sink
+ # default_context arg
+ # TODO: update N3Processor so that it can use conj_graph as the sink
conj_graph.namespace_manager = graph.namespace_manager
TurtleParser.parse(self, source, conj_graph, encoding, turtle=False)
-def _test(): # pragma: no cover
+def _test(): # pragma: no cover
import doctest
+
doctest.testmod()
# if __name__ == '__main__':
# _test()
-def main(): # pragma: no cover
+
+def main(): # pragma: no cover
g = ConjunctiveGraph()
sink = RDFSink(g)
- base_uri = 'file://' + os.path.join(os.getcwd(), sys.argv[1])
+ base_uri = "file://" + os.path.join(os.getcwd(), sys.argv[1])
p = SinkParser(sink, baseURI=base_uri)
- p._bindings[''] = p._baseURI + '#'
+ p._bindings[""] = p._baseURI + "#"
p.startDoc()
- f = open(sys.argv[1], 'rb')
+ f = open(sys.argv[1], "rb")
rdbytes = f.read()
f.close()
@@ -1934,7 +1978,8 @@ def main(): # pragma: no cover
print(t)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
# ends
diff --git a/rdflib/plugins/parsers/nquads.py b/rdflib/plugins/parsers/nquads.py
index 8bc415bd..0c29fc4c 100644
--- a/rdflib/plugins/parsers/nquads.py
+++ b/rdflib/plugins/parsers/nquads.py
@@ -36,26 +36,26 @@ from rdflib.plugins.parsers.ntriples import ParseError
from rdflib.plugins.parsers.ntriples import r_tail
from rdflib.plugins.parsers.ntriples import r_wspace
-__all__ = ['NQuadsParser']
+__all__ = ["NQuadsParser"]
class NQuadsParser(NTriplesParser):
-
def parse(self, inputsource, sink, **kwargs):
"""Parse f as an N-Triples file."""
- assert sink.store.context_aware, ("NQuadsParser must be given"
- " a context aware store.")
+ assert sink.store.context_aware, (
+ "NQuadsParser must be given" " a context aware store."
+ )
self.sink = ConjunctiveGraph(store=sink.store, identifier=sink.identifier)
source = inputsource.getByteStream()
- if not hasattr(source, 'read'):
+ if not hasattr(source, "read"):
raise ParseError("Item to parse must be a file-like object.")
- source = getreader('utf-8')(source)
+ source = getreader("utf-8")(source)
self.file = source
- self.buffer = ''
+ self.buffer = ""
while True:
self.line = __line = self.readline()
if self.line is None:
@@ -69,7 +69,7 @@ class NQuadsParser(NTriplesParser):
def parseline(self):
self.eat(r_wspace)
- if (not self.line) or self.line.startswith(('#')):
+ if (not self.line) or self.line.startswith(("#")):
return # The line is empty or a comment
subject = self.subject()
diff --git a/rdflib/plugins/parsers/nt.py b/rdflib/plugins/parsers/nt.py
index 783488af..d7d3b336 100644
--- a/rdflib/plugins/parsers/nt.py
+++ b/rdflib/plugins/parsers/nt.py
@@ -1,7 +1,7 @@
from rdflib.parser import Parser
from rdflib.plugins.parsers.ntriples import NTriplesParser
-__all__ = ['NTSink', 'NTParser']
+__all__ = ["NTSink", "NTParser"]
class NTSink(object):
diff --git a/rdflib/plugins/parsers/ntriples.py b/rdflib/plugins/parsers/ntriples.py
index 0724a86e..9398c8de 100644
--- a/rdflib/plugins/parsers/ntriples.py
+++ b/rdflib/plugins/parsers/ntriples.py
@@ -22,18 +22,18 @@ from rdflib.compat import decodeUnicodeEscape
from io import BytesIO
-__all__ = ['unquote', 'uriquote', 'Sink', 'NTriplesParser']
+__all__ = ["unquote", "uriquote", "Sink", "NTriplesParser"]
uriref = r'<([^:]+:[^\s"<>]*)>'
literal = r'"([^"\\]*(?:\\.[^"\\]*)*)"'
-litinfo = r'(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)|\^\^' + uriref + r')?'
+litinfo = r"(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)|\^\^" + uriref + r")?"
-r_line = re.compile(r'([^\r\n]*)(?:\r\n|\r|\n)')
-r_wspace = re.compile(r'[ \t]*')
-r_wspaces = re.compile(r'[ \t]+')
-r_tail = re.compile(r'[ \t]*\.[ \t]*(#.*)?')
+r_line = re.compile(r"([^\r\n]*)(?:\r\n|\r|\n)")
+r_wspace = re.compile(r"[ \t]*")
+r_wspaces = re.compile(r"[ \t]+")
+r_tail = re.compile(r"[ \t]*\.[ \t]*(#.*)?")
r_uriref = re.compile(uriref)
-r_nodeid = re.compile(r'_:([A-Za-z0-9_:]([-A-Za-z0-9_:\.]*[-A-Za-z0-9_:])?)')
+r_nodeid = re.compile(r"_:([A-Za-z0-9_:]([-A-Za-z0-9_:\.]*[-A-Za-z0-9_:])?)")
r_literal = re.compile(literal + litinfo)
bufsiz = 2048
@@ -57,11 +57,10 @@ class Sink(object):
print(s, p, o)
-quot = {'t': u'\t', 'n': u'\n', 'r': u'\r', '"': u'"', '\\':
- u'\\'}
-r_safe = re.compile(r'([\x20\x21\x23-\x5B\x5D-\x7E]+)')
+quot = {"t": u"\t", "n": u"\n", "r": u"\r", '"': u'"', "\\": u"\\"}
+r_safe = re.compile(r"([\x20\x21\x23-\x5B\x5D-\x7E]+)")
r_quot = re.compile(r'\\(t|n|r|"|\\)')
-r_uniquot = re.compile(r'\\u([0-9A-F]{4})|\\U([0-9A-F]{8})')
+r_uniquot = re.compile(r"\\u([0-9A-F]{4})|\\U([0-9A-F]{8})")
def unquote(s):
@@ -71,7 +70,7 @@ def unquote(s):
if isinstance(s, str): # nquads
s = decodeUnicodeEscape(s)
else:
- s = s.decode('unicode-escape')
+ s = s.decode("unicode-escape")
return s
else:
@@ -79,7 +78,7 @@ def unquote(s):
while s:
m = r_safe.match(s)
if m:
- s = s[m.end():]
+ s = s[m.end() :]
result.append(m.group(1))
continue
@@ -91,28 +90,27 @@ def unquote(s):
m = r_uniquot.match(s)
if m:
- s = s[m.end():]
+ s = s[m.end() :]
u, U = m.groups()
codepoint = int(u or U, 16)
if codepoint > 0x10FFFF:
raise ParseError("Disallowed codepoint: %08X" % codepoint)
result.append(chr(codepoint))
- elif s.startswith('\\'):
+ elif s.startswith("\\"):
raise ParseError("Illegal escape at: %s..." % s[:10])
else:
raise ParseError("Illegal literal character: %r" % s[0])
- return u''.join(result)
+ return u"".join(result)
-r_hibyte = re.compile(r'([\x80-\xFF])')
+r_hibyte = re.compile(r"([\x80-\xFF])")
def uriquote(uri):
if not validate:
return uri
else:
- return r_hibyte.sub(
- lambda m: '%%%02X' % ord(m.group(1)), uri)
+ return r_hibyte.sub(lambda m: "%%%02X" % ord(m.group(1)), uri)
class NTriplesParser(object):
@@ -134,14 +132,14 @@ class NTriplesParser(object):
def parse(self, f):
"""Parse f as an N-Triples file."""
- if not hasattr(f, 'read'):
+ if not hasattr(f, "read"):
raise ParseError("Item to parse must be a file-like object.")
# since N-Triples 1.1 files can and should be utf-8 encoded
- f = codecs.getreader('utf-8')(f)
+ f = codecs.getreader("utf-8")(f)
self.file = f
- self.buffer = ''
+ self.buffer = ""
while True:
self.line = self.readline()
if self.line is None:
@@ -174,7 +172,7 @@ class NTriplesParser(object):
while True:
m = r_line.match(self.buffer)
if m: # the more likely prospect
- self.buffer = self.buffer[m.end():]
+ self.buffer = self.buffer[m.end() :]
return m.group(1)
else:
buffer = self.file.read(bufsiz)
@@ -187,7 +185,7 @@ class NTriplesParser(object):
def parseline(self):
self.eat(r_wspace)
- if (not self.line) or self.line.startswith('#'):
+ if (not self.line) or self.line.startswith("#"):
return # The line is empty or a comment
subject = self.subject()
@@ -212,7 +210,7 @@ class NTriplesParser(object):
# print(dir(pattern))
# print repr(self.line), type(self.line)
raise ParseError("Failed to eat %s at %s" % (pattern.pattern, self.line))
- self.line = self.line[m.end():]
+ self.line = self.line[m.end() :]
return m
def subject(self):
@@ -235,7 +233,7 @@ class NTriplesParser(object):
return objt
def uriref(self):
- if self.peek('<'):
+ if self.peek("<"):
uri = self.eat(r_uriref).group(1)
uri = unquote(uri)
uri = uriquote(uri)
@@ -243,7 +241,7 @@ class NTriplesParser(object):
return False
def nodeid(self):
- if self.peek('_'):
+ if self.peek("_"):
# Fix for https://github.com/RDFLib/rdflib/issues/204
bnode_id = self.eat(r_nodeid).group(1)
new_id = self._bnode_ids.get(bnode_id, None)
@@ -277,6 +275,7 @@ class NTriplesParser(object):
return Literal(lit, lang, dtype)
return False
+
# # Obsolete, unused
# def parseURI(uri):
# import urllib
diff --git a/rdflib/plugins/parsers/rdfxml.py b/rdflib/plugins/parsers/rdfxml.py
index 17554ba1..976edf2c 100644
--- a/rdflib/plugins/parsers/rdfxml.py
+++ b/rdflib/plugins/parsers/rdfxml.py
@@ -15,22 +15,30 @@ from rdflib.term import Literal
from rdflib.exceptions import ParserError, Error
from rdflib.parser import Parser
-__all__ = ['create_parser', 'BagID', 'ElementHandler',
- 'RDFXMLHandler', 'RDFXMLParser']
+__all__ = ["create_parser", "BagID", "ElementHandler", "RDFXMLHandler", "RDFXMLParser"]
RDFNS = RDF
# http://www.w3.org/TR/rdf-syntax-grammar/#eventterm-attribute-URI
# A mapping from unqualified terms to their qualified version.
-UNQUALIFIED = {"about": RDF.about,
- "ID": RDF.ID,
- "type": RDF.type,
- "resource": RDF.resource,
- "parseType": RDF.parseType}
+UNQUALIFIED = {
+ "about": RDF.about,
+ "ID": RDF.ID,
+ "type": RDF.type,
+ "resource": RDF.resource,
+ "parseType": RDF.parseType,
+}
# http://www.w3.org/TR/rdf-syntax-grammar/#coreSyntaxTerms
-CORE_SYNTAX_TERMS = [RDF.RDF, RDF.ID, RDF.about, RDF.parseType,
- RDF.resource, RDF.nodeID, RDF.datatype]
+CORE_SYNTAX_TERMS = [
+ RDF.RDF,
+ RDF.ID,
+ RDF.about,
+ RDF.parseType,
+ RDF.resource,
+ RDF.nodeID,
+ RDF.datatype,
+]
# http://www.w3.org/TR/rdf-syntax-grammar/#syntaxTerms
SYNTAX_TERMS = CORE_SYNTAX_TERMS + [RDF.Description, RDF.li]
@@ -39,15 +47,16 @@ SYNTAX_TERMS = CORE_SYNTAX_TERMS + [RDF.Description, RDF.li]
OLD_TERMS = [
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEach"),
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEachPrefix"),
- URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#bagID")]
+ URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#bagID"),
+]
-NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.li, ] + OLD_TERMS
+NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.li,] + OLD_TERMS
NODE_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.nodeID, RDF.about]
-PROPERTY_ELEMENT_EXCEPTIONS = \
- CORE_SYNTAX_TERMS + [RDF.Description, ] + OLD_TERMS
-PROPERTY_ATTRIBUTE_EXCEPTIONS = \
+PROPERTY_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.Description,] + OLD_TERMS
+PROPERTY_ATTRIBUTE_EXCEPTIONS = (
CORE_SYNTAX_TERMS + [RDF.Description, RDF.li] + OLD_TERMS
+)
PROPERTY_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.resource, RDF.nodeID]
XMLNS = "http://www.w3.org/XML/1998/namespace"
@@ -56,7 +65,7 @@ LANG = (XMLNS, "lang")
class BagID(URIRef):
- __slots__ = ['li']
+ __slots__ = ["li"]
def __init__(self, val):
super(URIRef, self).__init__(val)
@@ -64,13 +73,26 @@ class BagID(URIRef):
def next_li(self):
self.li += 1
- return RDFNS['_%s' % self.li]
+ return RDFNS["_%s" % self.li]
class ElementHandler(object):
- __slots__ = ['start', 'char', 'end', 'li', 'id',
- 'base', 'subject', 'predicate', 'object',
- 'list', 'language', 'datatype', 'declared', 'data']
+ __slots__ = [
+ "start",
+ "char",
+ "end",
+ "li",
+ "id",
+ "base",
+ "subject",
+ "predicate",
+ "object",
+ "list",
+ "language",
+ "datatype",
+ "declared",
+ "data",
+ ]
def __init__(self):
self.start = None
@@ -89,11 +111,10 @@ class ElementHandler(object):
def next_li(self):
self.li += 1
- return RDFNS['_%s' % self.li]
+ return RDFNS["_%s" % self.li]
class RDFXMLHandler(handler.ContentHandler):
-
def __init__(self, store):
self.store = store
self.preserve_bnode_ids = False
@@ -103,7 +124,10 @@ class RDFXMLHandler(handler.ContentHandler):
document_element = ElementHandler()
document_element.start = self.document_element_start
document_element.end = lambda name, qname: None
- self.stack = [None, document_element, ]
+ self.stack = [
+ None,
+ document_element,
+ ]
self.ids = {} # remember IDs we have already seen
self.bnode = {}
self._ns_contexts = [{}] # contains uri -> prefix dicts
@@ -137,16 +161,14 @@ class RDFXMLHandler(handler.ContentHandler):
if parent and parent.base:
base = urljoin(parent.base, base)
else:
- systemId = self.locator.getPublicId() \
- or self.locator.getSystemId()
+ systemId = self.locator.getPublicId() or self.locator.getSystemId()
if systemId:
base = urljoin(systemId, base)
else:
if parent:
base = parent.base
if base is None:
- systemId = self.locator.getPublicId() \
- or self.locator.getSystemId()
+ systemId = self.locator.getPublicId() or self.locator.getSystemId()
if systemId:
base, frag = urldefrag(systemId)
current.base = base
@@ -181,25 +203,30 @@ class RDFXMLHandler(handler.ContentHandler):
def error(self, message):
locator = self.locator
- info = "%s:%s:%s: " % (locator.getSystemId(),
- locator.getLineNumber(),
- locator.getColumnNumber())
+ info = "%s:%s:%s: " % (
+ locator.getSystemId(),
+ locator.getLineNumber(),
+ locator.getColumnNumber(),
+ )
raise ParserError(info + message)
def get_current(self):
return self.stack[-2]
+
# Create a read only property called current so that self.current
# give the current element handler.
current = property(get_current)
def get_next(self):
return self.stack[-1]
+
# Create a read only property that gives the element handler to be
# used for the next element.
next = property(get_next)
def get_parent(self):
return self.stack[-3]
+
# Create a read only property that gives the current parent
# element handler
parent = property(get_parent)
@@ -233,7 +260,7 @@ class RDFXMLHandler(handler.ContentHandler):
def document_element_start(self, name, qname, attrs):
if name[0] and URIRef("".join(name)) == RDF.RDF:
# Cheap hack so 2to3 doesn't turn it into __next__
- next = getattr(self, 'next')
+ next = getattr(self, "next")
next.start = self.node_element_start
next.end = self.node_element_end
else:
@@ -248,7 +275,7 @@ class RDFXMLHandler(handler.ContentHandler):
absolutize = self.absolutize
# Cheap hack so 2to3 doesn't turn it into __next__
- next = getattr(self, 'next')
+ next = getattr(self, "next")
next.start = self.property_element_start
next.end = self.property_element_end
@@ -257,27 +284,21 @@ class RDFXMLHandler(handler.ContentHandler):
if RDF.ID in atts:
if RDF.about in atts or RDF.nodeID in atts:
- self.error(
- "Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
- )
+ self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
id = atts[RDF.ID]
if not is_ncname(id):
self.error("rdf:ID value is not a valid NCName: %s" % id)
subject = absolutize("#%s" % id)
if subject in self.ids:
- self.error(
- "two elements cannot use the same ID: '%s'" % subject)
+ self.error("two elements cannot use the same ID: '%s'" % subject)
self.ids[subject] = 1 # IDs can only appear once within a document
elif RDF.nodeID in atts:
if RDF.ID in atts or RDF.about in atts:
- self.error(
- "Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
- )
+ self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
nodeID = atts[RDF.nodeID]
if not is_ncname(nodeID):
- self.error(
- "rdf:nodeID value is not a valid NCName: %s" % nodeID)
+ self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
subject = self.bnode[nodeID]
@@ -288,9 +309,7 @@ class RDFXMLHandler(handler.ContentHandler):
subject = BNode(nodeID)
elif RDF.about in atts:
if RDF.ID in atts or RDF.nodeID in atts:
- self.error(
- "Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
- )
+ self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
subject = absolutize(atts[RDF.about])
else:
subject = BNode()
@@ -330,7 +349,9 @@ class RDFXMLHandler(handler.ContentHandler):
if self.parent.object and self.current != self.stack[2]:
- self.error("Repeat node-elements inside property elements: %s"%"".join(name))
+ self.error(
+ "Repeat node-elements inside property elements: %s" % "".join(name)
+ )
self.parent.object = self.current.subject
@@ -340,7 +361,7 @@ class RDFXMLHandler(handler.ContentHandler):
absolutize = self.absolutize
# Cheap hack so 2to3 doesn't turn it into __next__
- next = getattr(self, 'next')
+ next = getattr(self, "next")
object = None
current.data = None
current.list = None
@@ -366,17 +387,14 @@ class RDFXMLHandler(handler.ContentHandler):
nodeID = atts.get(RDF.nodeID, None)
parse_type = atts.get(RDF.parseType, None)
if resource is not None and nodeID is not None:
- self.error(
- "Property element cannot have both rdf:nodeID and rdf:resource"
- )
+ self.error("Property element cannot have both rdf:nodeID and rdf:resource")
if resource is not None:
object = absolutize(resource)
next.start = self.node_element_start
next.end = self.node_element_end
elif nodeID is not None:
if not is_ncname(nodeID):
- self.error(
- "rdf:nodeID value is not a valid NCName: %s" % nodeID)
+ self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
object = self.bnode[nodeID]
@@ -401,16 +419,16 @@ class RDFXMLHandler(handler.ContentHandler):
elif parse_type == "Collection":
current.char = None
object = current.list = RDF.nil # BNode()
- # self.parent.subject
+ # self.parent.subject
next.start = self.node_element_start
next.end = self.list_node_element_end
else: # if parse_type=="Literal":
- # All other values are treated as Literal
- # See: http://www.w3.org/TR/rdf-syntax-grammar/
- # parseTypeOtherPropertyElt
+ # All other values are treated as Literal
+ # See: http://www.w3.org/TR/rdf-syntax-grammar/
+ # parseTypeOtherPropertyElt
object = Literal("", datatype=RDF.XMLLiteral)
current.char = self.literal_element_char
- current.declared = {XMLNS: 'xml'}
+ current.declared = {XMLNS: "xml"}
next.start = self.literal_element_start
next.char = self.literal_element_char
next.end = self.literal_element_end
@@ -466,18 +484,17 @@ class RDFXMLHandler(handler.ContentHandler):
literalLang = current.language
if current.datatype is not None:
literalLang = None
- current.object = Literal(
- current.data, literalLang, current.datatype)
+ current.object = Literal(current.data, literalLang, current.datatype)
current.data = None
if self.next.end == self.list_node_element_end:
if current.object != RDF.nil:
self.store.add((current.list, RDF.rest, RDF.nil))
if current.object is not None:
- self.store.add(
- (self.parent.subject, current.predicate, current.object))
+ self.store.add((self.parent.subject, current.predicate, current.object))
if current.id is not None:
- self.add_reified(current.id, (self.parent.subject,
- current.predicate, current.object))
+ self.add_reified(
+ current.id, (self.parent.subject, current.predicate, current.object)
+ )
current.subject = None
def list_node_element_end(self, name, qname):
@@ -513,9 +530,9 @@ class RDFXMLHandler(handler.ContentHandler):
if not name[0] in current.declared:
current.declared[name[0]] = prefix
if prefix:
- current.object += (' xmlns:%s="%s"' % (prefix, name[0]))
+ current.object += ' xmlns:%s="%s"' % (prefix, name[0])
else:
- current.object += (' xmlns="%s"' % name[0])
+ current.object += ' xmlns="%s"' % name[0]
else:
current.object = "<%s" % name[1]
@@ -526,7 +543,7 @@ class RDFXMLHandler(handler.ContentHandler):
name = current.declared[name[0]] + ":" + name[1]
else:
name = name[1]
- current.object += (' %s=%s' % (name, quoteattr(value)))
+ current.object += " %s=%s" % (name, quoteattr(value))
current.object += ">"
def literal_element_char(self, data):
@@ -549,8 +566,7 @@ def create_parser(target, store):
try:
# Workaround for bug in expatreader.py. Needed when
# expatreader is trying to guess a prefix.
- parser.start_namespace_decl(
- "xml", "http://www.w3.org/XML/1998/namespace")
+ parser.start_namespace_decl("xml", "http://www.w3.org/XML/1998/namespace")
except AttributeError:
pass # Not present in Jython (at least)
parser.setFeature(handler.feature_namespaces, 1)
@@ -563,7 +579,6 @@ def create_parser(target, store):
class RDFXMLParser(Parser):
-
def __init__(self):
pass
diff --git a/rdflib/plugins/parsers/trig.py b/rdflib/plugins/parsers/trig.py
index f4c3ff1b..96c94503 100644
--- a/rdflib/plugins/parsers/trig.py
+++ b/rdflib/plugins/parsers/trig.py
@@ -5,18 +5,18 @@ from rdflib.parser import Parser
from .notation3 import SinkParser, RDFSink
-def becauseSubGraph(*args, **kwargs): pass
+def becauseSubGraph(*args, **kwargs):
+ pass
class TrigSinkParser(SinkParser):
-
def directiveOrStatement(self, argstr, h):
- #import pdb; pdb.set_trace()
+ # import pdb; pdb.set_trace()
i = self.skipSpace(argstr, h)
if i < 0:
- return i # EOF
+ return i # EOF
j = self.graph(argstr, i)
if j >= 0:
@@ -46,12 +46,11 @@ class TrigSinkParser(SinkParser):
if j >= 0:
return j
- if argstr[i] == '[':
+ if argstr[i] == "[":
j = self.skipSpace(argstr, i + 1)
if j < 0:
- self.BadSyntax(argstr, i,
- "Expected ] got EOF")
- if argstr[j] == ']':
+ self.BadSyntax(argstr, i, "Expected ] got EOF")
+ if argstr[j] == "]":
res.append(self.blankNode())
return j + 1
return -1
@@ -66,8 +65,8 @@ class TrigSinkParser(SinkParser):
raise Exception if it looks like a graph, but isn't.
"""
- #import pdb; pdb.set_trace()
- j = self.sparqlTok('GRAPH', argstr, i) # optional GRAPH keyword
+ # import pdb; pdb.set_trace()
+ j = self.sparqlTok("GRAPH", argstr, i) # optional GRAPH keyword
if j >= 0:
i = j
@@ -81,10 +80,9 @@ class TrigSinkParser(SinkParser):
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF found when expected graph")
+ self.BadSyntax(argstr, i, "EOF found when expected graph")
- if argstr[j:j + 1] == "=": # optional = for legacy support
+ if argstr[j : j + 1] == "=": # optional = for legacy support
i = self.skipSpace(argstr, j + 1)
if i < 0:
@@ -92,7 +90,7 @@ class TrigSinkParser(SinkParser):
else:
i = j
- if argstr[i:i + 1] != "{":
+ if argstr[i : i + 1] != "{":
return -1 # the node wasn't part of a graph
j = i + 1
@@ -106,17 +104,15 @@ class TrigSinkParser(SinkParser):
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(
- argstr, i, "needed '}', found end.")
+ self.BadSyntax(argstr, i, "needed '}', found end.")
- if argstr[i:i + 1] == "}":
+ if argstr[i : i + 1] == "}":
j = i + 1
break
j = self.directiveOrStatement(argstr, i)
if j < 0:
- self.BadSyntax(
- argstr, i, "expected statement or '}'")
+ self.BadSyntax(argstr, i, "expected statement or '}'")
self._context = self._parentContext
self._reason2 = reason2
@@ -138,22 +134,23 @@ class TrigParser(Parser):
if encoding not in [None, "utf-8"]:
raise Exception(
- ("TriG files are always utf-8 encoded, ",
- "I was passed: %s") % encoding)
+ ("TriG files are always utf-8 encoded, ", "I was passed: %s") % encoding
+ )
# we're currently being handed a Graph, not a ConjunctiveGraph
assert graph.store.context_aware, "TriG Parser needs a context-aware store!"
conj_graph = ConjunctiveGraph(store=graph.store, identifier=graph.identifier)
conj_graph.default_context = graph # TODO: CG __init__ should have a
- # default_context arg
+ # default_context arg
# TODO: update N3Processor so that it can use conj_graph as the sink
conj_graph.namespace_manager = graph.namespace_manager
sink = RDFSink(conj_graph)
baseURI = conj_graph.absolutize(
- source.getPublicId() or source.getSystemId() or "")
+ source.getPublicId() or source.getSystemId() or ""
+ )
p = TrigSinkParser(sink, baseURI=baseURI, turtle=True)
p.loadStream(source.getByteStream())
diff --git a/rdflib/plugins/parsers/trix.py b/rdflib/plugins/parsers/trix.py
index ffd883fe..56819514 100644
--- a/rdflib/plugins/parsers/trix.py
+++ b/rdflib/plugins/parsers/trix.py
@@ -14,7 +14,7 @@ from xml.sax.saxutils import handler
from xml.sax import make_parser
from xml.sax.handler import ErrorHandler
-__all__ = ['create_parser', 'TriXHandler', 'TriXParser']
+__all__ = ["create_parser", "TriXHandler", "TriXParser"]
TRIXNS = Namespace("http://www.w3.org/2004/03/trix/trix-1/")
@@ -56,7 +56,8 @@ class TriXHandler(handler.ContentHandler):
if name[0] != str(TRIXNS):
self.error(
"Only elements in the TriX namespace are allowed. %s!=%s"
- % (name[0], TRIXNS))
+ % (name[0], TRIXNS)
+ )
if name[1] == "TriX":
if self.state == 0:
@@ -143,46 +144,55 @@ class TriXHandler(handler.ContentHandler):
if name[0] != str(TRIXNS):
self.error(
"Only elements in the TriX namespace are allowed. %s!=%s"
- % (name[0], TRIXNS))
+ % (name[0], TRIXNS)
+ )
if name[1] == "uri":
if self.state == 3:
- self.graph = Graph(store=self.store,
- identifier=URIRef(self.chars.strip()))
+ self.graph = Graph(
+ store=self.store, identifier=URIRef(self.chars.strip())
+ )
self.state = 2
elif self.state == 4:
self.triple += [URIRef(self.chars.strip())]
else:
self.error(
- "Illegal internal self.state - This should never " +
- "happen if the SAX parser ensures XML syntax correctness")
+ "Illegal internal self.state - This should never "
+ + "happen if the SAX parser ensures XML syntax correctness"
+ )
elif name[1] == "id":
if self.state == 3:
- self.graph = Graph(self.store, identifier=self.get_bnode(
- self.chars.strip()))
+ self.graph = Graph(
+ self.store, identifier=self.get_bnode(self.chars.strip())
+ )
self.state = 2
elif self.state == 4:
self.triple += [self.get_bnode(self.chars.strip())]
else:
self.error(
- "Illegal internal self.state - This should never " +
- "happen if the SAX parser ensures XML syntax correctness")
+ "Illegal internal self.state - This should never "
+ + "happen if the SAX parser ensures XML syntax correctness"
+ )
elif name[1] == "plainLiteral" or name[1] == "typedLiteral":
if self.state == 4:
- self.triple += [Literal(
- self.chars, lang=self.lang, datatype=self.datatype)]
+ self.triple += [
+ Literal(self.chars, lang=self.lang, datatype=self.datatype)
+ ]
else:
self.error(
- "This should never happen if the SAX parser " +
- "ensures XML syntax correctness")
+ "This should never happen if the SAX parser "
+ + "ensures XML syntax correctness"
+ )
elif name[1] == "triple":
if self.state == 4:
if len(self.triple) != 3:
- self.error("Triple has wrong length, got %d elements: %s" %
- (len(self.triple), self.triple))
+ self.error(
+ "Triple has wrong length, got %d elements: %s"
+ % (len(self.triple), self.triple)
+ )
self.graph.add(self.triple)
# self.store.store.add(self.triple,context=self.graph)
@@ -190,8 +200,9 @@ class TriXHandler(handler.ContentHandler):
self.state = 2
else:
self.error(
- "This should never happen if the SAX parser " +
- "ensures XML syntax correctness")
+ "This should never happen if the SAX parser "
+ + "ensures XML syntax correctness"
+ )
elif name[1] == "graph":
self.graph = None
@@ -228,7 +239,8 @@ class TriXHandler(handler.ContentHandler):
info = "%s:%s:%s: " % (
locator.getSystemId(),
locator.getLineNumber(),
- locator.getColumnNumber())
+ locator.getColumnNumber(),
+ )
raise ParserError(info + message)
@@ -237,8 +249,7 @@ def create_parser(store):
try:
# Workaround for bug in expatreader.py. Needed when
# expatreader is trying to guess a prefix.
- parser.start_namespace_decl(
- "xml", "http://www.w3.org/XML/1998/namespace")
+ parser.start_namespace_decl("xml", "http://www.w3.org/XML/1998/namespace")
except AttributeError:
pass # Not present in Jython (at least)
parser.setFeature(handler.feature_namespaces, 1)
@@ -255,8 +266,9 @@ class TriXParser(Parser):
pass
def parse(self, source, sink, **args):
- assert sink.store.context_aware, (
- "TriXParser must be given a context aware store.")
+ assert (
+ sink.store.context_aware
+ ), "TriXParser must be given a context aware store."
self._parser = create_parser(sink.store)
content_handler = self._parser.getContentHandler()
diff --git a/rdflib/plugins/serializers/n3.py b/rdflib/plugins/serializers/n3.py
index c5efc735..6c4e2ec4 100644
--- a/rdflib/plugins/serializers/n3.py
+++ b/rdflib/plugins/serializers/n3.py
@@ -3,10 +3,9 @@ Notation 3 (N3) RDF graph serializer for RDFLib.
"""
from rdflib.graph import Graph
from rdflib.namespace import Namespace, OWL
-from rdflib.plugins.serializers.turtle import (
- TurtleSerializer, SUBJECT, OBJECT)
+from rdflib.plugins.serializers.turtle import TurtleSerializer, SUBJECT, OBJECT
-__all__ = ['N3Serializer']
+__all__ = ["N3Serializer"]
SWAP_LOG = Namespace("http://www.w3.org/2000/10/swap/log#")
@@ -17,10 +16,7 @@ class N3Serializer(TurtleSerializer):
def __init__(self, store, parent=None):
super(N3Serializer, self).__init__(store)
- self.keywords.update({
- OWL.sameAs: '=',
- SWAP_LOG.implies: '=>'
- })
+ self.keywords.update({OWL.sameAs: "=", SWAP_LOG.implies: "=>"})
self.parent = parent
def reset(self):
@@ -33,8 +29,9 @@ class N3Serializer(TurtleSerializer):
self.parent.subjectDone(subject)
def isDone(self, subject):
- return (super(N3Serializer, self).isDone(subject)
- and (not self.parent or self.parent.isDone(subject)))
+ return super(N3Serializer, self).isDone(subject) and (
+ not self.parent or self.parent.isDone(subject)
+ )
def startDocument(self):
super(N3Serializer, self).startDocument()
@@ -88,8 +85,7 @@ class N3Serializer(TurtleSerializer):
properties = self.buildPredicateHash(subject)
if len(properties) == 0:
return False
- return (self.s_clause(subject)
- or super(N3Serializer, self).statement(subject))
+ return self.s_clause(subject) or super(N3Serializer, self).statement(subject)
def path(self, node, position, newline=False):
if not self.p_clause(node, position):
@@ -97,10 +93,10 @@ class N3Serializer(TurtleSerializer):
def s_clause(self, subject):
if isinstance(subject, Graph):
- self.write('\n' + self.indent())
+ self.write("\n" + self.indent())
self.p_clause(subject, SUBJECT)
self.predicateList(subject)
- self.write(' .')
+ self.write(" .")
return True
else:
return False
@@ -109,13 +105,13 @@ class N3Serializer(TurtleSerializer):
if isinstance(node, Graph):
self.subjectDone(node)
if position is OBJECT:
- self.write(' ')
- self.write('{')
+ self.write(" ")
+ self.write("{")
self.depth += 1
serializer = N3Serializer(node, parent=self)
serializer.serialize(self.stream)
self.depth -= 1
- self.write(self.indent() + '}')
+ self.write(self.indent() + "}")
return True
else:
return False
diff --git a/rdflib/plugins/serializers/nquads.py b/rdflib/plugins/serializers/nquads.py
index a193e125..70c414cd 100644
--- a/rdflib/plugins/serializers/nquads.py
+++ b/rdflib/plugins/serializers/nquads.py
@@ -5,16 +5,15 @@ from rdflib.serializer import Serializer
from rdflib.plugins.serializers.nt import _quoteLiteral
-__all__ = ['NQuadsSerializer']
+__all__ = ["NQuadsSerializer"]
class NQuadsSerializer(Serializer):
-
def __init__(self, store):
if not store.context_aware:
raise Exception(
- "NQuads serialization only makes "
- "sense for context-aware stores!")
+ "NQuads serialization only makes " "sense for context-aware stores!"
+ )
super(NQuadsSerializer, self).__init__(store)
@@ -26,19 +25,24 @@ class NQuadsSerializer(Serializer):
encoding = self.encoding
for context in self.store.contexts():
for triple in context:
- stream.write(_nq_row(
- triple, context.identifier).encode(encoding, "replace"))
+ stream.write(
+ _nq_row(triple, context.identifier).encode(encoding, "replace")
+ )
stream.write("\n".encode("latin-1"))
def _nq_row(triple, context):
if isinstance(triple[2], Literal):
- return u"%s %s %s %s .\n" % (triple[0].n3(),
- triple[1].n3(),
- _quoteLiteral(triple[2]),
- context.n3())
+ return u"%s %s %s %s .\n" % (
+ triple[0].n3(),
+ triple[1].n3(),
+ _quoteLiteral(triple[2]),
+ context.n3(),
+ )
else:
- return u"%s %s %s %s .\n" % (triple[0].n3(),
- triple[1].n3(),
- triple[2].n3(),
- context.n3())
+ return u"%s %s %s %s .\n" % (
+ triple[0].n3(),
+ triple[1].n3(),
+ triple[2].n3(),
+ context.n3(),
+ )
diff --git a/rdflib/plugins/serializers/nt.py b/rdflib/plugins/serializers/nt.py
index 95a88ae3..94632155 100644
--- a/rdflib/plugins/serializers/nt.py
+++ b/rdflib/plugins/serializers/nt.py
@@ -9,7 +9,7 @@ from rdflib.serializer import Serializer
import warnings
import codecs
-__all__ = ['NTSerializer']
+__all__ = ["NTSerializer"]
class NTSerializer(Serializer):
@@ -19,7 +19,7 @@ class NTSerializer(Serializer):
def __init__(self, store):
Serializer.__init__(self, store)
- self.encoding = 'ascii' # n-triples are ascii encoded
+ self.encoding = "ascii" # n-triples are ascii encoded
def serialize(self, stream, base=None, encoding=None, **args):
if base is not None:
@@ -48,35 +48,33 @@ def _nt_row(triple):
return u"%s %s %s .\n" % (
triple[0].n3(),
triple[1].n3(),
- _quoteLiteral(triple[2]))
+ _quoteLiteral(triple[2]),
+ )
else:
- return u"%s %s %s .\n" % (triple[0].n3(),
- triple[1].n3(),
- triple[2].n3())
+ return u"%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), triple[2].n3())
def _quoteLiteral(l):
- '''
+ """
a simpler version of term.Literal.n3()
- '''
+ """
encoded = _quote_encode(l)
if l.language:
if l.datatype:
raise Exception("Literal has datatype AND language!")
- return '%s@%s' % (encoded, l.language)
+ return "%s@%s" % (encoded, l.language)
elif l.datatype:
- return '%s^^<%s>' % (encoded, l.datatype)
+ return "%s^^<%s>" % (encoded, l.datatype)
else:
- return '%s' % encoded
+ return "%s" % encoded
def _quote_encode(l):
- return '"%s"' % l.replace('\\', '\\\\')\
- .replace('\n', '\\n')\
- .replace('"', '\\"')\
- .replace('\r', '\\r')
+ return '"%s"' % l.replace("\\", "\\\\").replace("\n", "\\n").replace(
+ '"', '\\"'
+ ).replace("\r", "\\r")
def _nt_unicode_error_resolver(err):
@@ -86,11 +84,11 @@ def _nt_unicode_error_resolver(err):
def _replace_single(c):
c = ord(c)
- fmt = u'\\u%04X' if c <= 0xFFFF else u'\\U%08X'
+ fmt = u"\\u%04X" if c <= 0xFFFF else u"\\U%08X"
return fmt % c
- string = err.object[err.start:err.end]
+ string = err.object[err.start : err.end]
return ("".join(_replace_single(c) for c in string), err.end)
-codecs.register_error('_rdflib_nt_escape', _nt_unicode_error_resolver)
+codecs.register_error("_rdflib_nt_escape", _nt_unicode_error_resolver)
diff --git a/rdflib/plugins/serializers/rdfxml.py b/rdflib/plugins/serializers/rdfxml.py
index d3a705d2..425c0a7e 100644
--- a/rdflib/plugins/serializers/rdfxml.py
+++ b/rdflib/plugins/serializers/rdfxml.py
@@ -14,11 +14,10 @@ import xml.dom.minidom
from .xmlwriter import ESCAPE_ENTITIES
-__all__ = ['fix', 'XMLSerializer', 'PrettyXMLSerializer']
+__all__ = ["fix", "XMLSerializer", "PrettyXMLSerializer"]
class XMLSerializer(Serializer):
-
def __init__(self, store):
super(XMLSerializer, self).__init__(store)
@@ -50,18 +49,17 @@ class XMLSerializer(Serializer):
self.__stream = stream
self.__serialized = {}
encoding = self.encoding
- self.write = write = lambda uni: stream.write(
- uni.encode(encoding, 'replace'))
+ self.write = write = lambda uni: stream.write(uni.encode(encoding, "replace"))
# startDocument
write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding)
# startRDF
- write('<rdf:RDF\n')
+ write("<rdf:RDF\n")
# If provided, write xml:base attribute for the RDF
if "xml_base" in args:
- write(' xml:base="%s"\n' % args['xml_base'])
+ write(' xml:base="%s"\n' % args["xml_base"])
elif self.base:
write(' xml:base="%s"\n' % self.base)
# TODO:
@@ -75,7 +73,7 @@ class XMLSerializer(Serializer):
write(' xmlns:%s="%s"\n' % (prefix, namespace))
else:
write(' xmlns="%s"\n' % namespace)
- write('>\n')
+ write(">\n")
# write out triples by subject
for subject in self.store.subjects():
@@ -98,8 +96,7 @@ class XMLSerializer(Serializer):
element_name = "rdf:Description"
if isinstance(subject, BNode):
- write('%s<%s rdf:nodeID="%s"' % (
- indent, element_name, subject))
+ write('%s<%s rdf:nodeID="%s"' % (indent, element_name, subject))
else:
uri = quoteattr(self.relativize(subject))
write("%s<%s rdf:about=%s" % (indent, element_name, uri))
@@ -107,8 +104,7 @@ class XMLSerializer(Serializer):
if (subject, None, None) in self.store:
write(">\n")
- for predicate, object in self.store.predicate_objects(
- subject):
+ for predicate, object in self.store.predicate_objects(subject):
self.predicate(predicate, object, depth + 1)
write("%s</%s>\n" % (indent, element_name))
@@ -129,22 +125,24 @@ class XMLSerializer(Serializer):
if object.datatype:
attributes += ' rdf:datatype="%s"' % object.datatype
- write("%s<%s%s>%s</%s>\n" %
- (indent, qname, attributes,
- escape(object, ESCAPE_ENTITIES), qname))
+ write(
+ "%s<%s%s>%s</%s>\n"
+ % (indent, qname, attributes, escape(object, ESCAPE_ENTITIES), qname)
+ )
else:
if isinstance(object, BNode):
- write('%s<%s rdf:nodeID="%s"/>\n' %
- (indent, qname, object))
+ write('%s<%s rdf:nodeID="%s"/>\n' % (indent, qname, object))
else:
- write("%s<%s rdf:resource=%s/>\n" %
- (indent, qname, quoteattr(self.relativize(object))))
+ write(
+ "%s<%s rdf:resource=%s/>\n"
+ % (indent, qname, quoteattr(self.relativize(object)))
+ )
XMLLANG = "http://www.w3.org/XML/1998/namespacelang"
XMLBASE = "http://www.w3.org/XML/1998/namespacebase"
-OWL_NS = Namespace('http://www.w3.org/2002/07/owl#')
+OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
# TODO:
@@ -157,7 +155,6 @@ def fix(val):
class PrettyXMLSerializer(Serializer):
-
def __init__(self, store, max_depth=3):
super(PrettyXMLSerializer, self).__init__(store)
self.forceRDFAbout = set()
@@ -177,8 +174,7 @@ class PrettyXMLSerializer(Serializer):
self.writer = writer = XMLWriter(stream, nm, encoding)
namespaces = {}
- possible = set(store.predicates()).union(
- store.objects(None, RDF.type))
+ possible = set(store.predicates()).union(store.objects(None, RDF.type))
for predicate in possible:
prefix, namespace, local = nm.compute_qname_strict(predicate)
@@ -247,6 +243,7 @@ class PrettyXMLSerializer(Serializer):
writer.push(element)
if isinstance(subject, BNode):
+
def subj_as_obj_more_than(ceil):
return True
# more_than(store.triples((None, None, subject)), ceil)
@@ -282,8 +279,9 @@ class PrettyXMLSerializer(Serializer):
if object.language:
writer.attribute(XMLLANG, object.language)
- if (object.datatype == RDF.XMLLiteral
- and isinstance(object.value, xml.dom.minidom.Document)):
+ if object.datatype == RDF.XMLLiteral and isinstance(
+ object.value, xml.dom.minidom.Document
+ ):
writer.attribute(RDF.parseType, "Literal")
writer.text(u"")
writer.stream.write(object)
@@ -302,17 +300,20 @@ class PrettyXMLSerializer(Serializer):
else:
if first(store.objects(object, RDF.first)): # may not have type
- # RDF.List
+ # RDF.List
self.__serialized[object] = 1
# Warn that any assertions on object other than
# RDF.first and RDF.rest are ignored... including RDF.List
import warnings
+
warnings.warn(
- "Assertions on %s other than RDF.first " % repr(object) +
- "and RDF.rest are ignored ... including RDF.List",
- UserWarning, stacklevel=2)
+ "Assertions on %s other than RDF.first " % repr(object)
+ + "and RDF.rest are ignored ... including RDF.List",
+ UserWarning,
+ stacklevel=2,
+ )
writer.attribute(RDF.parseType, "Collection")
col = Collection(store, object)
@@ -326,9 +327,11 @@ class PrettyXMLSerializer(Serializer):
if not isinstance(item, URIRef):
self.__serialized[item] = 1
else:
- if first(store.triples_choices(
- (object, RDF.type, [OWL_NS.Class, RDFS.Class]))) \
- and isinstance(object, URIRef):
+ if first(
+ store.triples_choices(
+ (object, RDF.type, [OWL_NS.Class, RDFS.Class])
+ )
+ ) and isinstance(object, URIRef):
writer.attribute(RDF.resource, self.relativize(object))
elif depth <= self.max_depth:
@@ -336,9 +339,11 @@ class PrettyXMLSerializer(Serializer):
elif isinstance(object, BNode):
- if not object in self.__serialized \
- and (object, None, None) in store \
- and len(list(store.subjects(object=object))) == 1:
+ if (
+ not object in self.__serialized
+ and (object, None, None) in store
+ and len(list(store.subjects(object=object))) == 1
+ ):
# inline blank nodes if they haven't been serialized yet
# and are only referenced once (regardless of depth)
self.subject(object, depth + 1)
diff --git a/rdflib/plugins/serializers/trig.py b/rdflib/plugins/serializers/trig.py
index 755587dc..432224e0 100644
--- a/rdflib/plugins/serializers/trig.py
+++ b/rdflib/plugins/serializers/trig.py
@@ -8,13 +8,13 @@ from collections import defaultdict
from rdflib.plugins.serializers.turtle import TurtleSerializer, _GEN_QNAME_FOR_DT, VERB
from rdflib.term import BNode, Literal
-__all__ = ['TrigSerializer']
+__all__ = ["TrigSerializer"]
class TrigSerializer(TurtleSerializer):
short_name = "trig"
- indentString = 4 * u' '
+ indentString = 4 * u" "
def __init__(self, store):
if store.context_aware:
@@ -38,14 +38,17 @@ class TrigSerializer(TurtleSerializer):
for triple in context:
self.preprocessTriple(triple)
- self._contexts[context] = (self.orderSubjects(), self._subjects, self._references)
+ self._contexts[context] = (
+ self.orderSubjects(),
+ self._subjects,
+ self._references,
+ )
def reset(self):
super(TrigSerializer, self).reset()
self._contexts = {}
- def serialize(self, stream, base=None, encoding=None,
- spacious=None, **args):
+ def serialize(self, stream, base=None, encoding=None, spacious=None, **args):
self.reset()
self.stream = stream
# if base is given here, use that, if not and a base is set for the graph use that
@@ -72,7 +75,7 @@ class TrigSerializer(TurtleSerializer):
self._subjects = subjects
if self.default_context and store.identifier == self.default_context:
- self.write(self.indent() + '\n{')
+ self.write(self.indent() + "\n{")
else:
if isinstance(store.identifier, BNode):
iri = store.identifier.n3()
@@ -80,7 +83,7 @@ class TrigSerializer(TurtleSerializer):
iri = self.getQName(store.identifier)
if iri is None:
iri = store.identifier.n3()
- self.write(self.indent() + '\n%s {' % iri)
+ self.write(self.indent() + "\n%s {" % iri)
self.depth += 1
for subject in ordered_subjects:
@@ -89,9 +92,9 @@ class TrigSerializer(TurtleSerializer):
if firstTime:
firstTime = False
if self.statement(subject) and not firstTime:
- self.write('\n')
+ self.write("\n")
self.depth -= 1
- self.write('}\n')
+ self.write("}\n")
self.endDocument()
stream.write("\n".encode("latin-1"))
diff --git a/rdflib/plugins/serializers/trix.py b/rdflib/plugins/serializers/trix.py
index e6651c70..f6115bf8 100644
--- a/rdflib/plugins/serializers/trix.py
+++ b/rdflib/plugins/serializers/trix.py
@@ -7,7 +7,7 @@ from rdflib.namespace import Namespace
from rdflib.graph import Graph, ConjunctiveGraph
-__all__ = ['TriXSerializer']
+__all__ = ["TriXSerializer"]
# TODO: Move this somewhere central
TRIXNS = Namespace("http://www.w3.org/2004/03/trix/trix-1/")
@@ -19,7 +19,8 @@ class TriXSerializer(Serializer):
super(TriXSerializer, self).__init__(store)
if not store.context_aware:
raise Exception(
- "TriX serialization only makes sense for context-aware stores")
+ "TriX serialization only makes sense for context-aware stores"
+ )
def serialize(self, stream, base=None, encoding=None, **args):
@@ -49,10 +50,11 @@ class TriXSerializer(Serializer):
def _writeGraph(self, graph):
self.writer.push(TRIXNS[u"graph"])
if graph.base:
- self.writer.attribute("http://www.w3.org/XML/1998/namespacebase", graph.base)
+ self.writer.attribute(
+ "http://www.w3.org/XML/1998/namespacebase", graph.base
+ )
if isinstance(graph.identifier, URIRef):
- self.writer.element(
- TRIXNS[u"uri"], content=str(graph.identifier))
+ self.writer.element(TRIXNS[u"uri"], content=str(graph.identifier))
for triple in graph.triples((None, None, None)):
self._writeTriple(triple)
@@ -62,23 +64,22 @@ class TriXSerializer(Serializer):
self.writer.push(TRIXNS[u"triple"])
for component in triple:
if isinstance(component, URIRef):
- self.writer.element(TRIXNS[u"uri"],
- content=str(component))
+ self.writer.element(TRIXNS[u"uri"], content=str(component))
elif isinstance(component, BNode):
- self.writer.element(TRIXNS[u"id"],
- content=str(component))
+ self.writer.element(TRIXNS[u"id"], content=str(component))
elif isinstance(component, Literal):
if component.datatype:
- self.writer.element(TRIXNS[u"typedLiteral"],
- content=str(component),
- attributes={TRIXNS[u"datatype"]:
- str(component.datatype)})
+ self.writer.element(
+ TRIXNS[u"typedLiteral"],
+ content=str(component),
+ attributes={TRIXNS[u"datatype"]: str(component.datatype)},
+ )
elif component.language:
- self.writer.element(TRIXNS[u"plainLiteral"],
- content=str(component),
- attributes={XMLNS[u"lang"]:
- str(component.language)})
+ self.writer.element(
+ TRIXNS[u"plainLiteral"],
+ content=str(component),
+ attributes={XMLNS[u"lang"]: str(component.language)},
+ )
else:
- self.writer.element(TRIXNS[u"plainLiteral"],
- content=str(component))
+ self.writer.element(TRIXNS[u"plainLiteral"], content=str(component))
self.writer.pop()
diff --git a/rdflib/plugins/serializers/turtle.py b/rdflib/plugins/serializers/turtle.py
index b89ff2d8..52693a8c 100644
--- a/rdflib/plugins/serializers/turtle.py
+++ b/rdflib/plugins/serializers/turtle.py
@@ -11,7 +11,7 @@ from rdflib.exceptions import Error
from rdflib.serializer import Serializer
from rdflib.namespace import RDF, RDFS
-__all__ = ['RecursiveSerializer', 'TurtleSerializer']
+__all__ = ["RecursiveSerializer", "TurtleSerializer"]
def _object_comparator(a, b):
@@ -52,16 +52,20 @@ class RecursiveSerializer(Serializer):
def addNamespace(self, prefix, uri):
if prefix in self.namespaces and self.namespaces[prefix] != uri:
- raise Exception("Trying to override namespace prefix %s => %s, but it's already bound to %s" % (prefix, uri, self.namespaces[prefix]))
+ raise Exception(
+ "Trying to override namespace prefix %s => %s, but it's already bound to %s"
+ % (prefix, uri, self.namespaces[prefix])
+ )
self.namespaces[prefix] = uri
def checkSubject(self, subject):
"""Check to see if the subject should be serialized yet"""
- if ((self.isDone(subject))
+ if (
+ (self.isDone(subject))
or (subject not in self._subjects)
or ((subject in self._topLevels) and (self.depth > 1))
- or (isinstance(subject, URIRef) and
- (self.depth >= self.maxDepth))):
+ or (isinstance(subject, URIRef) and (self.depth >= self.maxDepth))
+ ):
return False
return True
@@ -83,9 +87,10 @@ class RecursiveSerializer(Serializer):
seen[member] = True
recursable = [
- (isinstance(subject, BNode),
- self._references[subject], subject)
- for subject in self._subjects if subject not in seen]
+ (isinstance(subject, BNode), self._references[subject], subject)
+ for subject in self._subjects
+ if subject not in seen
+ ]
recursable.sort()
subjects.extend([subject for (isbnode, refs, subject) in recursable])
@@ -111,7 +116,7 @@ class RecursiveSerializer(Serializer):
self._topLevels = {}
if self.roundtrip_prefixes:
- if hasattr(self.roundtrip_prefixes, '__iter__'):
+ if hasattr(self.roundtrip_prefixes, "__iter__"):
for prefix, ns in self.store.namespaces():
if prefix in self.roundtrip_prefixes:
self.addNamespace(prefix, ns)
@@ -163,7 +168,7 @@ class RecursiveSerializer(Serializer):
def write(self, text):
"""Write text in given encoding."""
- self.stream.write(text.encode(self.encoding, 'replace'))
+ self.stream.write(text.encode(self.encoding, "replace"))
SUBJECT = 0
@@ -177,14 +182,12 @@ _SPACIOUS_OUTPUT = False
class TurtleSerializer(RecursiveSerializer):
short_name = "turtle"
- indentString = ' '
+ indentString = " "
def __init__(self, store):
self._ns_rewrite = {}
super(TurtleSerializer, self).__init__(store)
- self.keywords = {
- RDF.type: 'a'
- }
+ self.keywords = {RDF.type: "a"}
self.reset()
self.stream = None
self._spacious = _SPACIOUS_OUTPUT
@@ -199,8 +202,9 @@ class TurtleSerializer(RecursiveSerializer):
# so we need to keep track of ns rewrites we made so far.
- if (prefix > '' and prefix[0] == '_') \
- or self.namespaces.get(prefix, namespace) != namespace:
+ if (prefix > "" and prefix[0] == "_") or self.namespaces.get(
+ prefix, namespace
+ ) != namespace:
if prefix not in self._ns_rewrite:
p = "p" + prefix
@@ -219,8 +223,7 @@ class TurtleSerializer(RecursiveSerializer):
self._started = False
self._ns_rewrite = {}
- def serialize(self, stream, base=None, encoding=None,
- spacious=None, **args):
+ def serialize(self, stream, base=None, encoding=None, spacious=None, **args):
self.reset()
self.stream = stream
# if base is given here, use that, if not and a base is set for the graph use that
@@ -244,7 +247,7 @@ class TurtleSerializer(RecursiveSerializer):
if firstTime:
firstTime = False
if self.statement(subject) and not firstTime:
- self.write('\n')
+ self.write("\n")
self.endDocument()
stream.write("\n".encode("latin-1"))
@@ -278,7 +281,7 @@ class TurtleSerializer(RecursiveSerializer):
pfx = self.store.store.prefix(uri)
if pfx is not None:
- parts = (pfx, uri, '')
+ parts = (pfx, uri, "")
else:
# nothing worked
return None
@@ -291,95 +294,99 @@ class TurtleSerializer(RecursiveSerializer):
prefix = self.addNamespace(prefix, namespace)
- return u'%s:%s' % (prefix, local)
+ return u"%s:%s" % (prefix, local)
def startDocument(self):
self._started = True
ns_list = sorted(self.namespaces.items())
if self.base:
- self.write(self.indent() + '@base <%s> .\n' % self.base)
+ self.write(self.indent() + "@base <%s> .\n" % self.base)
for prefix, uri in ns_list:
- self.write(self.indent() + '@prefix %s: <%s> .\n' % (prefix, uri))
+ self.write(self.indent() + "@prefix %s: <%s> .\n" % (prefix, uri))
if ns_list and self._spacious:
- self.write('\n')
+ self.write("\n")
def endDocument(self):
if self._spacious:
- self.write('\n')
+ self.write("\n")
def statement(self, subject):
self.subjectDone(subject)
return self.s_squared(subject) or self.s_default(subject)
def s_default(self, subject):
- self.write('\n' + self.indent())
+ self.write("\n" + self.indent())
self.path(subject, SUBJECT)
self.predicateList(subject)
- self.write(' .')
+ self.write(" .")
return True
def s_squared(self, subject):
if (self._references[subject] > 0) or not isinstance(subject, BNode):
return False
- self.write('\n' + self.indent() + '[]')
+ self.write("\n" + self.indent() + "[]")
self.predicateList(subject)
- self.write(' .')
+ self.write(" .")
return True
def path(self, node, position, newline=False):
- if not (self.p_squared(node, position, newline)
- or self.p_default(node, position, newline)):
- raise Error("Cannot serialize node '%s'" % (node, ))
+ if not (
+ self.p_squared(node, position, newline)
+ or self.p_default(node, position, newline)
+ ):
+ raise Error("Cannot serialize node '%s'" % (node,))
def p_default(self, node, position, newline=False):
if position != SUBJECT and not newline:
- self.write(' ')
+ self.write(" ")
self.write(self.label(node, position))
return True
def label(self, node, position):
if node == RDF.nil:
- return '()'
+ return "()"
if position is VERB and node in self.keywords:
return self.keywords[node]
if isinstance(node, Literal):
return node._literal_n3(
use_plain=True,
- qname_callback=lambda dt: self.getQName(
- dt, _GEN_QNAME_FOR_DT))
+ qname_callback=lambda dt: self.getQName(dt, _GEN_QNAME_FOR_DT),
+ )
else:
node = self.relativize(node)
return self.getQName(node, position == VERB) or node.n3()
def p_squared(self, node, position, newline=False):
- if (not isinstance(node, BNode)
- or node in self._serialized
- or self._references[node] > 1
- or position == SUBJECT):
+ if (
+ not isinstance(node, BNode)
+ or node in self._serialized
+ or self._references[node] > 1
+ or position == SUBJECT
+ ):
return False
if not newline:
- self.write(' ')
+ self.write(" ")
if self.isValidList(node):
# this is a list
- self.write('(')
+ self.write("(")
self.depth += 1 # 2
self.doList(node)
self.depth -= 1 # 2
- self.write(' )')
+ self.write(" )")
else:
self.subjectDone(node)
self.depth += 2
# self.write('[\n' + self.indent())
- self.write('[')
+ self.write("[")
self.depth -= 1
# self.predicateList(node, newline=True)
self.predicateList(node, newline=False)
# self.write('\n' + self.indent() + ']')
- self.write(' ]')
+ self.write(" ]")
self.depth -= 1
return True
@@ -394,8 +401,7 @@ class TurtleSerializer(RecursiveSerializer):
except:
return False
while l:
- if l != RDF.nil and len(
- list(self.store.predicate_objects(l))) != 2:
+ if l != RDF.nil and len(list(self.store.predicate_objects(l))) != 2:
return False
l = self.store.value(l, RDF.rest)
return True
@@ -416,7 +422,7 @@ class TurtleSerializer(RecursiveSerializer):
self.verb(propList[0], newline=newline)
self.objectList(properties[propList[0]])
for predicate in propList[1:]:
- self.write(' ;\n' + self.indent(1))
+ self.write(" ;\n" + self.indent(1))
self.verb(predicate, newline=True)
self.objectList(properties[predicate])
@@ -431,6 +437,6 @@ class TurtleSerializer(RecursiveSerializer):
self.depth += depthmod
self.path(objects[0], OBJECT)
for obj in objects[1:]:
- self.write(',\n' + self.indent(1))
+ self.write(",\n" + self.indent(1))
self.path(obj, OBJECT, newline=True)
self.depth -= depthmod
diff --git a/rdflib/plugins/serializers/xmlwriter.py b/rdflib/plugins/serializers/xmlwriter.py
index de720e8c..b6f0acb5 100644
--- a/rdflib/plugins/serializers/xmlwriter.py
+++ b/rdflib/plugins/serializers/xmlwriter.py
@@ -1,19 +1,15 @@
import codecs
from xml.sax.saxutils import quoteattr, escape
-__all__ = ['XMLWriter']
+__all__ = ["XMLWriter"]
-ESCAPE_ENTITIES = {
- '\r': '&#13;'
-}
+ESCAPE_ENTITIES = {"\r": "&#13;"}
class XMLWriter(object):
- def __init__(self, stream, namespace_manager, encoding=None,
- decl=1, extra_ns=None):
- encoding = encoding or 'utf-8'
- encoder, decoder, stream_reader, stream_writer = \
- codecs.lookup(encoding)
+ def __init__(self, stream, namespace_manager, encoding=None, decl=1, extra_ns=None):
+ encoding = encoding or "utf-8"
+ encoder, decoder, stream_reader, stream_writer = codecs.lookup(encoding)
self.stream = stream = stream_writer(stream)
if decl:
stream.write('<?xml version="1.0" encoding="%s"?>' % encoding)
@@ -24,6 +20,7 @@ class XMLWriter(object):
def __get_indent(self):
return " " * len(self.element_stack)
+
indent = property(__get_indent)
def __close_start_tag(self):
@@ -103,8 +100,8 @@ class XMLWriter(object):
for pre, ns in self.extra_ns.items():
if uri.startswith(ns):
if pre != "":
- return ":".join(pre, uri[len(ns):])
+ return ":".join(pre, uri[len(ns) :])
else:
- return uri[len(ns):]
+ return uri[len(ns) :]
return self.nm.qname_strict(uri)
diff --git a/rdflib/plugins/sleepycat.py b/rdflib/plugins/sleepycat.py
index 2d08b45b..7729969e 100644
--- a/rdflib/plugins/sleepycat.py
+++ b/rdflib/plugins/sleepycat.py
@@ -8,15 +8,17 @@ from urllib.request import pathname2url
def bb(u):
- return u.encode('utf-8')
+ return u.encode("utf-8")
try:
from bsddb import db
+
has_bsddb = True
except ImportError:
try:
from bsddb3 import db
+
has_bsddb = True
except ImportError:
has_bsddb = False
@@ -36,7 +38,7 @@ if has_bsddb:
logger = logging.getLogger(__name__)
-__all__ = ['Sleepycat']
+__all__ = ["Sleepycat"]
class Sleepycat(Store):
@@ -48,8 +50,7 @@ class Sleepycat(Store):
def __init__(self, configuration=None, identifier=None):
if not has_bsddb:
- raise ImportError(
- "Unable to import bsddb/bsddb3, store is unusable.")
+ raise ImportError("Unable to import bsddb/bsddb3, store is unusable.")
self.__open = False
self.__identifier = identifier
super(Sleepycat, self).__init__(configuration)
@@ -58,6 +59,7 @@ class Sleepycat(Store):
def __get_identifier(self):
return self.__identifier
+
identifier = property(__get_identifier)
def _init_db_environment(self, homeDir, create=True):
@@ -108,11 +110,13 @@ class Sleepycat(Store):
dbsetflags = 0
# create and open the DBs
- self.__indicies = [None, ] * 3
- self.__indicies_info = [None, ] * 3
+ self.__indicies = [None,] * 3
+ self.__indicies_info = [None,] * 3
for i in range(0, 3):
- index_name = to_key_func(
- i)(("s".encode("latin-1"), "p".encode("latin-1"), "o".encode("latin-1")), "c".encode("latin-1")).decode()
+ index_name = to_key_func(i)(
+ ("s".encode("latin-1"), "p".encode("latin-1"), "o".encode("latin-1")),
+ "c".encode("latin-1"),
+ ).decode()
index = db.DB(db_env)
index.set_flags(dbsetflags)
index.open(index_name, dbname, dbtype, dbopenflags, dbmode)
@@ -148,13 +152,15 @@ class Sleepycat(Store):
yield triple[i % 3]
i += 1
yield ""
+
return get_prefix
lookup[i] = (
self.__indicies[start],
get_prefix_func(start, start + len),
from_key_func(start),
- results_from_key_func(start, self._from_string))
+ results_from_key_func(start, self._from_string),
+ )
self.__lookup_dict = lookup
@@ -187,6 +193,7 @@ class Sleepycat(Store):
def __sync_run(self):
from time import sleep, time
+
try:
min_seconds, max_seconds = 10, 300
while self.__open:
@@ -194,12 +201,11 @@ class Sleepycat(Store):
t0 = t1 = time()
self.__needs_sync = False
while self.__open:
- sleep(.1)
+ sleep(0.1)
if self.__needs_sync:
t1 = time()
self.__needs_sync = False
- if time() - t1 > min_seconds \
- or time() - t0 > max_seconds:
+ if time() - t1 > min_seconds or time() - t0 > max_seconds:
self.__needs_sync = False
logger.debug("sync")
self.sync()
@@ -254,7 +260,8 @@ class Sleepycat(Store):
self.__contexts.put(bb(c), "", txn=txn)
contexts_value = cspo.get(
- bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn) or "".encode("latin-1")
+ bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn
+ ) or "".encode("latin-1")
contexts = set(contexts_value.split("^".encode("latin-1")))
contexts.add(bb(c))
contexts_value = "^".encode("latin-1").join(contexts)
@@ -264,12 +271,9 @@ class Sleepycat(Store):
cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), "", txn=txn)
cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), "", txn=txn)
if not quoted:
- cspo.put(bb(
- "%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
- cpos.put(bb(
- "%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
- cosp.put(bb(
- "%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
+ cspo.put(bb("%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
+ cpos.put(bb("%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
+ cosp.put(bb("%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
self.__needs_sync = True
@@ -277,7 +281,11 @@ class Sleepycat(Store):
s, p, o = spo
cspo, cpos, cosp = self.__indicies
contexts_value = cspo.get(
- "^".encode("latin-1").join(["".encode("latin-1"), s, p, o, "".encode("latin-1")]), txn=txn) or "".encode("latin-1")
+ "^".encode("latin-1").join(
+ ["".encode("latin-1"), s, p, o, "".encode("latin-1")]
+ ),
+ txn=txn,
+ ) or "".encode("latin-1")
contexts = set(contexts_value.split("^".encode("latin-1")))
contexts.discard(c)
contexts_value = "^".encode("latin-1").join(contexts)
@@ -286,7 +294,11 @@ class Sleepycat(Store):
if not quoted:
if contexts_value:
for i, _to_key, _from_key in self.__indicies_info:
- i.put(_to_key((s, p, o), "".encode("latin-1")), contexts_value, txn=txn)
+ i.put(
+ _to_key((s, p, o), "".encode("latin-1")),
+ contexts_value,
+ txn=txn,
+ )
else:
for i, _to_key, _from_key in self.__indicies_info:
try:
@@ -304,23 +316,25 @@ class Sleepycat(Store):
if context == self:
context = None
- if subject is not None \
- and predicate is not None \
- and object is not None \
- and context is not None:
+ if (
+ subject is not None
+ and predicate is not None
+ and object is not None
+ and context is not None
+ ):
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
- value = self.__indicies[0].get(bb("%s^%s^%s^%s^" %
- (c, s, p, o)), txn=txn)
+ value = self.__indicies[0].get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
if value is not None:
self.__remove((bb(s), bb(p), bb(o)), bb(c), txn=txn)
self.__needs_sync = True
else:
cspo, cpos, cosp = self.__indicies
index, prefix, from_key, results_from_key = self.__lookup(
- (subject, predicate, object), context, txn=txn)
+ (subject, predicate, object), context, txn=txn
+ )
cursor = index.cursor(txn=txn)
try:
@@ -336,7 +350,7 @@ class Sleepycat(Store):
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
@@ -362,7 +376,8 @@ class Sleepycat(Store):
# remove((None, None, None), c)
try:
self.__contexts.delete(
- bb(_to_string(context, txn=txn)), txn=txn)
+ bb(_to_string(context, txn=txn)), txn=txn
+ )
except db.DBNotFoundError:
pass
@@ -380,7 +395,8 @@ class Sleepycat(Store):
# _from_string = self._from_string ## UNUSED
index, prefix, from_key, results_from_key = self.__lookup(
- (subject, predicate, object), context, txn=txn)
+ (subject, predicate, object), context, txn=txn
+ )
cursor = index.cursor(txn=txn)
try:
@@ -394,14 +410,13 @@ class Sleepycat(Store):
try:
cursor.set_range(key)
# Cheap hack so 2to3 doesn't convert to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
if key and key.startswith(prefix):
contexts_value = index.get(key, txn=txn)
- yield results_from_key(
- key, subject, predicate, object, contexts_value)
+ yield results_from_key(key, subject, predicate, object, contexts_value)
else:
break
@@ -425,7 +440,7 @@ class Sleepycat(Store):
if key.startswith(prefix):
count += 1
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
else:
break
cursor.close()
@@ -444,14 +459,14 @@ class Sleepycat(Store):
prefix = prefix.encode("utf-8")
ns = self.__namespace.get(prefix, None)
if ns is not None:
- return URIRef(ns.decode('utf-8'))
+ return URIRef(ns.decode("utf-8"))
return None
def prefix(self, namespace):
namespace = namespace.encode("utf-8")
prefix = self.__prefix.get(namespace, None)
if prefix is not None:
- return prefix.decode('utf-8')
+ return prefix.decode("utf-8")
return None
def namespaces(self):
@@ -460,9 +475,9 @@ class Sleepycat(Store):
current = cursor.first()
while current:
prefix, namespace = current
- results.append((prefix.decode('utf-8'), namespace.decode('utf-8')))
+ results.append((prefix.decode("utf-8"), namespace.decode("utf-8")))
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
cursor.close()
for prefix, namespace in results:
yield prefix, URIRef(namespace)
@@ -476,8 +491,7 @@ class Sleepycat(Store):
s = _to_string(s)
p = _to_string(p)
o = _to_string(o)
- contexts = self.__indicies[0].get(bb(
- "%s^%s^%s^%s^" % ("", s, p, o)))
+ contexts = self.__indicies[0].get(bb("%s^%s^%s^%s^" % ("", s, p, o)))
if contexts:
for c in contexts.split("^".encode("latin-1")):
if c:
@@ -495,7 +509,7 @@ class Sleepycat(Store):
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
@@ -544,8 +558,7 @@ class Sleepycat(Store):
index, prefix_func, from_key, results_from_key = self.__lookup_dict[i]
# print (subject, predicate, object), context, prefix_func, index
# #DEBUG
- prefix = bb(
- "^".join(prefix_func((subject, predicate, object), context)))
+ prefix = bb("^".join(prefix_func((subject, predicate, object), context)))
return index, prefix, from_key, results_from_key
@@ -553,10 +566,15 @@ def to_key_func(i):
def to_key(triple, context):
"Takes a string; returns key"
return "^".encode("latin-1").join(
- (context,
- triple[i % 3],
- triple[(i + 1) % 3],
- triple[(i + 2) % 3], "".encode("latin-1"))) # "" to tac on the trailing ^
+ (
+ context,
+ triple[i % 3],
+ triple[(i + 1) % 3],
+ triple[(i + 2) % 3],
+ "".encode("latin-1"),
+ )
+ ) # "" to tac on the trailing ^
+
return to_key
@@ -564,11 +582,13 @@ def from_key_func(i):
def from_key(key):
"Takes a key; returns string"
parts = key.split("^".encode("latin-1"))
- return \
- parts[0], \
- parts[(3 - i + 0) % 3 + 1], \
- parts[(3 - i + 1) % 3 + 1], \
- parts[(3 - i + 2) % 3 + 1]
+ return (
+ parts[0],
+ parts[(3 - i + 0) % 3 + 1],
+ parts[(3 - i + 1) % 3 + 1],
+ parts[(3 - i + 2) % 3 + 1],
+ )
+
return from_key
@@ -590,8 +610,11 @@ def results_from_key_func(i, from_string):
o = from_string(parts[(3 - i + 2) % 3 + 1])
else:
o = object
- return (s, p, o), (
- from_string(c) for c in contexts_value.split("^".encode("latin-1")) if c)
+ return (
+ (s, p, o),
+ (from_string(c) for c in contexts_value.split("^".encode("latin-1")) if c),
+ )
+
return from_key
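The to_key_func/from_key_func helpers above define the rotated-key layout shared by the three Sleepycat indices: the context comes first, then the triple rotated so that position i leads, with a trailing ^ appended via the empty byte string. A minimal standalone sketch of the same logic (byte literals stand in for the latin-1-encoded terms the store actually passes):

def to_key_func(i):
    def to_key(triple, context):
        # context first, then the triple rotated so index i leads; b"" adds the trailing ^
        return b"^".join(
            (context, triple[i % 3], triple[(i + 1) % 3], triple[(i + 2) % 3], b"")
        )
    return to_key

def from_key_func(i):
    def from_key(key):
        parts = key.split(b"^")
        # undo the rotation: parts[0] is the context, the rest map back to s, p, o
        return (
            parts[0],
            parts[(3 - i + 0) % 3 + 1],
            parts[(3 - i + 1) % 3 + 1],
            parts[(3 - i + 2) % 3 + 1],
        )
    return from_key

triple = (b"s", b"p", b"o")
for i in range(3):
    key = to_key_func(i)(triple, b"c")
    assert from_key_func(i)(key) == (b"c",) + triple  # every rotation round-trips
print(to_key_func(1)(triple, b"c"))  # b'c^p^o^s^', the cpos index key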
diff --git a/rdflib/plugins/sparql/__init__.py b/rdflib/plugins/sparql/__init__.py
index bc1227f2..9efbd87f 100644
--- a/rdflib/plugins/sparql/__init__.py
+++ b/rdflib/plugins/sparql/__init__.py
@@ -28,7 +28,7 @@ NotImplementedError if they cannot handle a certain part
"""
-PLUGIN_ENTRY_POINT = 'rdf.plugins.sparqleval'
+PLUGIN_ENTRY_POINT = "rdf.plugins.sparqleval"
from . import parser
from . import operators
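Beyond the "rdf.plugins.sparqleval" entry point, evaluators can also be hooked in-process through CUSTOM_EVALS, following the contract the module docstring describes: each hook inspects an algebra part and raises NotImplementedError for anything it does not handle. A hedged sketch (the handler name myEval is illustrative):

import rdflib
from rdflib.plugins.sparql import CUSTOM_EVALS
from rdflib.plugins.sparql.evaluate import evalBGP

def myEval(ctx, part):
    if part.name == "BGP":
        # peek at (or rewrite) the triple patterns, then defer to the default
        return evalBGP(ctx, part.triples)
    raise NotImplementedError()  # hand every other algebra part back

CUSTOM_EVALS["myEval"] = myEval
g = rdflib.Graph()
list(g.query("SELECT * WHERE { ?s ?p ?o }"))  # myEval now sees each BGP first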
diff --git a/rdflib/plugins/sparql/aggregates.py b/rdflib/plugins/sparql/aggregates.py
index 11144778..8c70aeb1 100644
--- a/rdflib/plugins/sparql/aggregates.py
+++ b/rdflib/plugins/sparql/aggregates.py
@@ -39,7 +39,6 @@ class Accumulator(object):
class Counter(Accumulator):
-
def __init__(self, aggregation):
super(Counter, self).__init__(aggregation)
self.value = 0
@@ -71,16 +70,14 @@ class Counter(Accumulator):
def type_safe_numbers(*args):
- if (
- any(isinstance(arg, float) for arg in args) and
- any(isinstance(arg, Decimal) for arg in args)
+ if any(isinstance(arg, float) for arg in args) and any(
+ isinstance(arg, Decimal) for arg in args
):
return map(float, args)
return args
class Sum(Accumulator):
-
def __init__(self, aggregation):
super(Sum, self).__init__(aggregation)
self.value = 0
@@ -107,7 +104,6 @@ class Sum(Accumulator):
class Average(Accumulator):
-
def __init__(self, aggregation):
super(Average, self).__init__(aggregation)
self.counter = 0
@@ -171,13 +167,11 @@ class Extremum(Accumulator):
class Minimum(Extremum):
-
def compare(self, val1, val2):
return min(val1, val2, key=_val)
class Maximum(Extremum):
-
def compare(self, val1, val2):
return max(val1, val2, key=_val)
@@ -205,7 +199,6 @@ class Sample(Accumulator):
class GroupConcat(Accumulator):
-
def __init__(self, aggregation):
super(GroupConcat, self).__init__(aggregation)
# only GROUPCONCAT needs to have a list as accumulator
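type_safe_numbers above exists because Decimal and float refuse to mix in Python arithmetic; when both appear among a sum's operands, everything is coerced to float. A quick standalone check reusing the same function body:

from decimal import Decimal

def type_safe_numbers(*args):
    # coerce to float only when Decimal and float would otherwise collide
    if any(isinstance(arg, float) for arg in args) and any(
        isinstance(arg, Decimal) for arg in args
    ):
        return map(float, args)
    return args

# Decimal("0.1") + 0.2 raises TypeError; the coercion sidesteps that:
print(sum(type_safe_numbers(Decimal("0.1"), 0.2)))  # 0.30000000000000004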
diff --git a/rdflib/plugins/sparql/algebra.py b/rdflib/plugins/sparql/algebra.py
index 00a6d0b2..f84e51c9 100644
--- a/rdflib/plugins/sparql/algebra.py
+++ b/rdflib/plugins/sparql/algebra.py
@@ -1,4 +1,3 @@
-
"""
Converting the 'parse-tree' output of pyparsing to a SPARQL Algebra expression
@@ -19,9 +18,11 @@ from rdflib import Literal, Variable, URIRef, BNode
from rdflib.plugins.sparql.sparql import Prologue, Query
from rdflib.plugins.sparql.parserutils import CompValue, Expr
from rdflib.plugins.sparql.operators import (
- and_, TrueFilter, simplify as simplifyFilters)
-from rdflib.paths import (
- InvPath, AlternativePath, SequencePath, MulPath, NegatedPath)
+ and_,
+ TrueFilter,
+ simplify as simplifyFilters,
+)
+from rdflib.paths import InvPath, AlternativePath, SequencePath, MulPath, NegatedPath
from pyparsing import ParseResults
@@ -29,63 +30,69 @@ from pyparsing import ParseResults
# ---------------------------
# Some convenience methods
def OrderBy(p, expr):
- return CompValue('OrderBy', p=p, expr=expr)
+ return CompValue("OrderBy", p=p, expr=expr)
def ToMultiSet(p):
- return CompValue('ToMultiSet', p=p)
+ return CompValue("ToMultiSet", p=p)
def Union(p1, p2):
- return CompValue('Union', p1=p1, p2=p2)
+ return CompValue("Union", p1=p1, p2=p2)
def Join(p1, p2):
- return CompValue('Join', p1=p1, p2=p2)
+ return CompValue("Join", p1=p1, p2=p2)
def Minus(p1, p2):
- return CompValue('Minus', p1=p1, p2=p2)
+ return CompValue("Minus", p1=p1, p2=p2)
def Graph(term, graph):
- return CompValue('Graph', term=term, p=graph)
+ return CompValue("Graph", term=term, p=graph)
def BGP(triples=None):
- return CompValue('BGP', triples=triples or [])
+ return CompValue("BGP", triples=triples or [])
def LeftJoin(p1, p2, expr):
- return CompValue('LeftJoin', p1=p1, p2=p2, expr=expr)
+ return CompValue("LeftJoin", p1=p1, p2=p2, expr=expr)
def Filter(expr, p):
- return CompValue('Filter', expr=expr, p=p)
+ return CompValue("Filter", expr=expr, p=p)
def Extend(p, expr, var):
- return CompValue('Extend', p=p, expr=expr, var=var)
+ return CompValue("Extend", p=p, expr=expr, var=var)
def Values(res):
- return CompValue('values', res=res)
+ return CompValue("values", res=res)
def Project(p, PV):
- return CompValue('Project', p=p, PV=PV)
+ return CompValue("Project", p=p, PV=PV)
def Group(p, expr=None):
- return CompValue('Group', p=p, expr=expr)
+ return CompValue("Group", p=p, expr=expr)
def _knownTerms(triple, varsknown, varscount):
- return (len([x for x in triple if x not in varsknown and
- isinstance(x, (Variable, BNode))]),
- -sum(varscount.get(x, 0) for x in triple),
- not isinstance(triple[2], Literal),
- )
+ return (
+ len(
+ [
+ x
+ for x in triple
+ if x not in varsknown and isinstance(x, (Variable, BNode))
+ ]
+ ),
+ -sum(varscount.get(x, 0) for x in triple),
+ not isinstance(triple[2], Literal),
+ )
def reorderTriples(l):
@@ -115,8 +122,7 @@ def reorderTriples(l):
# we sort by decorate/undecorate, since we need the value of the sort keys
while i < len(l):
- l[i:] = sorted((_knownTerms(x[
- 1], varsknown, varscount), x[1]) for x in l[i:])
+ l[i:] = sorted((_knownTerms(x[1], varsknown, varscount), x[1]) for x in l[i:])
t = l[i][0][0] # top block has this many terms bound
j = 0
while i + j < len(l) and l[i + j][0][0] == t:
@@ -132,9 +138,8 @@ def triples(l):
l = reduce(lambda x, y: x + y, l)
if (len(l) % 3) != 0:
- raise Exception('these aint triples')
- return reorderTriples((l[x], l[x + 1], l[x + 2])
- for x in range(0, len(l), 3))
+ raise Exception("these aint triples")
+ return reorderTriples((l[x], l[x + 1], l[x + 2]) for x in range(0, len(l), 3))
def translatePName(p, prologue):
@@ -142,11 +147,12 @@ def translatePName(p, prologue):
Expand prefixed/relative URIs
"""
if isinstance(p, CompValue):
- if p.name == 'pname':
+ if p.name == "pname":
return prologue.absolutize(p)
- if p.name == 'literal':
- return Literal(p.string, lang=p.lang,
- datatype=prologue.absolutize(p.datatype))
+ if p.name == "literal":
+ return Literal(
+ p.string, lang=p.lang, datatype=prologue.absolutize(p.datatype)
+ )
elif isinstance(p, URIRef):
return prologue.absolutize(p)
@@ -157,39 +163,39 @@ def translatePath(p):
"""
if isinstance(p, CompValue):
- if p.name == 'PathAlternative':
+ if p.name == "PathAlternative":
if len(p.part) == 1:
return p.part[0]
else:
return AlternativePath(*p.part)
- elif p.name == 'PathSequence':
+ elif p.name == "PathSequence":
if len(p.part) == 1:
return p.part[0]
else:
return SequencePath(*p.part)
- elif p.name == 'PathElt':
+ elif p.name == "PathElt":
if not p.mod:
return p.part
else:
if isinstance(p.part, list):
if len(p.part) != 1:
- raise Exception('Denkfehler!')
+ raise Exception("Denkfehler!")
return MulPath(p.part[0], p.mod)
else:
return MulPath(p.part, p.mod)
- elif p.name == 'PathEltOrInverse':
+ elif p.name == "PathEltOrInverse":
if isinstance(p.part, list):
if len(p.part) != 1:
- raise Exception('Denkfehler!')
+ raise Exception("Denkfehler!")
return InvPath(p.part[0])
else:
return InvPath(p.part)
- elif p.name == 'PathNegatedPropertySet':
+ elif p.name == "PathNegatedPropertySet":
if isinstance(p.part, list):
return NegatedPath(AlternativePath(*p.part))
else:
@@ -204,9 +210,9 @@ def translateExists(e):
def _c(n):
if isinstance(n, CompValue):
- if n.name in ('Builtin_EXISTS', 'Builtin_NOTEXISTS'):
+ if n.name in ("Builtin_EXISTS", "Builtin_NOTEXISTS"):
n.graph = translateGroupGraphPattern(n.graph)
- if n.graph.name == 'Filter':
+ if n.graph.name == "Filter":
# filters inside (NOT) EXISTS can see vars bound outside
n.graph.no_isolated_scope = True
@@ -229,7 +235,7 @@ def collectAndRemoveFilters(parts):
i = 0
while i < len(parts):
p = parts[i]
- if p.name == 'Filter':
+ if p.name == "Filter":
filters.append(translateExists(p.expr))
parts.pop(i)
else:
@@ -254,8 +260,7 @@ def translateGroupOrUnionGraphPattern(graphPattern):
def translateGraphGraphPattern(graphPattern):
- return Graph(graphPattern.term,
- translateGroupGraphPattern(graphPattern.graph))
+ return Graph(graphPattern.term, translateGroupGraphPattern(graphPattern.graph))
def translateInlineData(graphPattern):
@@ -267,7 +272,7 @@ def translateGroupGraphPattern(graphPattern):
http://www.w3.org/TR/sparql11-query/#convertGraphPattern
"""
- if graphPattern.name == 'SubSelect':
+ if graphPattern.name == "SubSelect":
return ToMultiSet(translate(graphPattern)[0])
if not graphPattern.part:
@@ -277,9 +282,9 @@ def translateGroupGraphPattern(graphPattern):
g = []
for p in graphPattern.part:
- if p.name == 'TriplesBlock':
+ if p.name == "TriplesBlock":
# merge adjacent TripleBlocks
- if not (g and g[-1].name == 'BGP'):
+ if not (g and g[-1].name == "BGP"):
g.append(BGP())
g[-1]["triples"] += triples(p.triples)
else:
@@ -287,30 +292,31 @@ def translateGroupGraphPattern(graphPattern):
G = BGP()
for p in g:
- if p.name == 'OptionalGraphPattern':
+ if p.name == "OptionalGraphPattern":
A = translateGroupGraphPattern(p.graph)
- if A.name == 'Filter':
+ if A.name == "Filter":
G = LeftJoin(G, A.p, A.expr)
else:
G = LeftJoin(G, A, TrueFilter)
- elif p.name == 'MinusGraphPattern':
+ elif p.name == "MinusGraphPattern":
G = Minus(p1=G, p2=translateGroupGraphPattern(p.graph))
- elif p.name == 'GroupOrUnionGraphPattern':
+ elif p.name == "GroupOrUnionGraphPattern":
G = Join(p1=G, p2=translateGroupOrUnionGraphPattern(p))
- elif p.name == 'GraphGraphPattern':
+ elif p.name == "GraphGraphPattern":
G = Join(p1=G, p2=translateGraphGraphPattern(p))
- elif p.name == 'InlineData':
+ elif p.name == "InlineData":
G = Join(p1=G, p2=translateInlineData(p))
- elif p.name == 'ServiceGraphPattern':
+ elif p.name == "ServiceGraphPattern":
G = Join(p1=G, p2=p)
- elif p.name in ('BGP', 'Extend'):
+ elif p.name in ("BGP", "Extend"):
G = Join(p1=G, p2=p)
- elif p.name == 'Bind':
+ elif p.name == "Bind":
G = Extend(G, p.expr, p.var)
else:
- raise Exception('Unknown part in GroupGraphPattern: %s - %s' %
- (type(p), p.name))
+ raise Exception(
+ "Unknown part in GroupGraphPattern: %s - %s" % (type(p), p.name)
+ )
if filters:
G = Filter(expr=filters, p=G)
@@ -372,9 +378,7 @@ def _traverseAgg(e, visitor=lambda n, v: None):
return visitor(e, res)
-def traverse(
- tree, visitPre=lambda n: None,
- visitPost=lambda n: None, complete=None):
+def traverse(tree, visitPre=lambda n: None, visitPost=lambda n: None, complete=None):
"""
Traverse tree, visit each node with visit function
visit function may raise StopTraversal to stop traversal
@@ -397,7 +401,7 @@ def _hasAggregate(x):
"""
if isinstance(x, CompValue):
- if x.name.startswith('Aggregate_'):
+ if x.name.startswith("Aggregate_"):
raise StopTraversal(True)
@@ -409,9 +413,9 @@ def _aggs(e, A):
# TODO: nested Aggregates?
- if isinstance(e, CompValue) and e.name.startswith('Aggregate_'):
+ if isinstance(e, CompValue) and e.name.startswith("Aggregate_"):
A.append(e)
- aggvar = Variable('__agg_%d__' % len(A))
+ aggvar = Variable("__agg_%d__" % len(A))
e["res"] = aggvar
return aggvar
@@ -426,7 +430,7 @@ def _findVars(x, res):
if x.name == "Bind":
res.add(x.var)
return x # stop recursion and finding vars in the expr
- elif x.name == 'SubSelect':
+ elif x.name == "SubSelect":
if x.projection:
res.update(v.var or v.evar for v in x.projection)
return x
@@ -443,13 +447,16 @@ def _addVars(x, children):
x["_vars"] = set()
elif x.name == "Extend":
# vars only used in the expr for a bind should not be included
- x["_vars"] = reduce(operator.or_, [child for child,
- part in zip(children, x) if part != 'expr'], set())
+ x["_vars"] = reduce(
+ operator.or_,
+ [child for child, part in zip(children, x) if part != "expr"],
+ set(),
+ )
else:
x["_vars"] = set(reduce(operator.or_, children, set()))
- if x.name == 'SubSelect':
+ if x.name == "SubSelect":
if x.projection:
s = set(v.var or v.evar for v in x.projection)
else:
@@ -470,7 +477,7 @@ def _sample(e, v=None):
if isinstance(e, CompValue) and e.name.startswith("Aggregate_"):
return e # do not replace vars in aggregates
if isinstance(e, Variable) and v != e:
- return CompValue('Aggregate_Sample', vars=e)
+ return CompValue("Aggregate_Sample", vars=e)
def _simplifyFilters(e):
@@ -505,11 +512,11 @@ def translateAggregates(q, M):
if q.projection:
for v in q.projection:
if v.var:
- rv = Variable('__agg_%d__' % (len(A) + 1))
- A.append(CompValue('Aggregate_Sample', vars=v.var, res=rv))
+ rv = Variable("__agg_%d__" % (len(A) + 1))
+ A.append(CompValue("Aggregate_Sample", vars=v.var, res=rv))
E.append((rv, v.var))
- return CompValue('AggregateJoin', A=A, p=M), E
+ return CompValue("AggregateJoin", A=A, p=M), E
def translateValues(v):
@@ -554,17 +561,22 @@ def translate(q):
conditions = []
# convert "GROUP BY (?expr as ?var)" to an Extend
for c in q.groupby.condition:
- if isinstance(c, CompValue) and c.name == 'GroupAs':
+ if isinstance(c, CompValue) and c.name == "GroupAs":
M = Extend(M, c.expr, c.var)
c = c.var
conditions.append(c)
M = Group(p=M, expr=conditions)
aggregate = True
- elif traverse(q.having, _hasAggregate, complete=False) or \
- traverse(q.orderby, _hasAggregate, complete=False) or \
- any(traverse(x.expr, _hasAggregate, complete=False)
- for x in q.projection or [] if x.evar):
+ elif (
+ traverse(q.having, _hasAggregate, complete=False)
+ or traverse(q.orderby, _hasAggregate, complete=False)
+ or any(
+ traverse(x.expr, _hasAggregate, complete=False)
+ for x in q.projection or []
+ if x.evar
+ )
+ ):
# if any aggregate is used, implicit group by
M = Group(p=M)
aggregate = True
@@ -604,17 +616,22 @@ def translate(q):
# ORDER BY
if q.orderby:
- M = OrderBy(M, [CompValue('OrderCondition', expr=c.expr,
- order=c.order) for c in q.orderby.condition])
+ M = OrderBy(
+ M,
+ [
+ CompValue("OrderCondition", expr=c.expr, order=c.order)
+ for c in q.orderby.condition
+ ],
+ )
# PROJECT
M = Project(M, PV)
if q.modifier:
- if q.modifier == 'DISTINCT':
- M = CompValue('Distinct', p=M)
- elif q.modifier == 'REDUCED':
- M = CompValue('Reduced', p=M)
+ if q.modifier == "DISTINCT":
+ M = CompValue("Distinct", p=M)
+ elif q.modifier == "REDUCED":
+ M = CompValue("Reduced", p=M)
if q.limitoffset:
offset = 0
@@ -622,10 +639,11 @@ def translate(q):
offset = q.limitoffset.offset.toPython()
if q.limitoffset.limit is not None:
- M = CompValue('Slice', p=M, start=offset,
- length=q.limitoffset.limit.toPython())
+ M = CompValue(
+ "Slice", p=M, start=offset, length=q.limitoffset.limit.toPython()
+ )
else:
- M = CompValue('Slice', p=M, start=offset)
+ M = CompValue("Slice", p=M, start=offset)
return M, PV
@@ -633,12 +651,12 @@ def translate(q):
def simplify(n):
"""Remove joins to empty BGPs"""
if isinstance(n, CompValue):
- if n.name == 'Join':
- if n.p1.name == 'BGP' and len(n.p1.triples) == 0:
+ if n.name == "Join":
+ if n.p1.name == "BGP" and len(n.p1.triples) == 0:
return n.p2
- if n.p2.name == 'BGP' and len(n.p2.triples) == 0:
+ if n.p2.name == "BGP" and len(n.p2.triples) == 0:
return n.p1
- elif n.name == 'BGP':
+ elif n.name == "BGP":
n["triples"] = reorderTriples(n.triples)
return n
@@ -651,10 +669,10 @@ def analyse(n, children):
"""
if isinstance(n, CompValue):
- if n.name == 'Join':
+ if n.name == "Join":
n["lazy"] = all(children)
return False
- elif n.name in ('Slice', 'Distinct'):
+ elif n.name in ("Slice", "Distinct"):
return False
else:
return all(children)
@@ -674,9 +692,9 @@ def translatePrologue(p, base, initNs=None, prologue=None):
prologue.bind(k, v)
for x in p:
- if x.name == 'Base':
+ if x.name == "Base":
prologue.base = x.iri
- elif x.name == 'PrefixDecl':
+ elif x.name == "PrefixDecl":
prologue.bind(x.prefix, prologue.absolutize(x.iri))
return prologue
@@ -699,26 +717,24 @@ def translateQuads(quads):
def translateUpdate1(u, prologue):
- if u.name in ('Load', 'Clear', 'Drop', 'Create'):
+ if u.name in ("Load", "Clear", "Drop", "Create"):
pass # no translation needed
- elif u.name in ('Add', 'Move', 'Copy'):
+ elif u.name in ("Add", "Move", "Copy"):
pass
- elif u.name in ('InsertData', 'DeleteData', 'DeleteWhere'):
+ elif u.name in ("InsertData", "DeleteData", "DeleteWhere"):
t, q = translateQuads(u.quads)
u["quads"] = q
u["triples"] = t
- if u.name in ('DeleteWhere', 'DeleteData'):
+ if u.name in ("DeleteWhere", "DeleteData"):
pass # TODO: check for bnodes in triples
- elif u.name == 'Modify':
+ elif u.name == "Modify":
if u.delete:
- u.delete["triples"], u.delete[
- "quads"] = translateQuads(u.delete.quads)
+ u.delete["triples"], u.delete["quads"] = translateQuads(u.delete.quads)
if u.insert:
- u.insert["triples"], u.insert[
- "quads"] = translateQuads(u.insert.quads)
+ u.insert["triples"], u.insert["quads"] = translateQuads(u.insert.quads)
u["where"] = translateGroupGraphPattern(u.where)
else:
- raise Exception('Unknown type of update operation: %s' % u)
+ raise Exception("Unknown type of update operation: %s" % u)
u.prologue = prologue
return u
@@ -737,8 +753,7 @@ def translateUpdate(q, base=None, initNs=None):
prologue = translatePrologue(p, base, initNs, prologue)
# absolutize/resolve prefixes
- u = traverse(
- u, visitPost=functools.partial(translatePName, prologue=prologue))
+ u = traverse(u, visitPost=functools.partial(translatePName, prologue=prologue))
u = _traverse(u, _simplifyFilters)
u = traverse(u, visitPost=translatePath)
@@ -761,17 +776,16 @@ def translateQuery(q, base=None, initNs=None):
# absolutize/resolve prefixes
q[1] = traverse(
- q[1], visitPost=functools.partial(translatePName, prologue=prologue))
+ q[1], visitPost=functools.partial(translatePName, prologue=prologue)
+ )
P, PV = translate(q[1])
datasetClause = q[1].datasetClause
- if q[1].name == 'ConstructQuery':
+ if q[1].name == "ConstructQuery":
template = triples(q[1].template) if q[1].template else None
- res = CompValue(q[1].name, p=P,
- template=template,
- datasetClause=datasetClause)
+ res = CompValue(q[1].name, p=P, template=template, datasetClause=datasetClause)
else:
res = CompValue(q[1].name, p=P, datasetClause=datasetClause, PV=PV)
@@ -792,9 +806,9 @@ def pprintAlgebra(q):
if not isinstance(p, CompValue):
print(p)
return
- print("%s(" % (p.name, ))
+ print("%s(" % (p.name,))
for k in p:
- print("%s%s =" % (ind, k,), end=' ')
+ print("%s%s =" % (ind, k,), end=" ")
pp(p[k], ind + " ")
print("%s)" % ind)
@@ -806,7 +820,7 @@ def pprintAlgebra(q):
pp(x)
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
from rdflib.plugins.sparql import parser
import os.path
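For context on what this module produces: translateQuery turns the pyparsing parse tree into the algebra tree, and pprintAlgebra renders it, mirroring the __main__ block above. A hedged usage sketch:

from rdflib.plugins.sparql import parser, algebra

q = parser.parseQuery("SELECT ?s WHERE { ?s ?p ?o } LIMIT 5")
tq = algebra.translateQuery(q)  # Query object carrying the algebra tree
algebra.pprintAlgebra(tq)       # prints the nested SelectQuery/Slice/Project/BGP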
diff --git a/rdflib/plugins/sparql/datatypes.py b/rdflib/plugins/sparql/datatypes.py
index 1e8475e0..5ab8c92f 100644
--- a/rdflib/plugins/sparql/datatypes.py
+++ b/rdflib/plugins/sparql/datatypes.py
@@ -5,17 +5,45 @@ Utility functions for supporting the XML Schema Datatypes hierarchy
from rdflib import XSD
XSD_DTs = set(
- (XSD.integer, XSD.decimal, XSD.float, XSD.double, XSD.string,
- XSD.boolean, XSD.dateTime, XSD.nonPositiveInteger, XSD.negativeInteger,
- XSD.long, XSD.int, XSD.short, XSD.byte, XSD.nonNegativeInteger,
- XSD.unsignedLong, XSD.unsignedInt, XSD.unsignedShort, XSD.unsignedByte,
- XSD.positiveInteger, XSD.date))
+ (
+ XSD.integer,
+ XSD.decimal,
+ XSD.float,
+ XSD.double,
+ XSD.string,
+ XSD.boolean,
+ XSD.dateTime,
+ XSD.nonPositiveInteger,
+ XSD.negativeInteger,
+ XSD.long,
+ XSD.int,
+ XSD.short,
+ XSD.byte,
+ XSD.nonNegativeInteger,
+ XSD.unsignedLong,
+ XSD.unsignedInt,
+ XSD.unsignedShort,
+ XSD.unsignedByte,
+ XSD.positiveInteger,
+ XSD.date,
+ )
+)
_sub_types = {
XSD.integer: [
- XSD.nonPositiveInteger, XSD.negativeInteger, XSD.long, XSD.int,
- XSD.short, XSD.byte, XSD.nonNegativeInteger, XSD.positiveInteger,
- XSD.unsignedLong, XSD.unsignedInt, XSD.unsignedShort, XSD.unsignedByte],
+ XSD.nonPositiveInteger,
+ XSD.negativeInteger,
+ XSD.long,
+ XSD.int,
+ XSD.short,
+ XSD.byte,
+ XSD.nonNegativeInteger,
+ XSD.positiveInteger,
+ XSD.unsignedLong,
+ XSD.unsignedInt,
+ XSD.unsignedShort,
+ XSD.unsignedByte,
+ ],
}
_super_types = {}
@@ -25,21 +53,22 @@ for superdt in XSD_DTs:
# we only care about float, double, integer, decimal
_typePromotionMap = {
- XSD.float: {XSD.integer: XSD.float,
- XSD.decimal: XSD.float,
- XSD.double: XSD.double},
-
- XSD.double: {XSD.integer: XSD.double,
- XSD.float: XSD.double,
- XSD.decimal: XSD.double},
-
- XSD.decimal: {XSD.integer: XSD.decimal,
- XSD.float: XSD.float,
- XSD.double: XSD.double},
-
- XSD.integer: {XSD.decimal: XSD.decimal,
- XSD.float: XSD.float,
- XSD.double: XSD.double}
+ XSD.float: {XSD.integer: XSD.float, XSD.decimal: XSD.float, XSD.double: XSD.double},
+ XSD.double: {
+ XSD.integer: XSD.double,
+ XSD.float: XSD.double,
+ XSD.decimal: XSD.double,
+ },
+ XSD.decimal: {
+ XSD.integer: XSD.decimal,
+ XSD.float: XSD.float,
+ XSD.double: XSD.double,
+ },
+ XSD.integer: {
+ XSD.decimal: XSD.decimal,
+ XSD.float: XSD.float,
+ XSD.double: XSD.double,
+ },
}
@@ -53,5 +82,4 @@ def type_promotion(t1, t2):
try:
return _typePromotionMap[t1][t2]
except KeyError:
- raise TypeError(
- 'Operators cannot combine datatypes %s and %s' % (t1, t2))
+ raise TypeError("Operators cannot combine datatypes %s and %s" % (t1, t2))
diff --git a/rdflib/plugins/sparql/evaluate.py b/rdflib/plugins/sparql/evaluate.py
index fc9b0c30..43b3d0b0 100644
--- a/rdflib/plugins/sparql/evaluate.py
+++ b/rdflib/plugins/sparql/evaluate.py
@@ -24,9 +24,22 @@ from rdflib import Variable, Graph, BNode, URIRef, Literal
from rdflib.plugins.sparql import CUSTOM_EVALS
from rdflib.plugins.sparql.parserutils import value
from rdflib.plugins.sparql.sparql import (
- QueryContext, AlreadyBound, FrozenBindings, Bindings, SPARQLError)
+ QueryContext,
+ AlreadyBound,
+ FrozenBindings,
+ Bindings,
+ SPARQLError,
+)
from rdflib.plugins.sparql.evalutils import (
- _filter, _eval, _join, _diff, _minus, _fillTemplate, _ebv, _val)
+ _filter,
+ _eval,
+ _join,
+ _diff,
+ _minus,
+ _fillTemplate,
+ _ebv,
+ _val,
+)
from rdflib.plugins.sparql.aggregates import Aggregator
from rdflib.plugins.sparql.algebra import Join, ToMultiSet, Values
@@ -143,9 +156,10 @@ def evalLeftJoin(ctx, join):
# check that we would have had no OPTIONAL matches
# even without prior bindings...
p1_vars = join.p1._vars
- if p1_vars is None \
- or not any(_ebv(join.expr, b) for b in
- evalPart(ctx.thaw(a.remember(p1_vars)), join.p2)):
+ if p1_vars is None or not any(
+ _ebv(join.expr, b)
+ for b in evalPart(ctx.thaw(a.remember(p1_vars)), join.p2)
+ ):
yield a
@@ -153,7 +167,10 @@ def evalLeftJoin(ctx, join):
def evalFilter(ctx, part):
# TODO: Deal with dict returned from evalPart!
for c in evalPart(ctx, part.p):
- if _ebv(part.expr, c.forget(ctx, _except=part._vars) if not part.no_isolated_scope else c):
+ if _ebv(
+ part.expr,
+ c.forget(ctx, _except=part._vars) if not part.no_isolated_scope else c,
+ ):
yield c
@@ -161,8 +178,9 @@ def evalGraph(ctx, part):
if ctx.dataset is None:
raise Exception(
- "Non-conjunctive-graph doesn't know about " +
- "graphs. Try a query without GRAPH.")
+ "Non-conjunctive-graph doesn't know about "
+ + "graphs. Try a query without GRAPH."
+ )
ctx = ctx.clone()
graph = ctx[part.term]
@@ -191,7 +209,7 @@ def evalValues(ctx, part):
c = ctx.push()
try:
for k, v in r.items():
- if v != 'UNDEF':
+ if v != "UNDEF":
c[k] = v
except AlreadyBound:
continue
@@ -201,7 +219,7 @@ def evalValues(ctx, part):
def evalMultiset(ctx, part):
- if part.p.name == 'values':
+ if part.p.name == "values":
return evalValues(ctx, part)
return evalPart(ctx, part.p)
@@ -216,91 +234,102 @@ def evalPart(ctx, part):
except NotImplementedError:
pass # the given custom function did not handle this part
- if part.name == 'BGP':
+ if part.name == "BGP":
# Reorder triple patterns by number of bound nodes in the current ctx
# Do patterns with more bound nodes first
- triples = sorted(part.triples, key=lambda t: len([n for n in t if ctx[n] is None]))
+ triples = sorted(
+ part.triples, key=lambda t: len([n for n in t if ctx[n] is None])
+ )
return evalBGP(ctx, triples)
- elif part.name == 'Filter':
+ elif part.name == "Filter":
return evalFilter(ctx, part)
- elif part.name == 'Join':
+ elif part.name == "Join":
return evalJoin(ctx, part)
- elif part.name == 'LeftJoin':
+ elif part.name == "LeftJoin":
return evalLeftJoin(ctx, part)
- elif part.name == 'Graph':
+ elif part.name == "Graph":
return evalGraph(ctx, part)
- elif part.name == 'Union':
+ elif part.name == "Union":
return evalUnion(ctx, part)
- elif part.name == 'ToMultiSet':
+ elif part.name == "ToMultiSet":
return evalMultiset(ctx, part)
- elif part.name == 'Extend':
+ elif part.name == "Extend":
return evalExtend(ctx, part)
- elif part.name == 'Minus':
+ elif part.name == "Minus":
return evalMinus(ctx, part)
- elif part.name == 'Project':
+ elif part.name == "Project":
return evalProject(ctx, part)
- elif part.name == 'Slice':
+ elif part.name == "Slice":
return evalSlice(ctx, part)
- elif part.name == 'Distinct':
+ elif part.name == "Distinct":
return evalDistinct(ctx, part)
- elif part.name == 'Reduced':
+ elif part.name == "Reduced":
return evalReduced(ctx, part)
- elif part.name == 'OrderBy':
+ elif part.name == "OrderBy":
return evalOrderBy(ctx, part)
- elif part.name == 'Group':
+ elif part.name == "Group":
return evalGroup(ctx, part)
- elif part.name == 'AggregateJoin':
+ elif part.name == "AggregateJoin":
return evalAggregateJoin(ctx, part)
- elif part.name == 'SelectQuery':
+ elif part.name == "SelectQuery":
return evalSelectQuery(ctx, part)
- elif part.name == 'AskQuery':
+ elif part.name == "AskQuery":
return evalAskQuery(ctx, part)
- elif part.name == 'ConstructQuery':
+ elif part.name == "ConstructQuery":
return evalConstructQuery(ctx, part)
- elif part.name == 'ServiceGraphPattern':
+ elif part.name == "ServiceGraphPattern":
return evalServiceQuery(ctx, part)
- #raise Exception('ServiceGraphPattern not implemented')
+ # raise Exception('ServiceGraphPattern not implemented')
- elif part.name == 'DescribeQuery':
- raise Exception('DESCRIBE not implemented')
+ elif part.name == "DescribeQuery":
+ raise Exception("DESCRIBE not implemented")
else:
- raise Exception('I dont know: %s' % part.name)
+ raise Exception("I dont know: %s" % part.name)
+
def evalServiceQuery(ctx, part):
res = {}
- match = re.match('^service <(.*)>[ \n]*{(.*)}[ \n]*$',
- part.get('service_string', ''), re.DOTALL | re.I)
+ match = re.match(
+ "^service <(.*)>[ \n]*{(.*)}[ \n]*$",
+ part.get("service_string", ""),
+ re.DOTALL | re.I,
+ )
if match:
service_url = match.group(1)
service_query = _buildQueryStringForServiceCall(ctx, match)
- query_settings = {'query': service_query,
- 'output': 'json'}
- headers = {'accept' : 'application/sparql-results+json',
- 'user-agent': 'rdflibForAnUser'}
+ query_settings = {"query": service_query, "output": "json"}
+ headers = {
+ "accept": "application/sparql-results+json",
+ "user-agent": "rdflibForAnUser",
+ }
# GET is easier to cache, so prefer that if the query is not too long
if len(service_query) < 600:
response = requests.get(service_url, params=query_settings, headers=headers)
else:
- response = requests.post(service_url, params=query_settings, headers=headers)
+ response = requests.post(
+ service_url, params=query_settings, headers=headers
+ )
if response.status_code == 200:
- json = response.json();
- variables = res["vars_"] = json['head']['vars']
+ json = response.json()
+ variables = res["vars_"] = json["head"]["vars"]
# or just return the bindings?
- res = json['results']['bindings']
+ res = json["results"]["bindings"]
if len(res) > 0:
for r in res:
for bound in _yieldBindingsFromServiceCallResult(ctx, r, variables):
yield bound
else:
- raise Exception("Service: %s responded with code: %s", service_url, response.status_code);
+ raise Exception(
+ "Service: %s responded with code: %s"
+ % (service_url, response.status_code)
+ )
"""
@@ -309,6 +338,8 @@ def evalServiceQuery(ctx, part):
Re-adds prefixes if added and sets the base.
Wraps it in select if needed.
"""
+
+
def _buildQueryStringForServiceCall(ctx, match):
service_query = match.group(2)
@@ -316,18 +347,20 @@ def _buildQueryStringForServiceCall(ctx, match):
parser.parseQuery(service_query)
except ParseException:
# This could be because we don't have a select around the service call.
- service_query = 'SELECT REDUCED * WHERE {' + service_query + '}'
+ service_query = "SELECT REDUCED * WHERE {" + service_query + "}"
for p in ctx.prologue.namespace_manager.store.namespaces():
- service_query = 'PREFIX ' + p[0] + ':' + p[1].n3() + ' ' + service_query
+ service_query = "PREFIX " + p[0] + ":" + p[1].n3() + " " + service_query
# re-add the base if one was defined
base = ctx.prologue.base
if base is not None and len(base) > 0:
- service_query = 'BASE <' + base + '> ' + service_query
- sol = ctx.solution();
+ service_query = "BASE <" + base + "> " + service_query
+ sol = ctx.solution()
if len(sol) > 0:
- variables = ' '.join(map(lambda v:v.n3(), sol))
- variables_bound = ' '.join(map(lambda v: ctx.get(v).n3(), sol))
- service_query = service_query + 'VALUES (' + variables + ') {(' + variables_bound + ')}'
+ variables = " ".join(map(lambda v: v.n3(), sol))
+ variables_bound = " ".join(map(lambda v: ctx.get(v).n3(), sol))
+ service_query = (
+ service_query + "VALUES (" + variables + ") {(" + variables_bound + ")}"
+ )
return service_query
@@ -335,14 +368,18 @@ def _yieldBindingsFromServiceCallResult(ctx, r, variables):
res_dict = {}
for var in variables:
if var in r and r[var]:
- if r[var]['type'] == "uri":
+ if r[var]["type"] == "uri":
res_dict[Variable(var)] = URIRef(r[var]["value"])
- elif r[var]['type'] == "bnode":
+ elif r[var]["type"] == "bnode":
res_dict[Variable(var)] = BNode(r[var]["value"])
- elif r[var]['type'] == "literal" and 'datatype' in r[var]:
- res_dict[Variable(var)] = Literal(r[var]["value"], datatype=r[var]['datatype'])
- elif r[var]['type'] == "literal" and 'xml:lang' in r[var]:
- res_dict[Variable(var)] = Literal(r[var]["value"], lang=r[var]['xml:lang'])
+ elif r[var]["type"] == "literal" and "datatype" in r[var]:
+ res_dict[Variable(var)] = Literal(
+ r[var]["value"], datatype=r[var]["datatype"]
+ )
+ elif r[var]["type"] == "literal" and "xml:lang" in r[var]:
+ res_dict[Variable(var)] = Literal(
+ r[var]["value"], lang=r[var]["xml:lang"]
+ )
yield FrozenBindings(ctx, res_dict)
@@ -389,8 +426,10 @@ def evalOrderBy(ctx, part):
for e in reversed(part.expr):
- reverse = bool(e.order and e.order == 'DESC')
- res = sorted(res, key=lambda x: _val(value(x, e.expr, variables=True)), reverse=reverse)
+ reverse = bool(e.order and e.order == "DESC")
+ res = sorted(
+ res, key=lambda x: _val(value(x, e.expr, variables=True)), reverse=reverse
+ )
return res
@@ -398,7 +437,11 @@ def evalOrderBy(ctx, part):
def evalSlice(ctx, slice):
res = evalPart(ctx, slice.p)
- return itertools.islice(res, slice.start, slice.start + slice.length if slice.length is not None else None)
+ return itertools.islice(
+ res,
+ slice.start,
+ slice.start + slice.length if slice.length is not None else None,
+ )
def evalReduced(ctx, part):
@@ -506,8 +549,9 @@ def evalQuery(graph, query, initBindings, base=None):
if main.datasetClause:
if ctx.dataset is None:
raise Exception(
- "Non-conjunctive-graph doesn't know about " +
- "graphs! Try a query without FROM (NAMED).")
+ "Non-conjunctive-graph doesn't know about "
+ + "graphs! Try a query without FROM (NAMED)."
+ )
ctx = ctx.clone() # or push/pop?
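The SERVICE result handling above maps SPARQL 1.1 JSON result terms onto rdflib terms. A standalone sketch of that mapping (binding shapes follow the JSON results spec; the plain-literal fallback at the end is an addition, not in the code above):

from rdflib import URIRef, BNode, Literal

def json_term_to_rdflib(t):
    # one SPARQL JSON binding entry -> one rdflib term
    if t["type"] == "uri":
        return URIRef(t["value"])
    if t["type"] == "bnode":
        return BNode(t["value"])
    if t["type"] == "literal" and "datatype" in t:
        return Literal(t["value"], datatype=t["datatype"])
    if t["type"] == "literal" and "xml:lang" in t:
        return Literal(t["value"], lang=t["xml:lang"])
    return Literal(t["value"])  # plain-literal fallback (added here)

print(json_term_to_rdflib({"type": "uri", "value": "http://example.org/a"}))
print(json_term_to_rdflib({"type": "literal", "value": "chat", "xml:lang": "fr"}))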
diff --git a/rdflib/plugins/sparql/evalutils.py b/rdflib/plugins/sparql/evalutils.py
index 25353fe0..8bf1981d 100644
--- a/rdflib/plugins/sparql/evalutils.py
+++ b/rdflib/plugins/sparql/evalutils.py
@@ -49,8 +49,7 @@ def _ebv(expr, ctx):
except SPARQLError:
return False # filter error == False
elif isinstance(expr, CompValue):
- raise Exception(
- "Weird - filter got a CompValue without evalfn! %r" % expr)
+ raise Exception("Weird - filter got a CompValue without evalfn! %r" % expr)
elif isinstance(expr, Variable):
try:
return EBV(ctx[expr])
@@ -73,8 +72,7 @@ def _eval(expr, ctx, raise_not_bound_error=True):
else:
return None
elif isinstance(expr, CompValue):
- raise Exception(
- "Weird - _eval got a CompValue without evalfn! %r" % expr)
+ raise Exception("Weird - _eval got a CompValue without evalfn! %r" % expr)
else:
raise Exception("Cannot eval thing: %s (%s)" % (expr, type(expr)))
@@ -101,12 +99,11 @@ def _fillTemplate(template, solution):
_o = solution.get(o)
# instantiate new bnodes for each solution
- _s, _p, _o = [bnodeMap[x] if isinstance(
- x, BNode) else y for x, y in zip(t, (_s, _p, _o))]
+ _s, _p, _o = [
+ bnodeMap[x] if isinstance(x, BNode) else y for x, y in zip(t, (_s, _p, _o))
+ ]
- if _s is not None and \
- _p is not None and \
- _o is not None:
+ if _s is not None and _p is not None and _o is not None:
yield (_s, _p, _o)
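The bnodeMap used by _fillTemplate above is a defaultdict(BNode): within one solution, every occurrence of a template bnode maps to the same fresh bnode, while each new solution gets a new map and therefore new bnodes. A minimal sketch:

import collections
from rdflib import BNode

template_bnode = BNode()  # stands in for a bnode in a CONSTRUCT template

for solution in range(2):  # pretend we have two solutions
    bnodeMap = collections.defaultdict(BNode)  # fresh map per solution
    first = bnodeMap[template_bnode]
    second = bnodeMap[template_bnode]
    assert first == second  # stable within one solution
    print(solution, first)  # a different bnode for each solution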
diff --git a/rdflib/plugins/sparql/operators.py b/rdflib/plugins/sparql/operators.py
index 52558aa9..8f644a1f 100644
--- a/rdflib/plugins/sparql/operators.py
+++ b/rdflib/plugins/sparql/operators.py
@@ -49,7 +49,7 @@ def Builtin_IRI(expr, ctx):
if isinstance(a, Literal):
return ctx.prologue.absolutize(URIRef(a))
- raise SPARQLError('IRI function only accepts URIRefs or Literals/Strings!')
+ raise SPARQLError("IRI function only accepts URIRefs or Literals/Strings!")
def Builtin_isBLANK(expr, ctx):
@@ -85,8 +85,7 @@ def Builtin_BNODE(expr, ctx):
if isinstance(a, Literal):
return ctx.bnodes[a] # defaultdict does the right thing
- raise SPARQLError(
- 'BNode function only accepts no argument or literal/string')
+ raise SPARQLError("BNode function only accepts no argument or literal/string")
def Builtin_ABS(expr, ctx):
@@ -158,11 +157,10 @@ def Builtin_COALESCE(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-coalesce
"""
- for x in expr.get('arg', variables=True):
+ for x in expr.get("arg", variables=True):
if x is not None and not isinstance(x, (SPARQLError, Variable)):
return x
- raise SPARQLError(
- "COALESCE got no arguments that did not evaluate to an error")
+ raise SPARQLError("COALESCE got no arguments that did not evaluate to an error")
def Builtin_CEIL(expr, ctx):
@@ -214,8 +212,7 @@ def Builtin_REGEX(expr, ctx):
if flags:
# Maps XPath REGEX flags (http://www.w3.org/TR/xpath-functions/#flags)
# to Python's re flags
- flagMap = dict(
- [('i', re.IGNORECASE), ('s', re.DOTALL), ('m', re.MULTILINE)])
+ flagMap = dict([("i", re.IGNORECASE), ("s", re.DOTALL), ("m", re.MULTILINE)])
cFlag = reduce(pyop.or_, [flagMap.get(f, 0) for f in flags])
return Literal(bool(re.search(str(pattern), text, cFlag)))
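The flagMap translation above is straightforward: XPath's i/s/m flags OR together into Python re flags. A quick check of the same reduction (an initial value of 0 is added here for safety):

import operator as pyop
import re
from functools import reduce

flagMap = {"i": re.IGNORECASE, "s": re.DOTALL, "m": re.MULTILINE}
cFlag = reduce(pyop.or_, [flagMap.get(f, 0) for f in "im"], 0)
print(bool(re.search("^abc", "ABC", cFlag)))  # True: case-insensitive match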
@@ -231,7 +228,7 @@ def Builtin_REPLACE(expr, ctx):
flags = expr.flags
# python uses \1, xpath/sparql uses $1
- replacement = re.sub('\\$([0-9]*)', r'\\\1', replacement)
+ replacement = re.sub("\\$([0-9]*)", r"\\\1", replacement)
def _r(m):
@@ -245,7 +242,7 @@ def Builtin_REPLACE(expr, ctx):
# the match object is replaced with a wrapper that
# returns "" instead of None for unmatched groups
- class _m():
+ class _m:
def __init__(self, m):
self.m = m
self.string = m.string
@@ -259,8 +256,7 @@ def Builtin_REPLACE(expr, ctx):
if flags:
# Maps XPath REGEX flags (http://www.w3.org/TR/xpath-functions/#flags)
# to Python's re flags
- flagMap = dict(
- [('i', re.IGNORECASE), ('s', re.DOTALL), ('m', re.MULTILINE)])
+ flagMap = dict([("i", re.IGNORECASE), ("s", re.DOTALL), ("m", re.MULTILINE)])
cFlag = reduce(pyop.or_, [flagMap.get(f, 0) for f in flags])
# @@FIXME@@ either datatype OR lang, NOT both
@@ -269,8 +265,11 @@ def Builtin_REPLACE(expr, ctx):
# python versions. see comments above in _r(m).
compat_r = str(replacement) if sys.version_info[:2] >= (3, 5) else _r
- return Literal(re.sub(str(pattern), compat_r, text, cFlag),
- datatype=text.datatype, lang=text.language)
+ return Literal(
+ re.sub(str(pattern), compat_r, text, flags=cFlag),
+ datatype=text.datatype,
+ lang=text.language,
+ )
def Builtin_STRDT(expr, ctx):
@@ -288,7 +287,7 @@ def Builtin_STRLANG(expr, ctx):
s = string(expr.arg1)
if s.language or s.datatype:
- raise SPARQLError('STRLANG expects a simple literal')
+ raise SPARQLError("STRLANG expects a simple literal")
# TODO: normalisation of lang tag to lower-case
# should probably happen in literal __init__
@@ -308,8 +307,7 @@ def Builtin_CONCAT(expr, ctx):
lang = set(x.language for x in expr.arg)
lang = lang.pop() if len(lang) == 1 else None
- return Literal("".join(string(x)
- for x in expr.arg), datatype=dt, lang=lang)
+ return Literal("".join(string(x) for x in expr.arg), datatype=dt, lang=lang)
def _compatibleStrings(a, b):
@@ -317,7 +315,7 @@ def _compatibleStrings(a, b):
string(b)
if b.language and a.language != b.language:
- raise SPARQLError('incompatible arguments to str functions')
+ raise SPARQLError("incompatible arguments to str functions")
def Builtin_STRSTARTS(expr, ctx):
@@ -373,7 +371,7 @@ def Builtin_STRAFTER(expr, ctx):
if i == -1:
return Literal("")
else:
- return Literal(a[i + len(b):], lang=a.language, datatype=a.datatype)
+ return Literal(a[i + len(b) :], lang=a.language, datatype=a.datatype)
def Builtin_CONTAINS(expr, ctx):
@@ -491,7 +489,7 @@ def Builtin_TIMEZONE(e, ctx):
"""
dt = datetime(e.arg)
if not dt.tzinfo:
- raise SPARQLError('datatime has no timezone: %r' % dt)
+ raise SPARQLError("datatime has no timezone: %r" % dt)
delta = dt.tzinfo.utcoffset(ctx.now)
@@ -508,11 +506,13 @@ def Builtin_TIMEZONE(e, ctx):
m = (s - h * 60 * 60) / 60
s = s - h * 60 * 60 - m * 60
- tzdelta = "%sP%sT%s%s%s" % (neg,
- "%dD" % d if d else "",
- "%dH" % h if h else "",
- "%dM" % m if m else "",
- "%dS" % s if not d and not h and not m else "")
+ tzdelta = "%sP%sT%s%s%s" % (
+ neg,
+ "%dD" % d if d else "",
+ "%dH" % h if h else "",
+ "%dM" % m if m else "",
+ "%dS" % s if not d and not h and not m else "",
+ )
return Literal(tzdelta, datatype=XSD.dayTimeDuration)
@@ -549,7 +549,7 @@ def Builtin_LANG(e, ctx):
def Builtin_DATATYPE(e, ctx):
l = e.arg
if not isinstance(l, Literal):
- raise SPARQLError('Can only get datatype of literal: %r' % l)
+ raise SPARQLError("Can only get datatype of literal: %r" % l)
if l.language:
return RDF_langString
if not l.datatype and not l.language:
@@ -567,7 +567,7 @@ def Builtin_BOUND(e, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-bound
"""
- n = e.get('arg', variables=True)
+ n = e.get("arg", variables=True)
return Literal(not isinstance(n, Variable))
@@ -576,7 +576,7 @@ def Builtin_EXISTS(e, ctx):
# damn...
from rdflib.plugins.sparql.evaluate import evalPart
- exists = e.name == 'Builtin_EXISTS'
+ exists = e.name == "Builtin_EXISTS"
ctx = ctx.ctx.thaw(ctx) # hmm
for x in evalPart(ctx, e.graph):
@@ -605,9 +605,11 @@ def custom_function(uri, override=False, raw=False):
"""
Decorator version of :func:`register_custom_function`.
"""
+
def decorator(func):
register_custom_function(uri, func, override=override, raw=raw)
return func
+
return decorator
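A hedged usage sketch for the decorator above (the URI and function are invented for illustration); with raw=False the function receives already-evaluated argument values:

from rdflib import Graph, Literal, URIRef
from rdflib.plugins.sparql.operators import custom_function

@custom_function(URIRef("http://example.org/fn/reverse"), raw=False)
def reverse(s):
    # s arrives as an evaluated term; return a plain literal
    return Literal(str(s)[::-1])

for row in Graph().query(
    'SELECT (<http://example.org/fn/reverse>("abc") AS ?r) WHERE {}'
):
    print(row.r)  # should print "cba"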
@@ -624,7 +626,7 @@ def Function(e, ctx):
pair = _CUSTOM_FUNCTIONS.get(e.iri)
if pair is None:
# no such function is registered
- raise SPARQLError('Unknown function %r' % e.iri)
+ raise SPARQLError("Unknown function %r" % e.iri)
func, raw = pair
if raw:
# function expects expression and context
@@ -658,21 +660,17 @@ def default_cast(e, ctx):
if isinstance(x, (URIRef, Literal)):
return Literal(x, datatype=XSD.string)
else:
- raise SPARQLError(
- "Cannot cast term %r of type %r" % (x, type(x)))
+ raise SPARQLError("Cannot cast term %r of type %r" % (x, type(x)))
if not isinstance(x, Literal):
- raise SPARQLError(
- "Can only cast Literals to non-string data-types")
+ raise SPARQLError("Can only cast Literals to non-string data-types")
if x.datatype and not x.datatype in XSD_DTs:
- raise SPARQLError(
- "Cannot cast literal with unknown datatype: %r" % x.datatype)
+ raise SPARQLError("Cannot cast literal with unknown datatype: %r" % x.datatype)
if e.iri == XSD.dateTime:
if x.datatype and x.datatype not in (XSD.dateTime, XSD.string):
- raise SPARQLError(
- "Cannot cast %r to XSD:dateTime" % x.datatype)
+ raise SPARQLError("Cannot cast %r to XSD:dateTime" % x.datatype)
try:
return Literal(isodate.parse_datetime(x), datatype=e.iri)
except:
@@ -742,12 +740,12 @@ def MultiplicativeExpression(e, ctx):
if type(f) == float:
res = float(res)
- if op == '*':
+ if op == "*":
res *= f
else:
res /= f
except (InvalidOperation, ZeroDivisionError):
- raise SPARQLError('divide by 0')
+ raise SPARQLError("divide by 0")
return Literal(res)
@@ -775,7 +773,7 @@ def AdditiveExpression(e, ctx):
dt = type_promotion(dt, term.datatype)
- if op == '+':
+ if op == "+":
res += n
else:
res -= n
@@ -794,18 +792,22 @@ def RelationalExpression(e, ctx):
if other is None:
return expr
- ops = dict([('>', lambda x, y: x.__gt__(y)),
- ('<', lambda x, y: x.__lt__(y)),
- ('=', lambda x, y: x.eq(y)),
- ('!=', lambda x, y: x.neq(y)),
- ('>=', lambda x, y: x.__ge__(y)),
- ('<=', lambda x, y: x.__le__(y)),
- ('IN', pyop.contains),
- ('NOT IN', lambda x, y: not pyop.contains(x, y))])
+ ops = dict(
+ [
+ (">", lambda x, y: x.__gt__(y)),
+ ("<", lambda x, y: x.__lt__(y)),
+ ("=", lambda x, y: x.eq(y)),
+ ("!=", lambda x, y: x.neq(y)),
+ (">=", lambda x, y: x.__ge__(y)),
+ ("<=", lambda x, y: x.__le__(y)),
+ ("IN", pyop.contains),
+ ("NOT IN", lambda x, y: not pyop.contains(x, y)),
+ ]
+ )
- if op in ('IN', 'NOT IN'):
+ if op in ("IN", "NOT IN"):
- res = (op == 'NOT IN')
+ res = op == "NOT IN"
error = False
@@ -823,33 +825,37 @@ def RelationalExpression(e, ctx):
else:
raise error
- if not op in ('=', '!=', 'IN', 'NOT IN'):
+ if not op in ("=", "!=", "IN", "NOT IN"):
if not isinstance(expr, Literal):
raise SPARQLError(
- "Compare other than =, != of non-literals is an error: %r" %
- expr)
+ "Compare other than =, != of non-literals is an error: %r" % expr
+ )
if not isinstance(other, Literal):
raise SPARQLError(
- "Compare other than =, != of non-literals is an error: %r" %
- other)
+ "Compare other than =, != of non-literals is an error: %r" % other
+ )
else:
if not isinstance(expr, Node):
- raise SPARQLError('I cannot compare this non-node: %r' % expr)
+ raise SPARQLError("I cannot compare this non-node: %r" % expr)
if not isinstance(other, Node):
- raise SPARQLError('I cannot compare this non-node: %r' % other)
+ raise SPARQLError("I cannot compare this non-node: %r" % other)
if isinstance(expr, Literal) and isinstance(other, Literal):
- if expr.datatype is not None and expr.datatype not in XSD_DTs and other.datatype is not None and other.datatype not in XSD_DTs:
+ if (
+ expr.datatype is not None
+ and expr.datatype not in XSD_DTs
+ and other.datatype is not None
+ and other.datatype not in XSD_DTs
+ ):
# in SPARQL for non-XSD DT Literals we can only do =,!=
- if op not in ('=', '!='):
- raise SPARQLError(
- 'Can only do =,!= comparisons of non-XSD Literals')
+ if op not in ("=", "!="):
+ raise SPARQLError("Can only do =,!= comparisons of non-XSD Literals")
try:
r = ops[op](expr, other)
if r == NotImplemented:
- raise SPARQLError('Error when comparing')
+ raise SPARQLError("Error when comparing")
except TypeError as te:
raise SPARQLError(*te.args)
return Literal(r)
@@ -897,18 +903,22 @@ def ConditionalOrExpression(e, ctx):
def not_(arg):
- return Expr('UnaryNot', UnaryNot, expr=arg)
+ return Expr("UnaryNot", UnaryNot, expr=arg)
def and_(*args):
if len(args) == 1:
return args[0]
- return Expr('ConditionalAndExpression', ConditionalAndExpression,
- expr=args[0], other=list(args[1:]))
+ return Expr(
+ "ConditionalAndExpression",
+ ConditionalAndExpression,
+ expr=args[0],
+ other=list(args[1:]),
+ )
-TrueFilter = Expr('TrueFilter', lambda _1, _2: Literal(True))
+TrueFilter = Expr("TrueFilter", lambda _1, _2: Literal(True))
def simplify(expr):
@@ -919,7 +929,7 @@ def simplify(expr):
return list(map(simplify, expr))
if not isinstance(expr, CompValue):
return expr
- if expr.name.endswith('Expression'):
+ if expr.name.endswith("Expression"):
if expr.other is None:
return simplify(expr.expr)
@@ -941,8 +951,7 @@ def datetime(e):
if not isinstance(e, Literal):
raise SPARQLError("Non-literal passed as datetime: %r" % e)
if not e.datatype == XSD.dateTime:
- raise SPARQLError(
- "Literal with wrong datatype passed as datetime: %r" % e)
+ raise SPARQLError("Literal with wrong datatype passed as datetime: %r" % e)
return e.toPython()
@@ -954,8 +963,7 @@ def string(s):
if not isinstance(s, Literal):
raise SPARQLError("Non-literal passes as string: %r" % s)
if s.datatype and s.datatype != XSD.string:
- raise SPARQLError(
- "Non-string datatype-literal passes as string: %r" % s)
+ raise SPARQLError("Non-string datatype-literal passes as string: %r" % s)
return s
@@ -970,13 +978,24 @@ def numeric(expr):
if not isinstance(expr, Literal):
raise SPARQLTypeError("%r is not a literal!" % expr)
- if expr.datatype not in (XSD.float, XSD.double,
- XSD.decimal, XSD.integer,
- XSD.nonPositiveInteger, XSD.negativeInteger,
- XSD.nonNegativeInteger, XSD.positiveInteger,
- XSD.unsignedLong, XSD.unsignedInt,
- XSD.unsignedShort, XSD.unsignedByte,
- XSD.long, XSD.int, XSD.short, XSD.byte):
+ if expr.datatype not in (
+ XSD.float,
+ XSD.double,
+ XSD.decimal,
+ XSD.integer,
+ XSD.nonPositiveInteger,
+ XSD.negativeInteger,
+ XSD.nonNegativeInteger,
+ XSD.positiveInteger,
+ XSD.unsignedLong,
+ XSD.unsignedInt,
+ XSD.unsignedShort,
+ XSD.unsignedByte,
+ XSD.long,
+ XSD.int,
+ XSD.short,
+ XSD.byte,
+ ):
raise SPARQLTypeError("%r does not have a numeric datatype!" % expr)
return expr.toPython()
@@ -1011,14 +1030,18 @@ def EBV(rt):
# Type error, see: http://www.w3.org/TR/rdf-sparql-query/#ebv
raise SPARQLTypeError(
"http://www.w3.org/TR/rdf-sparql-query/#ebv - ' + \
- 'Could not determine the EBV for : %r" % rt)
+ 'Could not determine the EBV for : %r"
+ % rt
+ )
else:
return bool(pyRT)
else:
raise SPARQLTypeError(
"http://www.w3.org/TR/rdf-sparql-query/#ebv - ' + \
- 'Only literals have Boolean values! %r" % rt)
+ 'Only literals have Boolean values! %r"
+ % rt
+ )
def _lang_range_check(range, lang):
@@ -1038,6 +1061,7 @@ def _lang_range_check(range, lang):
.. __:http://dev.w3.org/2004/PythonLib-IH/RDFClosure/RestrictedDatatype.py
"""
+
def _match(r, l):
"""
Matching of a range and language item: either range is a wildcard
@@ -1046,10 +1070,10 @@ def _lang_range_check(range, lang):
@param l: language tag item
@rtype: boolean
"""
- return r == '*' or r == l
+ return r == "*" or r == l
- rangeList = range.strip().lower().split('-')
- langList = lang.strip().lower().split('-')
+ rangeList = range.strip().lower().split("-")
+ langList = lang.strip().lower().split("-")
if not _match(rangeList[0], langList[0]):
return False
if len(rangeList) > len(langList):
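The matching here is the wildcard-aware language-range check of RFC 4647: item by item, each range subtag must equal the tag's subtag or be "*", and a range with more subtags than the tag never matches. A standalone sketch of that logic, independent of rdflib (a simplification; the RFC's extended filtering has extra rules for singleton subtags):

def lang_range_check(range_, lang):
    def _match(r, l):
        # a wildcard range item matches anything
        return r == "*" or r == l
    range_list = range_.strip().lower().split("-")
    lang_list = lang.strip().lower().split("-")
    if not _match(range_list[0], lang_list[0]):
        return False
    if len(range_list) > len(lang_list):
        return False  # range has more subtags than the tag itself
    return all(_match(r, l) for r, l in zip(range_list[1:], lang_list[1:]))

print(lang_range_check("en", "en-GB"))  # True: "en" is a prefix of "en-GB"
print(lang_range_check("en-gb", "en"))  # False: range longer than the tag
print(lang_range_check("*", "fr"))      # True: wildcard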
diff --git a/rdflib/plugins/sparql/parser.py b/rdflib/plugins/sparql/parser.py
index cc785f4c..e8c37a2e 100644
--- a/rdflib/plugins/sparql/parser.py
+++ b/rdflib/plugins/sparql/parser.py
@@ -10,10 +10,22 @@ import sys
import re
from pyparsing import (
- Literal, Regex, Optional, OneOrMore, ZeroOrMore, Forward,
- ParseException, Suppress, Combine, restOfLine, Group,
- ParseResults, delimitedList)
+ Literal,
+ Regex,
+ Optional,
+ OneOrMore,
+ ZeroOrMore,
+ Forward,
+ ParseException,
+ Suppress,
+ Combine,
+ restOfLine,
+ Group,
+ ParseResults,
+ delimitedList,
+)
from pyparsing import CaselessKeyword as Keyword # watch out :)
+
# from pyparsing import Keyword as CaseSensitiveKeyword
from .parserutils import Comp, Param, ParamList
@@ -51,9 +63,9 @@ def expandTriples(terms):
print("Terms", terms)
l = len(terms)
for i, t in enumerate(terms):
- if t == ',':
+ if t == ",":
res.extend([res[-3], res[-2]])
- elif t == ';':
+ elif t == ";":
if i + 1 == len(terms) or terms[i + 1] == ";" or terms[i + 1] == ".":
continue # this semicolon is spurious
res.append(res[0])
@@ -70,12 +82,13 @@ def expandTriples(terms):
res.append(t[0])
elif isinstance(t, ParseResults):
res += t.asList()
- elif t != '.':
+ elif t != ".":
res.append(t)
if DEBUG:
print(len(res), t)
if DEBUG:
import json
+
print(json.dumps(res, indent=2))
return res
@@ -87,6 +100,7 @@ def expandTriples(terms):
except:
if DEBUG:
import traceback
+
traceback.print_exc()
raise
@@ -139,13 +153,16 @@ def expandCollection(terms):
# SPARQL Grammar from http://www.w3.org/TR/sparql11-query/#grammar
# ------ TERMINALS --------------
# [139] IRIREF ::= '<' ([^<>"{}|^`\]-[#x00-#x20])* '>'
-IRIREF = Combine(Suppress('<') + Regex(r'[^<>"{}|^`\\%s]*' % ''.join(
- '\\x%02X' % i for i in range(33))) + Suppress('>'))
+IRIREF = Combine(
+ Suppress("<")
+ + Regex(r'[^<>"{}|^`\\%s]*' % "".join("\\x%02X" % i for i in range(33)))
+ + Suppress(">")
+)
IRIREF.setParseAction(lambda x: rdflib.URIRef(x[0]))
# [164] P_CHARS_BASE ::= [A-Z] | [a-z] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x02FF] | [#x0370-#x037D] | [#x037F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] | [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] | [#x10000-#xEFFFF]
-if sys.maxunicode == 0xffff:
+if sys.maxunicode == 0xFFFF:
# this is narrow python build (default on windows/osx)
# this means that unicode code points over 0xffff are stored
# as several characters, which in turn means that regex character
@@ -162,52 +179,54 @@ if sys.maxunicode == 0xffff:
#
# in py3.3 this is fixed
- PN_CHARS_BASE_re = u'A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD'
+ PN_CHARS_BASE_re = u"A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD"
else:
# wide python build
- PN_CHARS_BASE_re = u'A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\U00010000-\U000EFFFF'
+ PN_CHARS_BASE_re = u"A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\U00010000-\U000EFFFF"
# [165] PN_CHARS_U ::= PN_CHARS_BASE | '_'
-PN_CHARS_U_re = '_' + PN_CHARS_BASE_re
+PN_CHARS_U_re = "_" + PN_CHARS_BASE_re
# [167] PN_CHARS ::= PN_CHARS_U | '-' | [0-9] | #x00B7 | [#x0300-#x036F] | [#x203F-#x2040]
-PN_CHARS_re = u'\\-0-9\u00B7\u0300-\u036F\u203F-\u2040' + PN_CHARS_U_re
+PN_CHARS_re = u"\\-0-9\u00B7\u0300-\u036F\u203F-\u2040" + PN_CHARS_U_re
# PN_CHARS = Regex(u'[%s]'%PN_CHARS_re, flags=re.U)
# [168] PN_PREFIX ::= PN_CHARS_BASE ((PN_CHARS|'.')* PN_CHARS)?
-PN_PREFIX = Regex(u'[%s](?:[%s\\.]*[%s])?' % (PN_CHARS_BASE_re,
- PN_CHARS_re, PN_CHARS_re), flags=re.U)
+PN_PREFIX = Regex(
+ u"[%s](?:[%s\\.]*[%s])?" % (PN_CHARS_BASE_re, PN_CHARS_re, PN_CHARS_re), flags=re.U
+)
# [140] PNAME_NS ::= PN_PREFIX? ':'
-PNAME_NS = Optional(
- Param('prefix', PN_PREFIX)) + Suppress(':').leaveWhitespace()
+PNAME_NS = Optional(Param("prefix", PN_PREFIX)) + Suppress(":").leaveWhitespace()
# [173] PN_LOCAL_ESC ::= '\' ( '_' | '~' | '.' | '-' | '!' | '$' | '&' | "'" | '(' | ')' | '*' | '+' | ',' | ';' | '=' | '/' | '?' | '#' | '@' | '%' )
-PN_LOCAL_ESC_re = '\\\\[_~\\.\\-!$&"\'()*+,;=/?#@%]'
+PN_LOCAL_ESC_re = "\\\\[_~\\.\\-!$&\"'()*+,;=/?#@%]"
# PN_LOCAL_ESC = Regex(PN_LOCAL_ESC_re) # regex'd
-#PN_LOCAL_ESC.setParseAction(lambda x: x[0][1:])
+# PN_LOCAL_ESC.setParseAction(lambda x: x[0][1:])
# [172] HEX ::= [0-9] | [A-F] | [a-f]
# HEX = Regex('[0-9A-Fa-f]') # not needed
# [171] PERCENT ::= '%' HEX HEX
-PERCENT_re = '%[0-9a-fA-F]{2}'
+PERCENT_re = "%[0-9a-fA-F]{2}"
# PERCENT = Regex(PERCENT_re) # regex'd
-#PERCENT.setParseAction(lambda x: chr(int(x[0][1:], 16)))
+# PERCENT.setParseAction(lambda x: chr(int(x[0][1:], 16)))
# [170] PLX ::= PERCENT | PN_LOCAL_ESC
-PLX_re = '(%s|%s)' % (PN_LOCAL_ESC_re, PERCENT_re)
+PLX_re = "(%s|%s)" % (PN_LOCAL_ESC_re, PERCENT_re)
# PLX = PERCENT | PN_LOCAL_ESC # regex'd
# [169] PN_LOCAL ::= (PN_CHARS_U | ':' | [0-9] | PLX ) ((PN_CHARS | '.' | ':' | PLX)* (PN_CHARS | ':' | PLX) )?
-PN_LOCAL = Regex(u"""([%(PN_CHARS_U)s:0-9]|%(PLX)s)
+PN_LOCAL = Regex(
+ u"""([%(PN_CHARS_U)s:0-9]|%(PLX)s)
(([%(PN_CHARS)s\\.:]|%(PLX)s)*
- ([%(PN_CHARS)s:]|%(PLX)s) )?""" % dict(PN_CHARS_U=PN_CHARS_U_re,
- PN_CHARS=PN_CHARS_re,
- PLX=PLX_re), flags=re.X | re.UNICODE)
+ ([%(PN_CHARS)s:]|%(PLX)s) )?"""
+ % dict(PN_CHARS_U=PN_CHARS_U_re, PN_CHARS=PN_CHARS_re, PLX=PLX_re),
+ flags=re.X | re.UNICODE,
+)
def _hexExpand(match):
@@ -218,71 +237,72 @@ PN_LOCAL.setParseAction(lambda x: re.sub("(%s)" % PERCENT_re, _hexExpand, x[0]))
# [141] PNAME_LN ::= PNAME_NS PN_LOCAL
-PNAME_LN = PNAME_NS + Param('localname', PN_LOCAL.leaveWhitespace())
+PNAME_LN = PNAME_NS + Param("localname", PN_LOCAL.leaveWhitespace())
# [142] BLANK_NODE_LABEL ::= '_:' ( PN_CHARS_U | [0-9] ) ((PN_CHARS|'.')* PN_CHARS)?
-BLANK_NODE_LABEL = Regex(u'_:[0-9%s](?:[\\.%s]*[%s])?' % (
- PN_CHARS_U_re, PN_CHARS_re, PN_CHARS_re), flags=re.U)
+BLANK_NODE_LABEL = Regex(
+ u"_:[0-9%s](?:[\\.%s]*[%s])?" % (PN_CHARS_U_re, PN_CHARS_re, PN_CHARS_re),
+ flags=re.U,
+)
BLANK_NODE_LABEL.setParseAction(lambda x: rdflib.BNode(x[0][2:]))
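
A similar sketch for blank node labels; the slice in the parse action above drops the leading "_:":

# Sketch: "_:b0" becomes BNode("b0").
from rdflib.plugins.sparql.parser import BLANK_NODE_LABEL

print(repr(BLANK_NODE_LABEL.parseString("_:b0")[0]))  # rdflib.term.BNode('b0')
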
# [166] VARNAME ::= ( PN_CHARS_U | [0-9] ) ( PN_CHARS_U | [0-9] | #x00B7 | [#x0300-#x036F] | [#x203F-#x2040] )*
-VARNAME = Regex(u'[%s0-9][%s0-9\u00B7\u0300-\u036F\u203F-\u2040]*' % (
- PN_CHARS_U_re, PN_CHARS_U_re), flags=re.U)
+VARNAME = Regex(
+ u"[%s0-9][%s0-9\u00B7\u0300-\u036F\u203F-\u2040]*" % (PN_CHARS_U_re, PN_CHARS_U_re),
+ flags=re.U,
+)
# [143] VAR1 ::= '?' VARNAME
-VAR1 = Combine(Suppress('?') + VARNAME)
+VAR1 = Combine(Suppress("?") + VARNAME)
# [144] VAR2 ::= '$' VARNAME
-VAR2 = Combine(Suppress('$') + VARNAME)
+VAR2 = Combine(Suppress("$") + VARNAME)
# [145] LANGTAG ::= '@' [a-zA-Z]+ ('-' [a-zA-Z0-9]+)*
-LANGTAG = Combine(Suppress('@') + Regex('[a-zA-Z]+(?:-[a-zA-Z0-9]+)*'))
+LANGTAG = Combine(Suppress("@") + Regex("[a-zA-Z]+(?:-[a-zA-Z0-9]+)*"))
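
Because the "@" is suppressed, LANGTAG yields just the tag text. A quick sketch:

# Sketch: LANGTAG keeps the language tag without the leading "@".
from rdflib.plugins.sparql.parser import LANGTAG

print(LANGTAG.parseString("@en-US")[0])  # en-US
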
# [146] INTEGER ::= [0-9]+
INTEGER = Regex(r"[0-9]+")
# INTEGER.setResultsName('integer')
-INTEGER.setParseAction(
- lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.integer))
+INTEGER.setParseAction(lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.integer))
# [155] EXPONENT ::= [eE] [+-]? [0-9]+
-EXPONENT_re = '[eE][+-]?[0-9]+'
+EXPONENT_re = "[eE][+-]?[0-9]+"
# [147] DECIMAL ::= [0-9]* '.' [0-9]+
-DECIMAL = Regex(r'[0-9]*\.[0-9]+') # (?![eE])
+DECIMAL = Regex(r"[0-9]*\.[0-9]+") # (?![eE])
# DECIMAL.setResultsName('decimal')
-DECIMAL.setParseAction(
- lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.decimal))
+DECIMAL.setParseAction(lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.decimal))
# [148] DOUBLE ::= [0-9]+ '.' [0-9]* EXPONENT | '.' ([0-9])+ EXPONENT | ([0-9])+ EXPONENT
-DOUBLE = Regex(
- r'[0-9]+\.[0-9]*%(e)s|\.([0-9])+%(e)s|[0-9]+%(e)s' % {'e': EXPONENT_re})
+DOUBLE = Regex(r"[0-9]+\.[0-9]*%(e)s|\.([0-9])+%(e)s|[0-9]+%(e)s" % {"e": EXPONENT_re})
# DOUBLE.setResultsName('double')
-DOUBLE.setParseAction(
- lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.double))
+DOUBLE.setParseAction(lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.double))
# [149] INTEGER_POSITIVE ::= '+' INTEGER
-INTEGER_POSITIVE = Suppress('+') + INTEGER.copy().leaveWhitespace()
-INTEGER_POSITIVE.setParseAction(lambda x: rdflib.Literal(
- "+" + x[0], datatype=rdflib.XSD.integer))
+INTEGER_POSITIVE = Suppress("+") + INTEGER.copy().leaveWhitespace()
+INTEGER_POSITIVE.setParseAction(
+ lambda x: rdflib.Literal("+" + x[0], datatype=rdflib.XSD.integer)
+)
# [150] DECIMAL_POSITIVE ::= '+' DECIMAL
-DECIMAL_POSITIVE = Suppress('+') + DECIMAL.copy().leaveWhitespace()
+DECIMAL_POSITIVE = Suppress("+") + DECIMAL.copy().leaveWhitespace()
# [151] DOUBLE_POSITIVE ::= '+' DOUBLE
-DOUBLE_POSITIVE = Suppress('+') + DOUBLE.copy().leaveWhitespace()
+DOUBLE_POSITIVE = Suppress("+") + DOUBLE.copy().leaveWhitespace()
# [152] INTEGER_NEGATIVE ::= '-' INTEGER
-INTEGER_NEGATIVE = Suppress('-') + INTEGER.copy().leaveWhitespace()
+INTEGER_NEGATIVE = Suppress("-") + INTEGER.copy().leaveWhitespace()
INTEGER_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# [153] DECIMAL_NEGATIVE ::= '-' DECIMAL
-DECIMAL_NEGATIVE = Suppress('-') + DECIMAL.copy().leaveWhitespace()
+DECIMAL_NEGATIVE = Suppress("-") + DECIMAL.copy().leaveWhitespace()
DECIMAL_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# [154] DOUBLE_NEGATIVE ::= '-' DOUBLE
-DOUBLE_NEGATIVE = Suppress('-') + DOUBLE.copy().leaveWhitespace()
+DOUBLE_NEGATIVE = Suppress("-") + DOUBLE.copy().leaveWhitespace()
DOUBLE_NEGATIVE.setParseAction(lambda x: neg(x[0]))
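
Each numeric terminal attaches a parse action that produces a typed rdflib Literal; a small sketch of the resulting datatypes:

# Sketch: the numeric terminals yield Literals with the matching XSD datatype.
import rdflib
from rdflib.plugins.sparql.parser import INTEGER, DECIMAL, DOUBLE

assert INTEGER.parseString("42")[0].datatype == rdflib.XSD.integer
assert DECIMAL.parseString("3.14")[0].datatype == rdflib.XSD.decimal
assert DOUBLE.parseString("1e6")[0].datatype == rdflib.XSD.double
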
# [160] ECHAR ::= '\' [tbnrf\"']
@@ -294,57 +314,58 @@ DOUBLE_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# ) + ZeroOrMore( ~ Literal("'\\") | ECHAR ) ) + "'''"
STRING_LITERAL_LONG1 = Regex(u"'''((?:'|'')?(?:[^'\\\\]|\\\\['ntbrf\\\\]))*'''")
STRING_LITERAL_LONG1.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3]))
+)
# [159] STRING_LITERAL_LONG2 ::= '"""' ( ( '"' | '""' )? ( [^"\] | ECHAR ) )* '"""'
# STRING_LITERAL_LONG2 = Literal('"""') + ( Optional( Literal('"') | '""'
# ) + ZeroOrMore( ~ Literal('"\\') | ECHAR ) ) + '"""'
STRING_LITERAL_LONG2 = Regex(u'"""(?:(?:"|"")?(?:[^"\\\\]|\\\\["ntbrf\\\\]))*"""')
STRING_LITERAL_LONG2.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3]))
+)
# [156] STRING_LITERAL1 ::= "'" ( ([^#x27#x5C#xA#xD]) | ECHAR )* "'"
# STRING_LITERAL1 = Literal("'") + ZeroOrMore(
# Regex(u'[^\u0027\u005C\u000A\u000D]',flags=re.U) | ECHAR ) + "'"
-STRING_LITERAL1 = Regex(
- u"'(?:[^'\\n\\r\\\\]|\\\\['ntbrf\\\\])*'(?!')", flags=re.U)
+STRING_LITERAL1 = Regex(u"'(?:[^'\\n\\r\\\\]|\\\\['ntbrf\\\\])*'(?!')", flags=re.U)
STRING_LITERAL1.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1]))
+)
# [157] STRING_LITERAL2 ::= '"' ( ([^#x22#x5C#xA#xD]) | ECHAR )* '"'
# STRING_LITERAL2 = Literal('"') + ZeroOrMore (
# Regex(u'[^\u0022\u005C\u000A\u000D]',flags=re.U) | ECHAR ) + '"'
-STRING_LITERAL2 = Regex(
- u'"(?:[^"\\n\\r\\\\]|\\\\["ntbrf\\\\])*"(?!")', flags=re.U)
+STRING_LITERAL2 = Regex(u'"(?:[^"\\n\\r\\\\]|\\\\["ntbrf\\\\])*"(?!")', flags=re.U)
STRING_LITERAL2.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1]))
+)
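
All four string terminals strip their quotes and run decodeUnicodeEscape over the body. A sketch, assuming decodeUnicodeEscape also decodes the standard ECHAR escapes such as \t:

# Sketch: the quoted form "a\tb" decodes to a Literal containing a real tab.
from rdflib.plugins.sparql.parser import STRING_LITERAL2

lit = STRING_LITERAL2.parseString(r'"a\tb"')[0]
print(repr(lit))  # rdflib.term.Literal('a\tb')
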
# [161] NIL ::= '(' WS* ')'
-NIL = Literal('(') + ')'
+NIL = Literal("(") + ")"
NIL.setParseAction(lambda x: rdflib.RDF.nil)
# [162] WS ::= #x20 | #x9 | #xD | #xA
# Not needed?
# WS = #x20 | #x9 | #xD | #xA
# [163] ANON ::= '[' WS* ']'
-ANON = Literal('[') + ']'
+ANON = Literal("[") + "]"
ANON.setParseAction(lambda x: rdflib.BNode())
# A = CaseSensitiveKeyword('a')
-A = Literal('a')
+A = Literal("a")
A.setParseAction(lambda x: rdflib.RDF.type)
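
A sketch of the two shorthand tokens just defined:

# Sketch: "[]" mints a fresh blank node, and bare "a" is rewritten to rdf:type.
import rdflib
from rdflib.plugins.sparql.parser import A, ANON

assert A.parseString("a")[0] == rdflib.RDF.type
assert isinstance(ANON.parseString("[]")[0], rdflib.BNode)
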
# ------ NON-TERMINALS --------------
# [5] BaseDecl ::= 'BASE' IRIREF
-BaseDecl = Comp('Base', Keyword('BASE') + Param('iri', IRIREF))
+BaseDecl = Comp("Base", Keyword("BASE") + Param("iri", IRIREF))
# [6] PrefixDecl ::= 'PREFIX' PNAME_NS IRIREF
-PrefixDecl = Comp(
- 'PrefixDecl', Keyword('PREFIX') + PNAME_NS + Param('iri', IRIREF))
+PrefixDecl = Comp("PrefixDecl", Keyword("PREFIX") + PNAME_NS + Param("iri", IRIREF))
# [4] Prologue ::= ( BaseDecl | PrefixDecl )*
Prologue = Group(ZeroOrMore(BaseDecl | PrefixDecl))
@@ -354,7 +375,7 @@ Var = VAR1 | VAR2
Var.setParseAction(lambda x: rdflib.term.Variable(x[0]))
# [137] PrefixedName ::= PNAME_LN | PNAME_NS
-PrefixedName = Comp('pname', PNAME_LN | PNAME_NS)
+PrefixedName = Comp("pname", PNAME_LN | PNAME_NS)
# [136] iri ::= IRIREF | PrefixedName
iri = IRIREF | PrefixedName
@@ -364,8 +385,14 @@ String = STRING_LITERAL_LONG1 | STRING_LITERAL_LONG2 | STRING_LITERAL1 | STRING_
# [129] RDFLiteral ::= String ( LANGTAG | ( '^^' iri ) )?
-RDFLiteral = Comp('literal', Param('string', String) + Optional(Param(
- 'lang', LANGTAG.leaveWhitespace()) | Literal('^^').leaveWhitespace() + Param('datatype', iri).leaveWhitespace()))
+RDFLiteral = Comp(
+ "literal",
+ Param("string", String)
+ + Optional(
+ Param("lang", LANGTAG.leaveWhitespace())
+ | Literal("^^").leaveWhitespace() + Param("datatype", iri).leaveWhitespace()
+ ),
+)
# [132] NumericLiteralPositive ::= INTEGER_POSITIVE | DECIMAL_POSITIVE | DOUBLE_POSITIVE
NumericLiteralPositive = DOUBLE_POSITIVE | DECIMAL_POSITIVE | INTEGER_POSITIVE
@@ -377,11 +404,14 @@ NumericLiteralNegative = DOUBLE_NEGATIVE | DECIMAL_NEGATIVE | INTEGER_NEGATIVE
NumericLiteralUnsigned = DOUBLE | DECIMAL | INTEGER
# [130] NumericLiteral ::= NumericLiteralUnsigned | NumericLiteralPositive | NumericLiteralNegative
-NumericLiteral = NumericLiteralUnsigned | NumericLiteralPositive | NumericLiteralNegative
+NumericLiteral = (
+ NumericLiteralUnsigned | NumericLiteralPositive | NumericLiteralNegative
+)
# [134] BooleanLiteral ::= 'true' | 'false'
-BooleanLiteral = Keyword('true').setParseAction(lambda: rdflib.Literal(True)) |\
- Keyword('false').setParseAction(lambda: rdflib.Literal(False))
+BooleanLiteral = Keyword("true").setParseAction(lambda: rdflib.Literal(True)) | Keyword(
+ "false"
+).setParseAction(lambda: rdflib.Literal(False))
# [138] BlankNode ::= BLANK_NODE_LABEL | ANON
BlankNode = BLANK_NODE_LABEL | ANON
@@ -396,19 +426,23 @@ VarOrTerm = Var | GraphTerm
VarOrIri = Var | iri
# [46] GraphRef ::= 'GRAPH' iri
-GraphRef = Keyword('GRAPH') + Param('graphiri', iri)
+GraphRef = Keyword("GRAPH") + Param("graphiri", iri)
# [47] GraphRefAll ::= GraphRef | 'DEFAULT' | 'NAMED' | 'ALL'
-GraphRefAll = GraphRef | Param('graphiri', Keyword('DEFAULT')) | Param(
- 'graphiri', Keyword('NAMED')) | Param('graphiri', Keyword('ALL'))
+GraphRefAll = (
+ GraphRef
+ | Param("graphiri", Keyword("DEFAULT"))
+ | Param("graphiri", Keyword("NAMED"))
+ | Param("graphiri", Keyword("ALL"))
+)
# [45] GraphOrDefault ::= 'DEFAULT' | 'GRAPH'? iri
-GraphOrDefault = ParamList('graph', Keyword(
- 'DEFAULT')) | Optional(Keyword('GRAPH')) + ParamList('graph', iri)
+GraphOrDefault = ParamList("graph", Keyword("DEFAULT")) | Optional(
+ Keyword("GRAPH")
+) + ParamList("graph", iri)
# [65] DataBlockValue ::= iri | RDFLiteral | NumericLiteral | BooleanLiteral | 'UNDEF'
-DataBlockValue = iri | RDFLiteral | NumericLiteral | BooleanLiteral | Keyword(
- 'UNDEF')
+DataBlockValue = iri | RDFLiteral | NumericLiteral | BooleanLiteral | Keyword("UNDEF")
# [78] Verb ::= VarOrIri | A
Verb = VarOrIri | A
@@ -432,37 +466,58 @@ GraphNodePath = VarOrTerm | TriplesNodePath
# [93] PathMod ::= '?' | '*' | '+'
-PathMod = Literal('?') | '*' | '+'
+PathMod = Literal("?") | "*" | "+"
# [96] PathOneInPropertySet ::= iri | A | '^' ( iri | A )
-PathOneInPropertySet = iri | A | Comp('InversePath', '^' + (iri | A))
+PathOneInPropertySet = iri | A | Comp("InversePath", "^" + (iri | A))
Path = Forward()
# [95] PathNegatedPropertySet ::= PathOneInPropertySet | '(' ( PathOneInPropertySet ( '|' PathOneInPropertySet )* )? ')'
-PathNegatedPropertySet = Comp('PathNegatedPropertySet', ParamList('part', PathOneInPropertySet) | '(' + Optional(
- ParamList('part', PathOneInPropertySet) + ZeroOrMore('|' + ParamList('part', PathOneInPropertySet))) + ')')
+PathNegatedPropertySet = Comp(
+ "PathNegatedPropertySet",
+ ParamList("part", PathOneInPropertySet)
+ | "("
+ + Optional(
+ ParamList("part", PathOneInPropertySet)
+ + ZeroOrMore("|" + ParamList("part", PathOneInPropertySet))
+ )
+ + ")",
+)
# [94] PathPrimary ::= iri | A | '!' PathNegatedPropertySet | '(' Path ')' | 'DISTINCT' '(' Path ')'
-PathPrimary = iri | A | Suppress('!') + PathNegatedPropertySet | Suppress('(') + Path + Suppress(
- ')') | Comp('DistinctPath', Keyword('DISTINCT') + '(' + Param('part', Path) + ')')
+PathPrimary = (
+ iri
+ | A
+ | Suppress("!") + PathNegatedPropertySet
+ | Suppress("(") + Path + Suppress(")")
+ | Comp("DistinctPath", Keyword("DISTINCT") + "(" + Param("part", Path) + ")")
+)
# [91] PathElt ::= PathPrimary Optional(PathMod)
-PathElt = Comp('PathElt', Param(
- 'part', PathPrimary) + Optional(Param('mod', PathMod.leaveWhitespace())))
+PathElt = Comp(
+ "PathElt",
+ Param("part", PathPrimary) + Optional(Param("mod", PathMod.leaveWhitespace())),
+)
# [92] PathEltOrInverse ::= PathElt | '^' PathElt
-PathEltOrInverse = PathElt | Suppress(
- '^') + Comp('PathEltOrInverse', Param('part', PathElt))
+PathEltOrInverse = PathElt | Suppress("^") + Comp(
+ "PathEltOrInverse", Param("part", PathElt)
+)
# [90] PathSequence ::= PathEltOrInverse ( '/' PathEltOrInverse )*
-PathSequence = Comp('PathSequence', ParamList('part', PathEltOrInverse) +
- ZeroOrMore('/' + ParamList('part', PathEltOrInverse)))
+PathSequence = Comp(
+ "PathSequence",
+ ParamList("part", PathEltOrInverse)
+ + ZeroOrMore("/" + ParamList("part", PathEltOrInverse)),
+)
# [89] PathAlternative ::= PathSequence ( '|' PathSequence )*
-PathAlternative = Comp('PathAlternative', ParamList('part', PathSequence) +
- ZeroOrMore('|' + ParamList('part', PathSequence)))
+PathAlternative = Comp(
+ "PathAlternative",
+ ParamList("part", PathSequence) + ZeroOrMore("|" + ParamList("part", PathSequence)),
+)
# [88] Path ::= PathAlternative
Path <<= PathAlternative
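
The path rules above do not build rdflib path objects directly; they produce nested CompValues that the algebra translation consumes later. A sketch of feeding a property path through the top-level entry point defined near the end of this module (the foaf IRIs are illustrative):

# Sketch: a property path parses as PathAlternative/PathSequence/PathElt nodes.
from rdflib.plugins.sparql.parser import parseQuery

tree = parseQuery(
    "PREFIX foaf: <http://xmlns.com/foaf/0.1/> "
    "SELECT ?o WHERE { ?s foaf:knows+/foaf:name ?o }"
)
print(tree)  # inspect the nested CompValue tree
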
@@ -474,127 +529,172 @@ VerbPath = Path
ObjectPath = GraphNodePath
# [86] ObjectListPath ::= ObjectPath ( ',' ObjectPath )*
-ObjectListPath = ObjectPath + ZeroOrMore(',' + ObjectPath)
+ObjectListPath = ObjectPath + ZeroOrMore("," + ObjectPath)
GroupGraphPattern = Forward()
# [102] Collection ::= '(' OneOrMore(GraphNode) ')'
-Collection = Suppress('(') + OneOrMore(GraphNode) + Suppress(')')
+Collection = Suppress("(") + OneOrMore(GraphNode) + Suppress(")")
Collection.setParseAction(expandCollection)
# [103] CollectionPath ::= '(' OneOrMore(GraphNodePath) ')'
-CollectionPath = Suppress('(') + OneOrMore(GraphNodePath) + Suppress(')')
+CollectionPath = Suppress("(") + OneOrMore(GraphNodePath) + Suppress(")")
CollectionPath.setParseAction(expandCollection)
# [80] Object ::= GraphNode
Object = GraphNode
# [79] ObjectList ::= Object ( ',' Object )*
-ObjectList = Object + ZeroOrMore(',' + Object)
+ObjectList = Object + ZeroOrMore("," + Object)
# [83] PropertyListPathNotEmpty ::= ( VerbPath | VerbSimple ) ObjectListPath ( ';' ( ( VerbPath | VerbSimple ) ObjectList )? )*
-PropertyListPathNotEmpty = (VerbPath | VerbSimple) + ObjectListPath + ZeroOrMore(
- ';' + Optional((VerbPath | VerbSimple) + ObjectListPath))
+PropertyListPathNotEmpty = (
+ (VerbPath | VerbSimple)
+ + ObjectListPath
+ + ZeroOrMore(";" + Optional((VerbPath | VerbSimple) + ObjectListPath))
+)
# [82] PropertyListPath ::= Optional(PropertyListPathNotEmpty)
PropertyListPath = Optional(PropertyListPathNotEmpty)
# [77] PropertyListNotEmpty ::= Verb ObjectList ( ';' ( Verb ObjectList )? )*
-PropertyListNotEmpty = Verb + ObjectList + ZeroOrMore(';' + Optional(Verb +
- ObjectList))
+PropertyListNotEmpty = Verb + ObjectList + ZeroOrMore(";" + Optional(Verb + ObjectList))
# [76] PropertyList ::= Optional(PropertyListNotEmpty)
PropertyList = Optional(PropertyListNotEmpty)
# [99] BlankNodePropertyList ::= '[' PropertyListNotEmpty ']'
-BlankNodePropertyList = Group(
- Suppress('[') + PropertyListNotEmpty + Suppress(']'))
+BlankNodePropertyList = Group(Suppress("[") + PropertyListNotEmpty + Suppress("]"))
BlankNodePropertyList.setParseAction(expandBNodeTriples)
# [101] BlankNodePropertyListPath ::= '[' PropertyListPathNotEmpty ']'
BlankNodePropertyListPath = Group(
- Suppress('[') + PropertyListPathNotEmpty + Suppress(']'))
+ Suppress("[") + PropertyListPathNotEmpty + Suppress("]")
+)
BlankNodePropertyListPath.setParseAction(expandBNodeTriples)
# [98] TriplesNode ::= Collection | BlankNodePropertyList
-TriplesNode <<= (Collection | BlankNodePropertyList)
+TriplesNode <<= Collection | BlankNodePropertyList
# [100] TriplesNodePath ::= CollectionPath | BlankNodePropertyListPath
-TriplesNodePath <<= (CollectionPath | BlankNodePropertyListPath)
+TriplesNodePath <<= CollectionPath | BlankNodePropertyListPath
# [75] TriplesSameSubject ::= VarOrTerm PropertyListNotEmpty | TriplesNode PropertyList
-TriplesSameSubject = VarOrTerm + PropertyListNotEmpty | TriplesNode + \
- PropertyList
+TriplesSameSubject = VarOrTerm + PropertyListNotEmpty | TriplesNode + PropertyList
TriplesSameSubject.setParseAction(expandTriples)
# [52] TriplesTemplate ::= TriplesSameSubject ( '.' Optional(TriplesTemplate) )?
TriplesTemplate = Forward()
-TriplesTemplate <<= (ParamList('triples', TriplesSameSubject) + Optional(
- Suppress('.') + Optional(TriplesTemplate)))
+TriplesTemplate <<= ParamList("triples", TriplesSameSubject) + Optional(
+ Suppress(".") + Optional(TriplesTemplate)
+)
# [51] QuadsNotTriples ::= 'GRAPH' VarOrIri '{' Optional(TriplesTemplate) '}'
-QuadsNotTriples = Comp('QuadsNotTriples', Keyword('GRAPH') + Param(
- 'term', VarOrIri) + '{' + Optional(TriplesTemplate) + '}')
+QuadsNotTriples = Comp(
+ "QuadsNotTriples",
+ Keyword("GRAPH") + Param("term", VarOrIri) + "{" + Optional(TriplesTemplate) + "}",
+)
# [50] Quads ::= Optional(TriplesTemplate) ( QuadsNotTriples '.'? Optional(TriplesTemplate) )*
-Quads = Comp('Quads', Optional(TriplesTemplate) + ZeroOrMore(ParamList(
- 'quadsNotTriples', QuadsNotTriples) + Optional(Suppress('.')) + Optional(TriplesTemplate)))
+Quads = Comp(
+ "Quads",
+ Optional(TriplesTemplate)
+ + ZeroOrMore(
+ ParamList("quadsNotTriples", QuadsNotTriples)
+ + Optional(Suppress("."))
+ + Optional(TriplesTemplate)
+ ),
+)
# [48] QuadPattern ::= '{' Quads '}'
-QuadPattern = '{' + Param('quads', Quads) + '}'
+QuadPattern = "{" + Param("quads", Quads) + "}"
# [49] QuadData ::= '{' Quads '}'
-QuadData = '{' + Param('quads', Quads) + '}'
+QuadData = "{" + Param("quads", Quads) + "}"
# [81] TriplesSameSubjectPath ::= VarOrTerm PropertyListPathNotEmpty | TriplesNodePath PropertyListPath
-TriplesSameSubjectPath = VarOrTerm + \
- PropertyListPathNotEmpty | TriplesNodePath + PropertyListPath
+TriplesSameSubjectPath = (
+ VarOrTerm + PropertyListPathNotEmpty | TriplesNodePath + PropertyListPath
+)
TriplesSameSubjectPath.setParseAction(expandTriples)
# [55] TriplesBlock ::= TriplesSameSubjectPath ( '.' Optional(TriplesBlock) )?
TriplesBlock = Forward()
-TriplesBlock <<= (ParamList('triples', TriplesSameSubjectPath) + Optional(
- Suppress('.') + Optional(TriplesBlock)))
+TriplesBlock <<= ParamList("triples", TriplesSameSubjectPath) + Optional(
+ Suppress(".") + Optional(TriplesBlock)
+)
# [66] MinusGraphPattern ::= 'MINUS' GroupGraphPattern
MinusGraphPattern = Comp(
- 'MinusGraphPattern', Keyword('MINUS') + Param('graph', GroupGraphPattern))
+ "MinusGraphPattern", Keyword("MINUS") + Param("graph", GroupGraphPattern)
+)
# [67] GroupOrUnionGraphPattern ::= GroupGraphPattern ( 'UNION' GroupGraphPattern )*
-GroupOrUnionGraphPattern = Comp('GroupOrUnionGraphPattern', ParamList(
- 'graph', GroupGraphPattern) + ZeroOrMore(Keyword('UNION') + ParamList('graph', GroupGraphPattern)))
+GroupOrUnionGraphPattern = Comp(
+ "GroupOrUnionGraphPattern",
+ ParamList("graph", GroupGraphPattern)
+ + ZeroOrMore(Keyword("UNION") + ParamList("graph", GroupGraphPattern)),
+)
Expression = Forward()
# [72] ExpressionList ::= NIL | '(' Expression ( ',' Expression )* ')'
-ExpressionList = NIL | Group(
- Suppress('(') + delimitedList(Expression) + Suppress(')'))
+ExpressionList = NIL | Group(Suppress("(") + delimitedList(Expression) + Suppress(")"))
# [122] RegexExpression ::= 'REGEX' '(' Expression ',' Expression ( ',' Expression )? ')'
-RegexExpression = Comp('Builtin_REGEX', Keyword('REGEX') + '(' + Param('text', Expression) + ',' + Param(
- 'pattern', Expression) + Optional(',' + Param('flags', Expression)) + ')')
+RegexExpression = Comp(
+ "Builtin_REGEX",
+ Keyword("REGEX")
+ + "("
+ + Param("text", Expression)
+ + ","
+ + Param("pattern", Expression)
+ + Optional("," + Param("flags", Expression))
+ + ")",
+)
RegexExpression.setEvalFn(op.Builtin_REGEX)
# [123] SubstringExpression ::= 'SUBSTR' '(' Expression ',' Expression ( ',' Expression )? ')'
-SubstringExpression = Comp('Builtin_SUBSTR', Keyword('SUBSTR') + '(' + Param('arg', Expression) + ',' + Param(
- 'start', Expression) + Optional(',' + Param('length', Expression)) + ')').setEvalFn(op.Builtin_SUBSTR)
+SubstringExpression = Comp(
+ "Builtin_SUBSTR",
+ Keyword("SUBSTR")
+ + "("
+ + Param("arg", Expression)
+ + ","
+ + Param("start", Expression)
+ + Optional("," + Param("length", Expression))
+ + ")",
+).setEvalFn(op.Builtin_SUBSTR)
# [124] StrReplaceExpression ::= 'REPLACE' '(' Expression ',' Expression ',' Expression ( ',' Expression )? ')'
-StrReplaceExpression = Comp('Builtin_REPLACE', Keyword('REPLACE') + '(' + Param('arg', Expression) + ',' + Param(
- 'pattern', Expression) + ',' + Param('replacement', Expression) + Optional(',' + Param('flags', Expression)) + ')').setEvalFn(op.Builtin_REPLACE)
+StrReplaceExpression = Comp(
+ "Builtin_REPLACE",
+ Keyword("REPLACE")
+ + "("
+ + Param("arg", Expression)
+ + ","
+ + Param("pattern", Expression)
+ + ","
+ + Param("replacement", Expression)
+ + Optional("," + Param("flags", Expression))
+ + ")",
+).setEvalFn(op.Builtin_REPLACE)
# [125] ExistsFunc ::= 'EXISTS' GroupGraphPattern
-ExistsFunc = Comp('Builtin_EXISTS', Keyword('EXISTS') + Param(
- 'graph', GroupGraphPattern)).setEvalFn(op.Builtin_EXISTS)
+ExistsFunc = Comp(
+ "Builtin_EXISTS", Keyword("EXISTS") + Param("graph", GroupGraphPattern)
+).setEvalFn(op.Builtin_EXISTS)
# [126] NotExistsFunc ::= 'NOT' 'EXISTS' GroupGraphPattern
-NotExistsFunc = Comp('Builtin_NOTEXISTS', Keyword('NOT') + Keyword(
- 'EXISTS') + Param('graph', GroupGraphPattern)).setEvalFn(op.Builtin_EXISTS)
+NotExistsFunc = Comp(
+ "Builtin_NOTEXISTS",
+ Keyword("NOT") + Keyword("EXISTS") + Param("graph", GroupGraphPattern),
+).setEvalFn(op.Builtin_EXISTS)
# [127] Aggregate ::= 'COUNT' '(' 'DISTINCT'? ( '*' | Expression ) ')'
@@ -605,17 +705,33 @@ NotExistsFunc = Comp('Builtin_NOTEXISTS', Keyword('NOT') + Keyword(
# | 'SAMPLE' '(' Optional('DISTINCT') Expression ')'
# | 'GROUP_CONCAT' '(' Optional('DISTINCT') Expression ( ';' 'SEPARATOR' '=' String )? ')'
-_Distinct = Optional(Keyword('DISTINCT'))
-_AggregateParams = '(' + Param(
- 'distinct', _Distinct) + Param('vars', Expression) + ')'
-
-Aggregate = Comp('Aggregate_Count', Keyword('COUNT') + '(' + Param('distinct', _Distinct) + Param('vars', '*' | Expression) + ')')\
- | Comp('Aggregate_Sum', Keyword('SUM') + _AggregateParams)\
- | Comp('Aggregate_Min', Keyword('MIN') + _AggregateParams)\
- | Comp('Aggregate_Max', Keyword('MAX') + _AggregateParams)\
- | Comp('Aggregate_Avg', Keyword('AVG') + _AggregateParams)\
- | Comp('Aggregate_Sample', Keyword('SAMPLE') + _AggregateParams)\
- | Comp('Aggregate_GroupConcat', Keyword('GROUP_CONCAT') + '(' + Param('distinct', _Distinct) + Param('vars', Expression) + Optional(';' + Keyword('SEPARATOR') + '=' + Param('separator', String)) + ')')
+_Distinct = Optional(Keyword("DISTINCT"))
+_AggregateParams = "(" + Param("distinct", _Distinct) + Param("vars", Expression) + ")"
+
+Aggregate = (
+ Comp(
+ "Aggregate_Count",
+ Keyword("COUNT")
+ + "("
+ + Param("distinct", _Distinct)
+ + Param("vars", "*" | Expression)
+ + ")",
+ )
+ | Comp("Aggregate_Sum", Keyword("SUM") + _AggregateParams)
+ | Comp("Aggregate_Min", Keyword("MIN") + _AggregateParams)
+ | Comp("Aggregate_Max", Keyword("MAX") + _AggregateParams)
+ | Comp("Aggregate_Avg", Keyword("AVG") + _AggregateParams)
+ | Comp("Aggregate_Sample", Keyword("SAMPLE") + _AggregateParams)
+ | Comp(
+ "Aggregate_GroupConcat",
+ Keyword("GROUP_CONCAT")
+ + "("
+ + Param("distinct", _Distinct)
+ + Param("vars", Expression)
+ + Optional(";" + Keyword("SEPARATOR") + "=" + Param("separator", String))
+ + ")",
+ )
+)
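
A sketch of an aggregate riding through the full grammar:

# Sketch: COUNT(DISTINCT ?s) parses into an Aggregate_Count CompValue inside
# the SELECT projection.
from rdflib.plugins.sparql.parser import parseQuery

tree = parseQuery("SELECT (COUNT(DISTINCT ?s) AS ?n) WHERE { ?s ?p ?o }")
print(tree)
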
# [121] BuiltInCall ::= Aggregate
# | 'STR' '(' + Expression + ')'
@@ -673,93 +789,271 @@ Aggregate = Comp('Aggregate_Count', Keyword('COUNT') + '(' + Param('distinct', _
# | ExistsFunc
# | NotExistsFunc
-BuiltInCall = Aggregate \
- | Comp('Builtin_STR', Keyword('STR') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_STR) \
- | Comp('Builtin_LANG', Keyword('LANG') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_LANG) \
- | Comp('Builtin_LANGMATCHES', Keyword('LANGMATCHES') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_LANGMATCHES) \
- | Comp('Builtin_DATATYPE', Keyword('DATATYPE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_DATATYPE) \
- | Comp('Builtin_BOUND', Keyword('BOUND') + '(' + Param('arg', Var) + ')').setEvalFn(op.Builtin_BOUND) \
- | Comp('Builtin_IRI', Keyword('IRI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_IRI) \
- | Comp('Builtin_URI', Keyword('URI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_IRI) \
- | Comp('Builtin_BNODE', Keyword('BNODE') + ('(' + Param('arg', Expression) + ')' | NIL)).setEvalFn(op.Builtin_BNODE) \
- | Comp('Builtin_RAND', Keyword('RAND') + NIL).setEvalFn(op.Builtin_RAND) \
- | Comp('Builtin_ABS', Keyword('ABS') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_ABS) \
- | Comp('Builtin_CEIL', Keyword('CEIL') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_CEIL) \
- | Comp('Builtin_FLOOR', Keyword('FLOOR') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_FLOOR) \
- | Comp('Builtin_ROUND', Keyword('ROUND') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_ROUND) \
- | Comp('Builtin_CONCAT', Keyword('CONCAT') + Param('arg', ExpressionList)).setEvalFn(op.Builtin_CONCAT) \
- | SubstringExpression \
- | Comp('Builtin_STRLEN', Keyword('STRLEN') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_STRLEN) \
- | StrReplaceExpression \
- | Comp('Builtin_UCASE', Keyword('UCASE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_UCASE) \
- | Comp('Builtin_LCASE', Keyword('LCASE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_LCASE) \
- | Comp('Builtin_ENCODE_FOR_URI', Keyword('ENCODE_FOR_URI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_ENCODE_FOR_URI) \
- | Comp('Builtin_CONTAINS', Keyword('CONTAINS') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_CONTAINS) \
- | Comp('Builtin_STRSTARTS', Keyword('STRSTARTS') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRSTARTS) \
- | Comp('Builtin_STRENDS', Keyword('STRENDS') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRENDS) \
- | Comp('Builtin_STRBEFORE', Keyword('STRBEFORE') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRBEFORE) \
- | Comp('Builtin_STRAFTER', Keyword('STRAFTER') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRAFTER) \
- | Comp('Builtin_YEAR', Keyword('YEAR') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_YEAR) \
- | Comp('Builtin_MONTH', Keyword('MONTH') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_MONTH) \
- | Comp('Builtin_DAY', Keyword('DAY') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_DAY) \
- | Comp('Builtin_HOURS', Keyword('HOURS') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_HOURS) \
- | Comp('Builtin_MINUTES', Keyword('MINUTES') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_MINUTES) \
- | Comp('Builtin_SECONDS', Keyword('SECONDS') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SECONDS) \
- | Comp('Builtin_TIMEZONE', Keyword('TIMEZONE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_TIMEZONE) \
- | Comp('Builtin_TZ', Keyword('TZ') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_TZ) \
- | Comp('Builtin_NOW', Keyword('NOW') + NIL).setEvalFn(op.Builtin_NOW) \
- | Comp('Builtin_UUID', Keyword('UUID') + NIL).setEvalFn(op.Builtin_UUID) \
- | Comp('Builtin_STRUUID', Keyword('STRUUID') + NIL).setEvalFn(op.Builtin_STRUUID) \
- | Comp('Builtin_MD5', Keyword('MD5') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_MD5) \
- | Comp('Builtin_SHA1', Keyword('SHA1') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA1) \
- | Comp('Builtin_SHA256', Keyword('SHA256') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA256) \
- | Comp('Builtin_SHA384', Keyword('SHA384') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA384) \
- | Comp('Builtin_SHA512', Keyword('SHA512') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA512) \
- | Comp('Builtin_COALESCE', Keyword('COALESCE') + Param('arg', ExpressionList)).setEvalFn(op.Builtin_COALESCE) \
- | Comp('Builtin_IF', Keyword('IF') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ',' + Param('arg3', Expression) + ')').setEvalFn(op.Builtin_IF) \
- | Comp('Builtin_STRLANG', Keyword('STRLANG') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRLANG) \
- | Comp('Builtin_STRDT', Keyword('STRDT') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRDT) \
- | Comp('Builtin_sameTerm', Keyword('sameTerm') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_sameTerm) \
- | Comp('Builtin_isIRI', Keyword('isIRI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isIRI) \
- | Comp('Builtin_isURI', Keyword('isURI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isIRI) \
- | Comp('Builtin_isBLANK', Keyword('isBLANK') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isBLANK) \
- | Comp('Builtin_isLITERAL', Keyword('isLITERAL') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isLITERAL) \
- | Comp('Builtin_isNUMERIC', Keyword('isNUMERIC') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isNUMERIC) \
- | RegexExpression \
- | ExistsFunc \
+BuiltInCall = (
+ Aggregate
+ | Comp(
+ "Builtin_STR", Keyword("STR") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_STR)
+ | Comp(
+ "Builtin_LANG", Keyword("LANG") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_LANG)
+ | Comp(
+ "Builtin_LANGMATCHES",
+ Keyword("LANGMATCHES")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_LANGMATCHES)
+ | Comp(
+ "Builtin_DATATYPE", Keyword("DATATYPE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_DATATYPE)
+ | Comp("Builtin_BOUND", Keyword("BOUND") + "(" + Param("arg", Var) + ")").setEvalFn(
+ op.Builtin_BOUND
+ )
+ | Comp(
+ "Builtin_IRI", Keyword("IRI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_IRI)
+ | Comp(
+ "Builtin_URI", Keyword("URI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_IRI)
+ | Comp(
+ "Builtin_BNODE", Keyword("BNODE") + ("(" + Param("arg", Expression) + ")" | NIL)
+ ).setEvalFn(op.Builtin_BNODE)
+ | Comp("Builtin_RAND", Keyword("RAND") + NIL).setEvalFn(op.Builtin_RAND)
+ | Comp(
+ "Builtin_ABS", Keyword("ABS") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_ABS)
+ | Comp(
+ "Builtin_CEIL", Keyword("CEIL") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_CEIL)
+ | Comp(
+ "Builtin_FLOOR", Keyword("FLOOR") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_FLOOR)
+ | Comp(
+ "Builtin_ROUND", Keyword("ROUND") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_ROUND)
+ | Comp(
+ "Builtin_CONCAT", Keyword("CONCAT") + Param("arg", ExpressionList)
+ ).setEvalFn(op.Builtin_CONCAT)
+ | SubstringExpression
+ | Comp(
+ "Builtin_STRLEN", Keyword("STRLEN") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_STRLEN)
+ | StrReplaceExpression
+ | Comp(
+ "Builtin_UCASE", Keyword("UCASE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_UCASE)
+ | Comp(
+ "Builtin_LCASE", Keyword("LCASE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_LCASE)
+ | Comp(
+ "Builtin_ENCODE_FOR_URI",
+ Keyword("ENCODE_FOR_URI") + "(" + Param("arg", Expression) + ")",
+ ).setEvalFn(op.Builtin_ENCODE_FOR_URI)
+ | Comp(
+ "Builtin_CONTAINS",
+ Keyword("CONTAINS")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_CONTAINS)
+ | Comp(
+ "Builtin_STRSTARTS",
+ Keyword("STRSTARTS")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRSTARTS)
+ | Comp(
+ "Builtin_STRENDS",
+ Keyword("STRENDS")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRENDS)
+ | Comp(
+ "Builtin_STRBEFORE",
+ Keyword("STRBEFORE")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRBEFORE)
+ | Comp(
+ "Builtin_STRAFTER",
+ Keyword("STRAFTER")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRAFTER)
+ | Comp(
+ "Builtin_YEAR", Keyword("YEAR") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_YEAR)
+ | Comp(
+ "Builtin_MONTH", Keyword("MONTH") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_MONTH)
+ | Comp(
+ "Builtin_DAY", Keyword("DAY") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_DAY)
+ | Comp(
+ "Builtin_HOURS", Keyword("HOURS") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_HOURS)
+ | Comp(
+ "Builtin_MINUTES", Keyword("MINUTES") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_MINUTES)
+ | Comp(
+ "Builtin_SECONDS", Keyword("SECONDS") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SECONDS)
+ | Comp(
+ "Builtin_TIMEZONE", Keyword("TIMEZONE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_TIMEZONE)
+ | Comp(
+ "Builtin_TZ", Keyword("TZ") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_TZ)
+ | Comp("Builtin_NOW", Keyword("NOW") + NIL).setEvalFn(op.Builtin_NOW)
+ | Comp("Builtin_UUID", Keyword("UUID") + NIL).setEvalFn(op.Builtin_UUID)
+ | Comp("Builtin_STRUUID", Keyword("STRUUID") + NIL).setEvalFn(op.Builtin_STRUUID)
+ | Comp(
+ "Builtin_MD5", Keyword("MD5") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_MD5)
+ | Comp(
+ "Builtin_SHA1", Keyword("SHA1") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA1)
+ | Comp(
+ "Builtin_SHA256", Keyword("SHA256") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA256)
+ | Comp(
+ "Builtin_SHA384", Keyword("SHA384") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA384)
+ | Comp(
+ "Builtin_SHA512", Keyword("SHA512") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA512)
+ | Comp(
+ "Builtin_COALESCE", Keyword("COALESCE") + Param("arg", ExpressionList)
+ ).setEvalFn(op.Builtin_COALESCE)
+ | Comp(
+ "Builtin_IF",
+ Keyword("IF")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ","
+ + Param("arg3", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_IF)
+ | Comp(
+ "Builtin_STRLANG",
+ Keyword("STRLANG")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRLANG)
+ | Comp(
+ "Builtin_STRDT",
+ Keyword("STRDT")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRDT)
+ | Comp(
+ "Builtin_sameTerm",
+ Keyword("sameTerm")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_sameTerm)
+ | Comp(
+ "Builtin_isIRI", Keyword("isIRI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isIRI)
+ | Comp(
+ "Builtin_isURI", Keyword("isURI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isIRI)
+ | Comp(
+ "Builtin_isBLANK", Keyword("isBLANK") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isBLANK)
+ | Comp(
+ "Builtin_isLITERAL", Keyword("isLITERAL") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isLITERAL)
+ | Comp(
+ "Builtin_isNUMERIC", Keyword("isNUMERIC") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isNUMERIC)
+ | RegexExpression
+ | ExistsFunc
| NotExistsFunc
+)
# [71] ArgList ::= NIL | '(' 'DISTINCT'? Expression ( ',' Expression )* ')'
-ArgList = NIL | '(' + Param('distinct', _Distinct) + delimitedList(
- ParamList('expr', Expression)) + ')'
+ArgList = (
+ NIL
+ | "("
+ + Param("distinct", _Distinct)
+ + delimitedList(ParamList("expr", Expression))
+ + ")"
+)
# [128] iriOrFunction ::= iri Optional(ArgList)
-iriOrFunction = (Comp(
- 'Function', Param('iri', iri) + ArgList).setEvalFn(op.Function)) | iri
+iriOrFunction = (
+ Comp("Function", Param("iri", iri) + ArgList).setEvalFn(op.Function)
+) | iri
# [70] FunctionCall ::= iri ArgList
-FunctionCall = Comp(
- 'Function', Param('iri', iri) + ArgList).setEvalFn(op.Function)
+FunctionCall = Comp("Function", Param("iri", iri) + ArgList).setEvalFn(op.Function)
# [120] BrackettedExpression ::= '(' Expression ')'
-BrackettedExpression = Suppress('(') + Expression + Suppress(')')
+BrackettedExpression = Suppress("(") + Expression + Suppress(")")
# [119] PrimaryExpression ::= BrackettedExpression | BuiltInCall | iriOrFunction | RDFLiteral | NumericLiteral | BooleanLiteral | Var
-PrimaryExpression = BrackettedExpression | BuiltInCall | iriOrFunction | RDFLiteral | NumericLiteral | BooleanLiteral | Var
+PrimaryExpression = (
+ BrackettedExpression
+ | BuiltInCall
+ | iriOrFunction
+ | RDFLiteral
+ | NumericLiteral
+ | BooleanLiteral
+ | Var
+)
# [118] UnaryExpression ::= '!' PrimaryExpression
# | '+' PrimaryExpression
# | '-' PrimaryExpression
# | PrimaryExpression
-UnaryExpression = Comp('UnaryNot', '!' + Param('expr', PrimaryExpression)).setEvalFn(op.UnaryNot) \
- | Comp('UnaryPlus', '+' + Param('expr', PrimaryExpression)).setEvalFn(op.UnaryPlus) \
- | Comp('UnaryMinus', '-' + Param('expr', PrimaryExpression)).setEvalFn(op.UnaryMinus) \
+UnaryExpression = (
+ Comp("UnaryNot", "!" + Param("expr", PrimaryExpression)).setEvalFn(op.UnaryNot)
+ | Comp("UnaryPlus", "+" + Param("expr", PrimaryExpression)).setEvalFn(op.UnaryPlus)
+ | Comp("UnaryMinus", "-" + Param("expr", PrimaryExpression)).setEvalFn(
+ op.UnaryMinus
+ )
| PrimaryExpression
+)
# [117] MultiplicativeExpression ::= UnaryExpression ( '*' UnaryExpression | '/' UnaryExpression )*
-MultiplicativeExpression = Comp('MultiplicativeExpression', Param('expr', UnaryExpression) + ZeroOrMore(ParamList('op', '*') + ParamList(
- 'other', UnaryExpression) | ParamList('op', '/') + ParamList('other', UnaryExpression))).setEvalFn(op.MultiplicativeExpression)
+MultiplicativeExpression = Comp(
+ "MultiplicativeExpression",
+ Param("expr", UnaryExpression)
+ + ZeroOrMore(
+ ParamList("op", "*") + ParamList("other", UnaryExpression)
+ | ParamList("op", "/") + ParamList("other", UnaryExpression)
+ ),
+).setEvalFn(op.MultiplicativeExpression)
# [116] AdditiveExpression ::= MultiplicativeExpression ( '+' MultiplicativeExpression | '-' MultiplicativeExpression | ( NumericLiteralPositive | NumericLiteralNegative ) ( ( '*' UnaryExpression ) | ( '/' UnaryExpression ) )* )*
@@ -770,36 +1064,55 @@ MultiplicativeExpression = Comp('MultiplicativeExpression', Param('expr', UnaryE
# tokenizing and parsing
-AdditiveExpression = Comp('AdditiveExpression', Param('expr', MultiplicativeExpression) +
- ZeroOrMore(ParamList('op', '+') + ParamList('other', MultiplicativeExpression) |
- ParamList('op', '-') + ParamList('other', MultiplicativeExpression))).setEvalFn(op.AdditiveExpression)
+AdditiveExpression = Comp(
+ "AdditiveExpression",
+ Param("expr", MultiplicativeExpression)
+ + ZeroOrMore(
+ ParamList("op", "+") + ParamList("other", MultiplicativeExpression)
+ | ParamList("op", "-") + ParamList("other", MultiplicativeExpression)
+ ),
+).setEvalFn(op.AdditiveExpression)
# [115] NumericExpression ::= AdditiveExpression
NumericExpression = AdditiveExpression
# [114] RelationalExpression ::= NumericExpression ( '=' NumericExpression | '!=' NumericExpression | '<' NumericExpression | '>' NumericExpression | '<=' NumericExpression | '>=' NumericExpression | 'IN' ExpressionList | 'NOT' 'IN' ExpressionList )?
-RelationalExpression = Comp('RelationalExpression', Param('expr', NumericExpression) + Optional(
- Param('op', '=') + Param('other', NumericExpression) |
- Param('op', '!=') + Param('other', NumericExpression) |
- Param('op', '<') + Param('other', NumericExpression) |
- Param('op', '>') + Param('other', NumericExpression) |
- Param('op', '<=') + Param('other', NumericExpression) |
- Param('op', '>=') + Param('other', NumericExpression) |
- Param('op', Keyword('IN')) + Param('other', ExpressionList) |
- Param('op', Combine(Keyword('NOT') + Keyword('IN'), adjacent=False, joinString=" ")) + Param('other', ExpressionList))).setEvalFn(op.RelationalExpression)
+RelationalExpression = Comp(
+ "RelationalExpression",
+ Param("expr", NumericExpression)
+ + Optional(
+ Param("op", "=") + Param("other", NumericExpression)
+ | Param("op", "!=") + Param("other", NumericExpression)
+ | Param("op", "<") + Param("other", NumericExpression)
+ | Param("op", ">") + Param("other", NumericExpression)
+ | Param("op", "<=") + Param("other", NumericExpression)
+ | Param("op", ">=") + Param("other", NumericExpression)
+ | Param("op", Keyword("IN")) + Param("other", ExpressionList)
+ | Param(
+ "op",
+ Combine(Keyword("NOT") + Keyword("IN"), adjacent=False, joinString=" "),
+ )
+ + Param("other", ExpressionList)
+ ),
+).setEvalFn(op.RelationalExpression)
# [113] ValueLogical ::= RelationalExpression
ValueLogical = RelationalExpression
# [112] ConditionalAndExpression ::= ValueLogical ( '&&' ValueLogical )*
-ConditionalAndExpression = Comp('ConditionalAndExpression', Param('expr', ValueLogical) + ZeroOrMore(
- '&&' + ParamList('other', ValueLogical))).setEvalFn(op.ConditionalAndExpression)
+ConditionalAndExpression = Comp(
+ "ConditionalAndExpression",
+ Param("expr", ValueLogical) + ZeroOrMore("&&" + ParamList("other", ValueLogical)),
+).setEvalFn(op.ConditionalAndExpression)
# [111] ConditionalOrExpression ::= ConditionalAndExpression ( '||' ConditionalAndExpression )*
-ConditionalOrExpression = Comp('ConditionalOrExpression', Param('expr', ConditionalAndExpression) + ZeroOrMore(
- '||' + ParamList('other', ConditionalAndExpression))).setEvalFn(op.ConditionalOrExpression)
+ConditionalOrExpression = Comp(
+ "ConditionalOrExpression",
+ Param("expr", ConditionalAndExpression)
+ + ZeroOrMore("||" + ParamList("other", ConditionalAndExpression)),
+).setEvalFn(op.ConditionalOrExpression)
# [110] Expression ::= ConditionalOrExpression
Expression <<= ConditionalOrExpression
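
Each expression rule carries its evaluation function via setEvalFn, so the CompValue tree produced here can later be evaluated by the operators module. A sketch of an expression entering through FILTER:

# Sketch: a boolean FILTER expression parses into the Conditional*/Relational*
# nodes defined above.
from rdflib.plugins.sparql.parser import parseQuery

tree = parseQuery("SELECT ?o WHERE { ?s ?p ?o FILTER(?o > 3 && ?o < 10) }")
print(tree)
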
@@ -809,7 +1122,7 @@ Expression <<= ConditionalOrExpression
Constraint = BrackettedExpression | BuiltInCall | FunctionCall
# [68] Filter ::= 'FILTER' Constraint
-Filter = Comp('Filter', Keyword('FILTER') + Param('expr', Constraint))
+Filter = Comp("Filter", Keyword("FILTER") + Param("expr", Constraint))
# [16] SourceSelector ::= iri
@@ -819,128 +1132,217 @@ SourceSelector = iri
DefaultGraphClause = SourceSelector
# [15] NamedGraphClause ::= 'NAMED' SourceSelector
-NamedGraphClause = Keyword('NAMED') + Param('named', SourceSelector)
+NamedGraphClause = Keyword("NAMED") + Param("named", SourceSelector)
# [13] DatasetClause ::= 'FROM' ( DefaultGraphClause | NamedGraphClause )
-DatasetClause = Comp('DatasetClause', Keyword(
- 'FROM') + (Param('default', DefaultGraphClause) | NamedGraphClause))
+DatasetClause = Comp(
+ "DatasetClause",
+ Keyword("FROM") + (Param("default", DefaultGraphClause) | NamedGraphClause),
+)
# [20] GroupCondition ::= BuiltInCall | FunctionCall | '(' Expression ( 'AS' Var )? ')' | Var
-GroupCondition = BuiltInCall | FunctionCall | Comp('GroupAs', '(' + Param(
- 'expr', Expression) + Optional(Keyword('AS') + Param('var', Var)) + ')') | Var
+GroupCondition = (
+ BuiltInCall
+ | FunctionCall
+ | Comp(
+ "GroupAs",
+ "("
+ + Param("expr", Expression)
+ + Optional(Keyword("AS") + Param("var", Var))
+ + ")",
+ )
+ | Var
+)
# [19] GroupClause ::= 'GROUP' 'BY' GroupCondition+
-GroupClause = Comp('GroupClause', Keyword('GROUP') + Keyword(
- 'BY') + OneOrMore(ParamList('condition', GroupCondition)))
+GroupClause = Comp(
+ "GroupClause",
+ Keyword("GROUP")
+ + Keyword("BY")
+ + OneOrMore(ParamList("condition", GroupCondition)),
+)
-_Silent = Optional(Param('silent', Keyword('SILENT')))
+_Silent = Optional(Param("silent", Keyword("SILENT")))
# [31] Load ::= 'LOAD' 'SILENT'? iri ( 'INTO' GraphRef )?
-Load = Comp('Load', Keyword('LOAD') + _Silent + Param('iri', iri) +
- Optional(Keyword('INTO') + GraphRef))
+Load = Comp(
+ "Load",
+ Keyword("LOAD")
+ + _Silent
+ + Param("iri", iri)
+ + Optional(Keyword("INTO") + GraphRef),
+)
# [32] Clear ::= 'CLEAR' 'SILENT'? GraphRefAll
-Clear = Comp('Clear', Keyword('CLEAR') + _Silent + GraphRefAll)
+Clear = Comp("Clear", Keyword("CLEAR") + _Silent + GraphRefAll)
# [33] Drop ::= 'DROP' _Silent GraphRefAll
-Drop = Comp('Drop', Keyword('DROP') + _Silent + GraphRefAll)
+Drop = Comp("Drop", Keyword("DROP") + _Silent + GraphRefAll)
# [34] Create ::= 'CREATE' _Silent GraphRef
-Create = Comp('Create', Keyword('CREATE') + _Silent + GraphRef)
+Create = Comp("Create", Keyword("CREATE") + _Silent + GraphRef)
# [35] Add ::= 'ADD' _Silent GraphOrDefault 'TO' GraphOrDefault
-Add = Comp('Add', Keyword(
- 'ADD') + _Silent + GraphOrDefault + Keyword('TO') + GraphOrDefault)
+Add = Comp(
+ "Add", Keyword("ADD") + _Silent + GraphOrDefault + Keyword("TO") + GraphOrDefault
+)
# [36] Move ::= 'MOVE' _Silent GraphOrDefault 'TO' GraphOrDefault
-Move = Comp('Move', Keyword(
- 'MOVE') + _Silent + GraphOrDefault + Keyword('TO') + GraphOrDefault)
+Move = Comp(
+ "Move", Keyword("MOVE") + _Silent + GraphOrDefault + Keyword("TO") + GraphOrDefault
+)
# [37] Copy ::= 'COPY' _Silent GraphOrDefault 'TO' GraphOrDefault
-Copy = Comp('Copy', Keyword(
- 'COPY') + _Silent + GraphOrDefault + Keyword('TO') + GraphOrDefault)
+Copy = Comp(
+ "Copy", Keyword("COPY") + _Silent + GraphOrDefault + Keyword("TO") + GraphOrDefault
+)
# [38] InsertData ::= 'INSERT DATA' QuadData
-InsertData = Comp('InsertData', Keyword('INSERT') + Keyword('DATA') + QuadData)
+InsertData = Comp("InsertData", Keyword("INSERT") + Keyword("DATA") + QuadData)
# [39] DeleteData ::= 'DELETE DATA' QuadData
-DeleteData = Comp('DeleteData', Keyword('DELETE') + Keyword('DATA') + QuadData)
+DeleteData = Comp("DeleteData", Keyword("DELETE") + Keyword("DATA") + QuadData)
# [40] DeleteWhere ::= 'DELETE WHERE' QuadPattern
-DeleteWhere = Comp(
- 'DeleteWhere', Keyword('DELETE') + Keyword('WHERE') + QuadPattern)
+DeleteWhere = Comp("DeleteWhere", Keyword("DELETE") + Keyword("WHERE") + QuadPattern)
# [42] DeleteClause ::= 'DELETE' QuadPattern
-DeleteClause = Comp('DeleteClause', Keyword('DELETE') + QuadPattern)
+DeleteClause = Comp("DeleteClause", Keyword("DELETE") + QuadPattern)
# [43] InsertClause ::= 'INSERT' QuadPattern
-InsertClause = Comp('InsertClause', Keyword('INSERT') + QuadPattern)
+InsertClause = Comp("InsertClause", Keyword("INSERT") + QuadPattern)
# [44] UsingClause ::= 'USING' ( iri | 'NAMED' iri )
-UsingClause = Comp('UsingClause', Keyword('USING') + (
- Param('default', iri) | Keyword('NAMED') + Param('named', iri)))
+UsingClause = Comp(
+ "UsingClause",
+ Keyword("USING") + (Param("default", iri) | Keyword("NAMED") + Param("named", iri)),
+)
# [41] Modify ::= ( 'WITH' iri )? ( DeleteClause Optional(InsertClause) | InsertClause ) ZeroOrMore(UsingClause) 'WHERE' GroupGraphPattern
-Modify = Comp('Modify', Optional(Keyword('WITH') + Param('withClause', iri)) + (Param('delete', DeleteClause) + Optional(Param(
- 'insert', InsertClause)) | Param('insert', InsertClause)) + ZeroOrMore(ParamList('using', UsingClause)) + Keyword('WHERE') + Param('where', GroupGraphPattern))
+Modify = Comp(
+ "Modify",
+ Optional(Keyword("WITH") + Param("withClause", iri))
+ + (
+ Param("delete", DeleteClause) + Optional(Param("insert", InsertClause))
+ | Param("insert", InsertClause)
+ )
+ + ZeroOrMore(ParamList("using", UsingClause))
+ + Keyword("WHERE")
+ + Param("where", GroupGraphPattern),
+)
# [30] Update1 ::= Load | Clear | Drop | Add | Move | Copy | Create | InsertData | DeleteData | DeleteWhere | Modify
-Update1 = Load | Clear | Drop | Add | Move | Copy | Create | InsertData | DeleteData | DeleteWhere | Modify
+Update1 = (
+ Load
+ | Clear
+ | Drop
+ | Add
+ | Move
+ | Copy
+ | Create
+ | InsertData
+ | DeleteData
+ | DeleteWhere
+ | Modify
+)
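
A sketch of update requests flowing through these rules via parseUpdate (defined at the bottom of the module); ';' chains several Update1 forms into one request (the example.org IRIs are placeholders):

# Sketch: two chained update operations in a single request.
from rdflib.plugins.sparql.parser import parseUpdate

req = parseUpdate(
    "INSERT DATA { <http://example.org/s> <http://example.org/p> 1 } ; "
    "CLEAR DEFAULT"
)
print(req)
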
# [63] InlineDataOneVar ::= Var '{' ZeroOrMore(DataBlockValue) '}'
-InlineDataOneVar = ParamList(
- 'var', Var) + '{' + ZeroOrMore(ParamList('value', DataBlockValue)) + '}'
+InlineDataOneVar = (
+ ParamList("var", Var) + "{" + ZeroOrMore(ParamList("value", DataBlockValue)) + "}"
+)
# [64] InlineDataFull ::= ( NIL | '(' ZeroOrMore(Var) ')' ) '{' ( '(' ZeroOrMore(DataBlockValue) ')' | NIL )* '}'
-InlineDataFull = (NIL | '(' + ZeroOrMore(ParamList('var', Var)) + ')') + '{' + ZeroOrMore(
- ParamList('value', Group(Suppress('(') + ZeroOrMore(DataBlockValue) + Suppress(')') | NIL))) + '}'
+InlineDataFull = (
+ (NIL | "(" + ZeroOrMore(ParamList("var", Var)) + ")")
+ + "{"
+ + ZeroOrMore(
+ ParamList(
+ "value",
+ Group(Suppress("(") + ZeroOrMore(DataBlockValue) + Suppress(")") | NIL),
+ )
+ )
+ + "}"
+)
# [62] DataBlock ::= InlineDataOneVar | InlineDataFull
DataBlock = InlineDataOneVar | InlineDataFull
# [28] ValuesClause ::= ( 'VALUES' DataBlock )?
-ValuesClause = Optional(Param(
- 'valuesClause', Comp('ValuesClause', Keyword('VALUES') + DataBlock)))
+ValuesClause = Optional(
+ Param("valuesClause", Comp("ValuesClause", Keyword("VALUES") + DataBlock))
+)
# [74] ConstructTriples ::= TriplesSameSubject ( '.' Optional(ConstructTriples) )?
ConstructTriples = Forward()
-ConstructTriples <<= (ParamList('template', TriplesSameSubject) + Optional(
- Suppress('.') + Optional(ConstructTriples)))
+ConstructTriples <<= ParamList("template", TriplesSameSubject) + Optional(
+ Suppress(".") + Optional(ConstructTriples)
+)
# [73] ConstructTemplate ::= '{' Optional(ConstructTriples) '}'
-ConstructTemplate = Suppress('{') + Optional(ConstructTriples) + Suppress('}')
+ConstructTemplate = Suppress("{") + Optional(ConstructTriples) + Suppress("}")
# [57] OptionalGraphPattern ::= 'OPTIONAL' GroupGraphPattern
-OptionalGraphPattern = Comp('OptionalGraphPattern', Keyword(
- 'OPTIONAL') + Param('graph', GroupGraphPattern))
+OptionalGraphPattern = Comp(
+ "OptionalGraphPattern", Keyword("OPTIONAL") + Param("graph", GroupGraphPattern)
+)
# [58] GraphGraphPattern ::= 'GRAPH' VarOrIri GroupGraphPattern
-GraphGraphPattern = Comp('GraphGraphPattern', Keyword(
- 'GRAPH') + Param('term', VarOrIri) + Param('graph', GroupGraphPattern))
+GraphGraphPattern = Comp(
+ "GraphGraphPattern",
+ Keyword("GRAPH") + Param("term", VarOrIri) + Param("graph", GroupGraphPattern),
+)
# [59] ServiceGraphPattern ::= 'SERVICE' _Silent VarOrIri GroupGraphPattern
-ServiceGraphPattern = Comp('ServiceGraphPattern', Keyword(
- 'SERVICE') + _Silent + Param('term', VarOrIri) + Param('graph', GroupGraphPattern))
+ServiceGraphPattern = Comp(
+ "ServiceGraphPattern",
+ Keyword("SERVICE")
+ + _Silent
+ + Param("term", VarOrIri)
+ + Param("graph", GroupGraphPattern),
+)
# [60] Bind ::= 'BIND' '(' Expression 'AS' Var ')'
-Bind = Comp('Bind', Keyword('BIND') + '(' + Param(
- 'expr', Expression) + Keyword('AS') + Param('var', Var) + ')')
+Bind = Comp(
+ "Bind",
+ Keyword("BIND")
+ + "("
+ + Param("expr", Expression)
+ + Keyword("AS")
+ + Param("var", Var)
+ + ")",
+)
# [61] InlineData ::= 'VALUES' DataBlock
-InlineData = Comp('InlineData', Keyword('VALUES') + DataBlock)
+InlineData = Comp("InlineData", Keyword("VALUES") + DataBlock)
# [56] GraphPatternNotTriples ::= GroupOrUnionGraphPattern | OptionalGraphPattern | MinusGraphPattern | GraphGraphPattern | ServiceGraphPattern | Filter | Bind | InlineData
-GraphPatternNotTriples = GroupOrUnionGraphPattern | OptionalGraphPattern | MinusGraphPattern | GraphGraphPattern | ServiceGraphPattern | Filter | Bind | InlineData
+GraphPatternNotTriples = (
+ GroupOrUnionGraphPattern
+ | OptionalGraphPattern
+ | MinusGraphPattern
+ | GraphGraphPattern
+ | ServiceGraphPattern
+ | Filter
+ | Bind
+ | InlineData
+)
# [54] GroupGraphPatternSub ::= Optional(TriplesBlock) ( GraphPatternNotTriples '.'? Optional(TriplesBlock) )*
-GroupGraphPatternSub = Comp('GroupGraphPatternSub', Optional(ParamList('part', Comp('TriplesBlock', TriplesBlock))) + ZeroOrMore(
- ParamList('part', GraphPatternNotTriples) + Optional('.') + Optional(ParamList('part', Comp('TriplesBlock', TriplesBlock)))))
+GroupGraphPatternSub = Comp(
+ "GroupGraphPatternSub",
+ Optional(ParamList("part", Comp("TriplesBlock", TriplesBlock)))
+ + ZeroOrMore(
+ ParamList("part", GraphPatternNotTriples)
+ + Optional(".")
+ + Optional(ParamList("part", Comp("TriplesBlock", TriplesBlock)))
+ ),
+)
# ----------------
@@ -948,70 +1350,151 @@ GroupGraphPatternSub = Comp('GroupGraphPatternSub', Optional(ParamList('part', C
HavingCondition = Constraint
# [21] HavingClause ::= 'HAVING' HavingCondition+
-HavingClause = Comp('HavingClause', Keyword(
- 'HAVING') + OneOrMore(ParamList('condition', HavingCondition)))
+HavingClause = Comp(
+ "HavingClause",
+ Keyword("HAVING") + OneOrMore(ParamList("condition", HavingCondition)),
+)
# [24] OrderCondition ::= ( ( 'ASC' | 'DESC' ) BrackettedExpression )
# | ( Constraint | Var )
-OrderCondition = Comp('OrderCondition', Param('order', Keyword('ASC') | Keyword(
- 'DESC')) + Param('expr', BrackettedExpression) | Param('expr', Constraint | Var))
+OrderCondition = Comp(
+ "OrderCondition",
+ Param("order", Keyword("ASC") | Keyword("DESC"))
+ + Param("expr", BrackettedExpression)
+ | Param("expr", Constraint | Var),
+)
# [23] OrderClause ::= 'ORDER' 'BY' OneOrMore(OrderCondition)
-OrderClause = Comp('OrderClause', Keyword('ORDER') + Keyword(
- 'BY') + OneOrMore(ParamList('condition', OrderCondition)))
+OrderClause = Comp(
+ "OrderClause",
+ Keyword("ORDER")
+ + Keyword("BY")
+ + OneOrMore(ParamList("condition", OrderCondition)),
+)
# [26] LimitClause ::= 'LIMIT' INTEGER
-LimitClause = Keyword('LIMIT') + Param('limit', INTEGER)
+LimitClause = Keyword("LIMIT") + Param("limit", INTEGER)
# [27] OffsetClause ::= 'OFFSET' INTEGER
-OffsetClause = Keyword('OFFSET') + Param('offset', INTEGER)
+OffsetClause = Keyword("OFFSET") + Param("offset", INTEGER)
# [25] LimitOffsetClauses ::= LimitClause Optional(OffsetClause) | OffsetClause Optional(LimitClause)
-LimitOffsetClauses = Comp('LimitOffsetClauses', LimitClause + Optional(
- OffsetClause) | OffsetClause + Optional(LimitClause))
+LimitOffsetClauses = Comp(
+ "LimitOffsetClauses",
+ LimitClause + Optional(OffsetClause) | OffsetClause + Optional(LimitClause),
+)
# [18] SolutionModifier ::= GroupClause? HavingClause? OrderClause? LimitOffsetClauses?
-SolutionModifier = Optional(Param('groupby', GroupClause)) + Optional(Param('having', HavingClause)) + Optional(
- Param('orderby', OrderClause)) + Optional(Param('limitoffset', LimitOffsetClauses))
+SolutionModifier = (
+ Optional(Param("groupby", GroupClause))
+ + Optional(Param("having", HavingClause))
+ + Optional(Param("orderby", OrderClause))
+ + Optional(Param("limitoffset", LimitOffsetClauses))
+)
# [9] SelectClause ::= 'SELECT' ( 'DISTINCT' | 'REDUCED' )? ( ( Var | ( '(' Expression 'AS' Var ')' ) )+ | '*' )
-SelectClause = Keyword('SELECT') + Optional(Param('modifier', Keyword('DISTINCT') | Keyword('REDUCED'))) + (OneOrMore(ParamList('projection', Comp('vars',
- Param('var', Var) | (Literal('(') + Param('expr', Expression) + Keyword('AS') + Param('evar', Var) + ')')))) | '*')
+SelectClause = (
+ Keyword("SELECT")
+ + Optional(Param("modifier", Keyword("DISTINCT") | Keyword("REDUCED")))
+ + (
+ OneOrMore(
+ ParamList(
+ "projection",
+ Comp(
+ "vars",
+ Param("var", Var)
+ | (
+ Literal("(")
+ + Param("expr", Expression)
+ + Keyword("AS")
+ + Param("evar", Var)
+ + ")"
+ ),
+ ),
+ )
+ )
+ | "*"
+ )
+)
# [17] WhereClause ::= 'WHERE'? GroupGraphPattern
-WhereClause = Optional(Keyword('WHERE')) + Param('where', GroupGraphPattern)
+WhereClause = Optional(Keyword("WHERE")) + Param("where", GroupGraphPattern)
# [8] SubSelect ::= SelectClause WhereClause SolutionModifier ValuesClause
-SubSelect = Comp('SubSelect', SelectClause + WhereClause +
- SolutionModifier + ValuesClause)
+SubSelect = Comp(
+ "SubSelect", SelectClause + WhereClause + SolutionModifier + ValuesClause
+)
# [53] GroupGraphPattern ::= '{' ( SubSelect | GroupGraphPatternSub ) '}'
-GroupGraphPattern <<= (
- Suppress('{') + (SubSelect | GroupGraphPatternSub) + Suppress('}'))
+GroupGraphPattern <<= Suppress("{") + (SubSelect | GroupGraphPatternSub) + Suppress("}")
# [7] SelectQuery ::= SelectClause DatasetClause* WhereClause SolutionModifier
-SelectQuery = Comp('SelectQuery', SelectClause + ZeroOrMore(ParamList(
- 'datasetClause', DatasetClause)) + WhereClause + SolutionModifier + ValuesClause)
+SelectQuery = Comp(
+ "SelectQuery",
+ SelectClause
+ + ZeroOrMore(ParamList("datasetClause", DatasetClause))
+ + WhereClause
+ + SolutionModifier
+ + ValuesClause,
+)
# [10] ConstructQuery ::= 'CONSTRUCT' ( ConstructTemplate DatasetClause* WhereClause SolutionModifier | DatasetClause* 'WHERE' '{' TriplesTemplate? '}' SolutionModifier )
# NOTE: The CONSTRUCT WHERE alternative has unnecessarily many Comp/Param pairs
# to allow it to pass through the same algebra translation process (a sketch of
# the resulting parse follows the definition below)
-ConstructQuery = Comp('ConstructQuery', Keyword('CONSTRUCT') + (ConstructTemplate + ZeroOrMore(ParamList('datasetClause', DatasetClause)) + WhereClause + SolutionModifier + ValuesClause | ZeroOrMore(ParamList(
- 'datasetClause', DatasetClause)) + Keyword('WHERE') + '{' + Optional(Param('where', Comp('FakeGroupGraphPatten', ParamList('part', Comp('TriplesBlock', TriplesTemplate))))) + '}' + SolutionModifier + ValuesClause))
+ConstructQuery = Comp(
+ "ConstructQuery",
+ Keyword("CONSTRUCT")
+ + (
+ ConstructTemplate
+ + ZeroOrMore(ParamList("datasetClause", DatasetClause))
+ + WhereClause
+ + SolutionModifier
+ + ValuesClause
+ | ZeroOrMore(ParamList("datasetClause", DatasetClause))
+ + Keyword("WHERE")
+ + "{"
+ + Optional(
+ Param(
+ "where",
+ Comp(
+ "FakeGroupGraphPatten",
+ ParamList("part", Comp("TriplesBlock", TriplesTemplate)),
+ ),
+ )
+ )
+ + "}"
+ + SolutionModifier
+ + ValuesClause
+ ),
+)
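# Sketch of what the CONSTRUCT WHERE shortcut parses into (an interactive
# session is assumed; parseQuery is defined at the bottom of this module):
#
#     q = parseQuery("CONSTRUCT WHERE { ?s ?p ?o }")
#     q[1].name   # expected: 'ConstructQuery'
#     q[1].where  # expected: the 'FakeGroupGraphPatten' CompValue above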
# [12] AskQuery ::= 'ASK' DatasetClause* WhereClause SolutionModifier
-AskQuery = Comp('AskQuery', Keyword('ASK') + Param('datasetClause', ZeroOrMore(
- DatasetClause)) + WhereClause + SolutionModifier + ValuesClause)
+AskQuery = Comp(
+ "AskQuery",
+ Keyword("ASK")
+ + Param("datasetClause", ZeroOrMore(DatasetClause))
+ + WhereClause
+ + SolutionModifier
+ + ValuesClause,
+)
# [11] DescribeQuery ::= 'DESCRIBE' ( VarOrIri+ | '*' ) DatasetClause* WhereClause? SolutionModifier
-DescribeQuery = Comp('DescribeQuery', Keyword('DESCRIBE') + (OneOrMore(ParamList('var', VarOrIri)) | '*') + Param(
- 'datasetClause', ZeroOrMore(DatasetClause)) + Optional(WhereClause) + SolutionModifier + ValuesClause)
+DescribeQuery = Comp(
+ "DescribeQuery",
+ Keyword("DESCRIBE")
+ + (OneOrMore(ParamList("var", VarOrIri)) | "*")
+ + Param("datasetClause", ZeroOrMore(DatasetClause))
+ + Optional(WhereClause)
+ + SolutionModifier
+ + ValuesClause,
+)
# [29] Update ::= Prologue ( Update1 ( ';' Update )? )?
Update = Forward()
-Update <<= (ParamList('prologue', Prologue) + Optional(ParamList('request',
- Update1) + Optional(';' + Update)))
+Update <<= ParamList("prologue", Prologue) + Optional(
+ ParamList("request", Update1) + Optional(";" + Update)
+)
# [2] Query ::= Prologue
@@ -1021,17 +1504,16 @@ Update <<= (ParamList('prologue', Prologue) + Optional(ParamList('request',
Query = Prologue + (SelectQuery | ConstructQuery | DescribeQuery | AskQuery)
# [3] UpdateUnit ::= Update
-UpdateUnit = Comp('Update', Update)
+UpdateUnit = Comp("Update", Update)
# [1] QueryUnit ::= Query
QueryUnit = Query
-QueryUnit.ignore('#' + restOfLine)
-UpdateUnit.ignore('#' + restOfLine)
+QueryUnit.ignore("#" + restOfLine)
+UpdateUnit.ignore("#" + restOfLine)
-expandUnicodeEscapes_re = re.compile(
- r'\\u([0-9a-f]{4}(?:[0-9a-f]{4})?)', flags=re.I)
+expandUnicodeEscapes_re = re.compile(r"\\u([0-9a-f]{4}(?:[0-9a-f]{4})?)", flags=re.I)
def expandUnicodeEscapes(q):
@@ -1050,28 +1532,29 @@ def expandUnicodeEscapes(q):
def parseQuery(q):
- if hasattr(q, 'read'):
+ if hasattr(q, "read"):
q = q.read()
if isinstance(q, bytes):
- q = q.decode('utf-8')
+ q = q.decode("utf-8")
q = expandUnicodeEscapes(q)
return Query.parseString(q, parseAll=True)
def parseUpdate(q):
- if hasattr(q, 'read'):
+ if hasattr(q, "read"):
q = q.read()
if isinstance(q, bytes):
- q = q.decode('utf-8')
+ q = q.decode("utf-8")
q = expandUnicodeEscapes(q)
return UpdateUnit.parseString(q, parseAll=True)[0]
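# Minimal usage sketch (the query strings are illustrative; note that \u
# escapes are expanded before pyparsing ever sees the text):
#
#     from rdflib.plugins.sparql.parser import parseQuery, parseUpdate
#     q = parseQuery('SELECT * WHERE { ?s ?p "caf\\u00E9" }')
#     u = parseUpdate("CLEAR DEFAULT")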
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
DEBUG = True
try:
q = Query.parseString(sys.argv[1])
diff --git a/rdflib/plugins/sparql/parserutils.py b/rdflib/plugins/sparql/parserutils.py
index 29804eea..e67b754b 100644
--- a/rdflib/plugins/sparql/parserutils.py
+++ b/rdflib/plugins/sparql/parserutils.py
@@ -1,4 +1,3 @@
-
from types import MethodType
from collections import OrderedDict
@@ -44,6 +43,7 @@ the resulting CompValue
# Comp('Sum')( Param('x')(Number) + '+' + Param('y')(Number) )
+
def value(ctx, val, variables=False, errors=False):
"""
utility function for evaluating something...
@@ -172,7 +172,7 @@ class CompValue(OrderedDict):
def __getattr__(self, a):
# Hack hack: OrderedDict relies on this
- if a in ('_OrderedDict__root', '_OrderedDict__end'):
+ if a in ("_OrderedDict__root", "_OrderedDict__end"):
raise AttributeError
try:
return self[a]
@@ -224,13 +224,13 @@ class Comp(TokenConverter):
res._evalfn = MethodType(self.evalfn, res)
else:
res = CompValue(self.name)
- if self.name == 'ServiceGraphPattern':
+ if self.name == "ServiceGraphPattern":
# Then this must be a service graph pattern and have
# already matched.
            # let's assume there is one, for now, then test for two later.
sgp = originalTextFor(self.expr)
service_string = sgp.searchString(instring)[0][0]
- res['service_string'] = service_string
+ res["service_string"] = service_string
for t in tokenList:
if isinstance(t, ParamValue):
@@ -250,38 +250,38 @@ class Comp(TokenConverter):
return self
-def prettify_parsetree(t, indent='', depth=0):
+def prettify_parsetree(t, indent="", depth=0):
out = []
if isinstance(t, ParseResults):
for e in t.asList():
out.append(prettify_parsetree(e, indent, depth + 1))
for k, v in sorted(t.items()):
- out.append("%s%s- %s:\n" % (indent, ' ' * depth, k))
+ out.append("%s%s- %s:\n" % (indent, " " * depth, k))
out.append(prettify_parsetree(v, indent, depth + 1))
elif isinstance(t, CompValue):
- out.append("%s%s> %s:\n" % (indent, ' ' * depth, t.name))
+ out.append("%s%s> %s:\n" % (indent, " " * depth, t.name))
for k, v in t.items():
- out.append("%s%s- %s:\n" % (indent, ' ' * (depth + 1), k))
+ out.append("%s%s- %s:\n" % (indent, " " * (depth + 1), k))
out.append(prettify_parsetree(v, indent, depth + 2))
elif isinstance(t, dict):
for k, v in t.items():
- out.append("%s%s- %s:\n" % (indent, ' ' * (depth + 1), k))
+ out.append("%s%s- %s:\n" % (indent, " " * (depth + 1), k))
out.append(prettify_parsetree(v, indent, depth + 2))
elif isinstance(t, list):
for e in t:
out.append(prettify_parsetree(e, indent, depth + 1))
else:
- out.append("%s%s- %r\n" % (indent, ' ' * depth, t))
+ out.append("%s%s- %r\n" % (indent, " " * depth, t))
return "".join(out)
-if __name__ == '__main__':
+if __name__ == "__main__":
from pyparsing import Word, nums
import sys
Number = Word(nums)
Number.setParseAction(lambda x: int(x[0]))
- Plus = Comp('plus', Param('a', Number) + '+' + Param('b', Number))
+ Plus = Comp("plus", Param("a", Number) + "+" + Param("b", Number))
Plus.setEvalFn(lambda self, ctx: self.a + self.b)
r = Plus.parseString(sys.argv[1])
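# With the argument "1+2", the toy grammar above is expected to produce,
# roughly (a sketch, not captured output):
#
#     r[0].name  # 'plus'
#     r[0].a     # 1
#     r[0].b     # 2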
diff --git a/rdflib/plugins/sparql/processor.py b/rdflib/plugins/sparql/processor.py
index 073a387e..84e8c823 100644
--- a/rdflib/plugins/sparql/processor.py
+++ b/rdflib/plugins/sparql/processor.py
@@ -1,4 +1,3 @@
-
"""
Code for tying SPARQL Engine into RDFLib
@@ -7,7 +6,6 @@ These should be automatically registered with RDFLib
"""
-
from rdflib.query import Processor, Result, UpdateProcessor
from rdflib.plugins.sparql.sparql import Query
@@ -33,12 +31,12 @@ def processUpdate(graph, updateString, initBindings={}, initNs={}, base=None):
Process a SPARQL Update Request
returns Nothing on success or raises Exceptions on error
"""
- evalUpdate(graph, translateUpdate(
- parseUpdate(updateString), base, initNs), initBindings)
+ evalUpdate(
+ graph, translateUpdate(parseUpdate(updateString), base, initNs), initBindings
+ )
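# Minimal usage sketch of processUpdate (the triple data is illustrative):
#
#     from rdflib import Graph
#     g = Graph()
#     processUpdate(g, 'INSERT DATA { <urn:a> <urn:b> "c" }')
#     assert len(g) == 1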
class SPARQLResult(Result):
-
def __init__(self, res):
Result.__init__(self, res["type_"])
self.vars = res.get("vars_")
@@ -59,13 +57,10 @@ class SPARQLUpdateProcessor(UpdateProcessor):
class SPARQLProcessor(Processor):
-
def __init__(self, graph):
self.graph = graph
- def query(
- self, strOrQuery, initBindings={},
- initNs={}, base=None, DEBUG=False):
+ def query(self, strOrQuery, initBindings={}, initNs={}, base=None, DEBUG=False):
"""
Evaluate a query with the given initial bindings, and initial
namespaces. The given base is used to resolve relative URIs in
diff --git a/rdflib/plugins/sparql/results/csvresults.py b/rdflib/plugins/sparql/results/csvresults.py
index d354ccf5..c87b6ea7 100644
--- a/rdflib/plugins/sparql/results/csvresults.py
+++ b/rdflib/plugins/sparql/results/csvresults.py
@@ -21,11 +21,11 @@ class CSVResultParser(ResultParser):
def parse(self, source, content_type=None):
- r = Result('SELECT')
+ r = Result("SELECT")
if isinstance(source.read(0), bytes):
# if reading from source returns bytes do utf-8 decoding
- source = codecs.getreader('utf-8')(source)
+ source = codecs.getreader("utf-8")(source)
reader = csv.reader(source, delimiter=self.delim)
r.vars = [Variable(x) for x in next(reader)]
@@ -37,9 +37,11 @@ class CSVResultParser(ResultParser):
return r
def parseRow(self, row, v):
- return dict((var, val)
- for var, val in zip(v, [self.convertTerm(t)
- for t in row]) if val is not None)
+ return dict(
+ (var, val)
+ for var, val in zip(v, [self.convertTerm(t) for t in row])
+ if val is not None
+ )
def convertTerm(self, t):
if t == "":
@@ -52,22 +54,21 @@ class CSVResultParser(ResultParser):
class CSVResultSerializer(ResultSerializer):
-
def __init__(self, result):
ResultSerializer.__init__(self, result)
self.delim = ","
if result.type != "SELECT":
- raise Exception(
- "CSVSerializer can only serialize select query results")
+ raise Exception("CSVSerializer can only serialize select query results")
- def serialize(self, stream, encoding='utf-8', **kwargs):
+ def serialize(self, stream, encoding="utf-8", **kwargs):
# the serialiser writes bytes in the given encoding
# in py3 csv.writer is unicode aware and writes STRINGS,
# so we encode afterwards
import codecs
+
stream = codecs.getwriter(encoding)(stream)
out = csv.writer(stream, delimiter=self.delim)
@@ -75,8 +76,9 @@ class CSVResultSerializer(ResultSerializer):
vs = [self.serializeTerm(v, encoding) for v in self.result.vars]
out.writerow(vs)
for row in self.result.bindings:
- out.writerow([self.serializeTerm(
- row.get(v), encoding) for v in self.result.vars])
+ out.writerow(
+ [self.serializeTerm(row.get(v), encoding) for v in self.result.vars]
+ )
def serializeTerm(self, term, encoding):
if term is None:
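# Minimal usage sketch (assumes a populated graph; the stream must accept
# bytes, since the writer encodes its output):
#
#     import io
#     from rdflib import Graph
#     res = Graph().query("SELECT ?s WHERE { ?s ?p ?o }")
#     buf = io.BytesIO()
#     res.serialize(buf, format="csv")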
diff --git a/rdflib/plugins/sparql/results/graph.py b/rdflib/plugins/sparql/results/graph.py
index c47daa72..13e256bb 100644
--- a/rdflib/plugins/sparql/results/graph.py
+++ b/rdflib/plugins/sparql/results/graph.py
@@ -1,18 +1,12 @@
from rdflib import Graph
-from rdflib.query import (
- Result,
- ResultParser,
- ResultSerializer,
- ResultException
-)
+from rdflib.query import Result, ResultParser, ResultSerializer, ResultException
class GraphResultParser(ResultParser):
-
def parse(self, source, content_type):
- res = Result('CONSTRUCT') # hmm - or describe?type_)
+ res = Result("CONSTRUCT") # hmm - or describe?type_)
res.graph = Graph()
res.graph.parse(source, format=content_type)
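# Minimal usage sketch (the payload and format name are illustrative):
#
#     import io
#     res = GraphResultParser().parse(io.StringIO('<urn:a> <urn:b> "c" .'),
#                                     content_type="nt")
#     len(res.graph)  # 1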
diff --git a/rdflib/plugins/sparql/results/jsonresults.py b/rdflib/plugins/sparql/results/jsonresults.py
index 6110c324..13a8da5e 100644
--- a/rdflib/plugins/sparql/results/jsonresults.py
+++ b/rdflib/plugins/sparql/results/jsonresults.py
@@ -1,7 +1,6 @@
import json
-from rdflib.query import (
- Result, ResultException, ResultSerializer, ResultParser)
+from rdflib.query import Result, ResultException, ResultSerializer, ResultParser
from rdflib import Literal, URIRef, BNode, Variable
@@ -18,23 +17,21 @@ Authors: Drew Perttula, Gunnar Aastrand Grimnes
class JSONResultParser(ResultParser):
-
def parse(self, source, content_type=None):
inp = source.read()
if isinstance(inp, bytes):
- inp = inp.decode('utf-8')
+ inp = inp.decode("utf-8")
return JSONResult(json.loads(inp))
class JSONResultSerializer(ResultSerializer):
-
def __init__(self, result):
ResultSerializer.__init__(self, result)
def serialize(self, stream, encoding=None):
res = {}
- if self.result.type == 'ASK':
+ if self.result.type == "ASK":
res["head"] = {}
res["boolean"] = self.result.askAnswer
else:
@@ -42,8 +39,9 @@ class JSONResultSerializer(ResultSerializer):
res["results"] = {}
res["head"] = {}
res["head"]["vars"] = self.result.vars
- res["results"]["bindings"] = [self._bindingToJSON(
- x) for x in self.result.bindings]
+ res["results"]["bindings"] = [
+ self._bindingToJSON(x) for x in self.result.bindings
+ ]
r = json.dumps(res, allow_nan=False, ensure_ascii=False)
if encoding is not None:
@@ -61,27 +59,26 @@ class JSONResultSerializer(ResultSerializer):
class JSONResult(Result):
-
def __init__(self, json):
self.json = json
if "boolean" in json:
- type_ = 'ASK'
+ type_ = "ASK"
elif "results" in json:
- type_ = 'SELECT'
+ type_ = "SELECT"
else:
- raise ResultException('No boolean or results in json!')
+ raise ResultException("No boolean or results in json!")
Result.__init__(self, type_)
- if type_ == 'ASK':
- self.askAnswer = bool(json['boolean'])
+ if type_ == "ASK":
+ self.askAnswer = bool(json["boolean"])
else:
self.bindings = self._get_bindings()
self.vars = [Variable(x) for x in json["head"]["vars"]]
def _get_bindings(self):
ret = []
- for row in self.json['results']['bindings']:
+ for row in self.json["results"]["bindings"]:
outRow = {}
for k, v in row.items():
outRow[Variable(k)] = parseJsonTerm(v)
@@ -97,36 +94,34 @@ def parseJsonTerm(d):
{ 'type': 'literal', 'value': 'drewp' }
"""
- t = d['type']
- if t == 'uri':
- return URIRef(d['value'])
- elif t == 'literal':
- return Literal(d['value'], datatype=d.get('datatype'), lang=d.get('xml:lang'))
- elif t == 'typed-literal':
- return Literal(d['value'], datatype=URIRef(d['datatype']))
- elif t == 'bnode':
- return BNode(d['value'])
+ t = d["type"]
+ if t == "uri":
+ return URIRef(d["value"])
+ elif t == "literal":
+ return Literal(d["value"], datatype=d.get("datatype"), lang=d.get("xml:lang"))
+ elif t == "typed-literal":
+ return Literal(d["value"], datatype=URIRef(d["datatype"]))
+ elif t == "bnode":
+ return BNode(d["value"])
else:
raise NotImplementedError("json term type %r" % t)
def termToJSON(self, term):
if isinstance(term, URIRef):
- return {'type': 'uri', 'value': str(term)}
+ return {"type": "uri", "value": str(term)}
elif isinstance(term, Literal):
- r = {'type': 'literal',
- 'value': str(term)}
+ r = {"type": "literal", "value": str(term)}
if term.datatype is not None:
- r['datatype'] = str(term.datatype)
+ r["datatype"] = str(term.datatype)
if term.language is not None:
- r['xml:lang'] = term.language
+ r["xml:lang"] = term.language
return r
elif isinstance(term, BNode):
- return {'type': 'bnode', 'value': str(term)}
+ return {"type": "bnode", "value": str(term)}
elif term is None:
return None
else:
- raise ResultException(
- 'Unknown term type: %s (%s)' % (term, type(term)))
+ raise ResultException("Unknown term type: %s (%s)" % (term, type(term)))
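# Round-trip sketch (the dicts follow the SPARQL 1.1 JSON results format):
#
#     parseJsonTerm({"type": "uri", "value": "http://example.org/a"})
#     # -> URIRef('http://example.org/a')
#     parseJsonTerm({"type": "literal", "value": "chat", "xml:lang": "fr"})
#     # -> Literal('chat', lang='fr')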
diff --git a/rdflib/plugins/sparql/results/rdfresults.py b/rdflib/plugins/sparql/results/rdfresults.py
index ac71ff1d..7f64bbf4 100644
--- a/rdflib/plugins/sparql/results/rdfresults.py
+++ b/rdflib/plugins/sparql/results/rdfresults.py
@@ -2,7 +2,7 @@ from rdflib import Graph, Namespace, RDF, Variable
from rdflib.query import Result, ResultParser
-RS = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/result-set#')
+RS = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/result-set#")
class RDFResultParser(ResultParser):
@@ -11,7 +11,6 @@ class RDFResultParser(ResultParser):
class RDFResult(Result):
-
def __init__(self, source, **kwargs):
if not isinstance(source, Graph):
@@ -24,7 +23,7 @@ class RDFResult(Result):
# there better be only one :)
if rs is None:
- type_ = 'CONSTRUCT'
+ type_ = "CONSTRUCT"
# use a new graph
g = Graph()
@@ -35,27 +34,27 @@ class RDFResult(Result):
askAnswer = graph.value(rs, RS.boolean)
if askAnswer is not None:
- type_ = 'ASK'
+ type_ = "ASK"
else:
- type_ = 'SELECT'
+ type_ = "SELECT"
Result.__init__(self, type_)
- if type_ == 'SELECT':
- self.vars = [Variable(v) for v in graph.objects(rs,
- RS.resultVariable)]
+ if type_ == "SELECT":
+ self.vars = [Variable(v) for v in graph.objects(rs, RS.resultVariable)]
self.bindings = []
for s in graph.objects(rs, RS.solution):
sol = {}
for b in graph.objects(s, RS.binding):
- sol[Variable(graph.value(
- b, RS.variable))] = graph.value(b, RS.value)
+ sol[Variable(graph.value(b, RS.variable))] = graph.value(
+ b, RS.value
+ )
self.bindings.append(sol)
- elif type_ == 'ASK':
+ elif type_ == "ASK":
self.askAnswer = askAnswer.value
if askAnswer.value is None:
- raise Exception('Malformed boolean in ask answer!')
- elif type_ == 'CONSTRUCT':
+ raise Exception("Malformed boolean in ask answer!")
+ elif type_ == "CONSTRUCT":
self.graph = g
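# Minimal usage sketch (the file name is hypothetical; the data must use the
# RS result-set vocabulary above):
#
#     from rdflib import Graph
#     g = Graph().parse("dawg-result.ttl", format="turtle")
#     r = RDFResult(g)
#     r.type, r.vars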
diff --git a/rdflib/plugins/sparql/results/tsvresults.py b/rdflib/plugins/sparql/results/tsvresults.py
index 1395eaff..bdfa2d4a 100644
--- a/rdflib/plugins/sparql/results/tsvresults.py
+++ b/rdflib/plugins/sparql/results/tsvresults.py
@@ -1,4 +1,3 @@
-
"""
This implements the Tab Separated SPARQL Result Format
@@ -8,14 +7,28 @@ It is implemented with pyparsing, reusing the elements from the SPARQL Parser
import codecs
from pyparsing import (
- Optional, ZeroOrMore, Literal, ParserElement, ParseException, Suppress,
- FollowedBy, LineEnd)
+ Optional,
+ ZeroOrMore,
+ Literal,
+ ParserElement,
+ ParseException,
+ Suppress,
+ FollowedBy,
+ LineEnd,
+)
from rdflib.query import Result, ResultParser
from rdflib.plugins.sparql.parser import (
- Var, STRING_LITERAL1, STRING_LITERAL2, IRIREF, BLANK_NODE_LABEL,
- NumericLiteral, BooleanLiteral, LANGTAG)
+ Var,
+ STRING_LITERAL1,
+ STRING_LITERAL2,
+ IRIREF,
+ BLANK_NODE_LABEL,
+ NumericLiteral,
+ BooleanLiteral,
+ LANGTAG,
+)
from rdflib.plugins.sparql.parserutils import Comp, Param, CompValue
from rdflib import Literal as RDFLiteral
@@ -25,10 +38,14 @@ ParserElement.setDefaultWhitespaceChars(" \n")
String = STRING_LITERAL1 | STRING_LITERAL2
-RDFLITERAL = Comp('literal', Param('string', String) + Optional(
- Param('lang', LANGTAG.leaveWhitespace()
- ) | Literal('^^').leaveWhitespace(
- ) + Param('datatype', IRIREF).leaveWhitespace()))
+RDFLITERAL = Comp(
+ "literal",
+ Param("string", String)
+ + Optional(
+ Param("lang", LANGTAG.leaveWhitespace())
+ | Literal("^^").leaveWhitespace() + Param("datatype", IRIREF).leaveWhitespace()
+ ),
+)
NONE_VALUE = object()
@@ -49,10 +66,10 @@ class TSVResultParser(ResultParser):
if isinstance(source.read(0), bytes):
# if reading from source returns bytes do utf-8 decoding
- source = codecs.getreader('utf-8')(source)
+ source = codecs.getreader("utf-8")(source)
try:
- r = Result('SELECT')
+ r = Result("SELECT")
header = source.readline()
@@ -62,13 +79,12 @@ class TSVResultParser(ResultParser):
line = source.readline()
if not line:
break
- line = line.strip('\n')
+ line = line.strip("\n")
if line == "":
continue
row = ROW.parseString(line, parseAll=True)
- r.bindings.append(
- dict(zip(r.vars, (self.convertTerm(x) for x in row))))
+ r.bindings.append(dict(zip(r.vars, (self.convertTerm(x) for x in row))))
return r
@@ -81,7 +97,7 @@ class TSVResultParser(ResultParser):
if t is NONE_VALUE:
return None
if isinstance(t, CompValue):
- if t.name == 'literal':
+ if t.name == "literal":
return RDFLiteral(t.string, lang=t.lang, datatype=t.datatype)
else:
raise Exception("I dont know how to handle this: %s" % (t,))
@@ -89,9 +105,10 @@ class TSVResultParser(ResultParser):
return t
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
- r = Result.parse(file(sys.argv[1]), format='tsv')
+
+    r = Result.parse(open(sys.argv[1]), format="tsv")  # open(): Py2's file() is gone in Py3
print(r.vars)
print(r.bindings)
# print r.serialize(format='json')
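# Minimal in-memory sketch (the TSV payload is illustrative):
#
#     import io
#     data = '?s\t?o\n<http://example.org/a>\t"hi"@en\n'
#     r = Result.parse(io.StringIO(data), format="tsv")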
diff --git a/rdflib/plugins/sparql/results/txtresults.py b/rdflib/plugins/sparql/results/txtresults.py
index c42f24c4..426dd9a1 100644
--- a/rdflib/plugins/sparql/results/txtresults.py
+++ b/rdflib/plugins/sparql/results/txtresults.py
@@ -1,4 +1,3 @@
-
from rdflib import URIRef, BNode, Literal
from rdflib.query import ResultSerializer
@@ -37,7 +36,7 @@ class TXTResultSerializer(ResultSerializer):
h2 += 1
return " " * h1 + s + " " * h2
- if self.result.type != 'SELECT':
+ if self.result.type != "SELECT":
raise Exception("Can only pretty print SELECT results!")
if not self.result:
@@ -46,14 +45,17 @@ class TXTResultSerializer(ResultSerializer):
keys = sorted(self.result.vars)
maxlen = [0] * len(keys)
- b = [[_termString(r[k], namespace_manager) for k in keys] for r in self.result]
+ b = [
+ [_termString(r[k], namespace_manager) for k in keys]
+ for r in self.result
+ ]
for r in b:
for i in range(len(keys)):
maxlen[i] = max(maxlen[i], len(r[i]))
- stream.write(
- "|".join([c(k, maxlen[i]) for i, k in enumerate(keys)]) + "\n")
+ stream.write("|".join([c(k, maxlen[i]) for i, k in enumerate(keys)]) + "\n")
stream.write("-" * (len(maxlen) + sum(maxlen)) + "\n")
for r in sorted(b):
- stream.write("|".join(
- [t + " " * (i - len(t)) for i, t in zip(maxlen, r)]) + "\n")
+ stream.write(
+ "|".join([t + " " * (i - len(t)) for i, t in zip(maxlen, r)]) + "\n"
+ )
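# Minimal usage sketch ('txt' writes str, so use a text stream):
#
#     import io
#     from rdflib import Graph
#     res = Graph().query("SELECT ?s ?p WHERE { ?s ?p ?o }")
#     out = io.StringIO()
#     res.serialize(out, format="txt")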
diff --git a/rdflib/plugins/sparql/results/xmlresults.py b/rdflib/plugins/sparql/results/xmlresults.py
index cdb81f76..aa4f796f 100644
--- a/rdflib/plugins/sparql/results/xmlresults.py
+++ b/rdflib/plugins/sparql/results/xmlresults.py
@@ -8,17 +8,11 @@ from xml.sax.xmlreader import AttributesNSImpl
from rdflib.compat import etree
from rdflib import Literal, URIRef, BNode, Graph, Variable
-from rdflib.query import (
- Result,
- ResultParser,
- ResultSerializer,
- ResultException
-)
+from rdflib.query import Result, ResultParser, ResultSerializer, ResultException
-
-SPARQL_XML_NAMESPACE = u'http://www.w3.org/2005/sparql-results#'
-RESULTS_NS_ET = '{%s}' % SPARQL_XML_NAMESPACE
+SPARQL_XML_NAMESPACE = u"http://www.w3.org/2005/sparql-results#"
+RESULTS_NS_ET = "{%s}" % SPARQL_XML_NAMESPACE
log = logging.getLogger(__name__)
@@ -35,7 +29,6 @@ Authors: Drew Perttula, Gunnar Aastrand Grimnes
class XMLResultParser(ResultParser):
-
def parse(self, source, content_type=None):
return XMLResult(source)
@@ -49,31 +42,32 @@ class XMLResult(Result):
except TypeError:
tree = etree.parse(source)
- boolean = tree.find(RESULTS_NS_ET + 'boolean')
- results = tree.find(RESULTS_NS_ET + 'results')
+ boolean = tree.find(RESULTS_NS_ET + "boolean")
+ results = tree.find(RESULTS_NS_ET + "results")
if boolean is not None:
- type_ = 'ASK'
+ type_ = "ASK"
elif results is not None:
- type_ = 'SELECT'
+ type_ = "SELECT"
else:
- raise ResultException(
- "No RDF result-bindings or boolean answer found!")
+ raise ResultException("No RDF result-bindings or boolean answer found!")
Result.__init__(self, type_)
- if type_ == 'SELECT':
+ if type_ == "SELECT":
self.bindings = []
for result in results:
r = {}
for binding in result:
- r[Variable(binding.get('name'))] = parseTerm(binding[0])
+ r[Variable(binding.get("name"))] = parseTerm(binding[0])
self.bindings.append(r)
- self.vars = [Variable(x.get("name"))
- for x in tree.findall(
- './%shead/%svariable' % (
- RESULTS_NS_ET, RESULTS_NS_ET))]
+ self.vars = [
+ Variable(x.get("name"))
+ for x in tree.findall(
+ "./%shead/%svariable" % (RESULTS_NS_ET, RESULTS_NS_ET)
+ )
+ ]
else:
self.askAnswer = boolean.text.lower().strip() == "true"
@@ -83,36 +77,35 @@ def parseTerm(element):
"""rdflib object (Literal, URIRef, BNode) for the given
elementtree element"""
tag, text = element.tag, element.text
- if tag == RESULTS_NS_ET + 'literal':
+ if tag == RESULTS_NS_ET + "literal":
if text is None:
- text = ''
+ text = ""
datatype = None
lang = None
- if element.get('datatype', None):
- datatype = URIRef(element.get('datatype'))
+ if element.get("datatype", None):
+ datatype = URIRef(element.get("datatype"))
elif element.get("{%s}lang" % XML_NAMESPACE, None):
lang = element.get("{%s}lang" % XML_NAMESPACE)
ret = Literal(text, datatype=datatype, lang=lang)
return ret
- elif tag == RESULTS_NS_ET + 'uri':
+ elif tag == RESULTS_NS_ET + "uri":
return URIRef(text)
- elif tag == RESULTS_NS_ET + 'bnode':
+ elif tag == RESULTS_NS_ET + "bnode":
return BNode(text)
else:
raise TypeError("unknown binding type %r" % element)
class XMLResultSerializer(ResultSerializer):
-
def __init__(self, result):
ResultSerializer.__init__(self, result)
def serialize(self, stream, encoding="utf-8"):
writer = SPARQLXMLWriter(stream, encoding)
- if self.result.type == 'ASK':
+ if self.result.type == "ASK":
writer.write_header([])
writer.write_ask(self.result.askAnswer)
else:
@@ -134,14 +127,14 @@ class SPARQLXMLWriter:
Python saxutils-based SPARQL XML Writer
"""
- def __init__(self, output, encoding='utf-8'):
+ def __init__(self, output, encoding="utf-8"):
writer = XMLGenerator(output, encoding)
writer.startDocument()
- writer.startPrefixMapping(u'', SPARQL_XML_NAMESPACE)
- writer.startPrefixMapping(u'xml', XML_NAMESPACE)
+ writer.startPrefixMapping(u"", SPARQL_XML_NAMESPACE)
+ writer.startPrefixMapping(u"xml", XML_NAMESPACE)
writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'sparql'),
- u'sparql', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"sparql"), u"sparql", AttributesNSImpl({}, {})
+ )
self.writer = writer
self._output = output
self._encoding = encoding
@@ -149,102 +142,99 @@ class SPARQLXMLWriter:
def write_header(self, allvarsL):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'head'),
- u'head', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"head"), u"head", AttributesNSImpl({}, {})
+ )
for i in range(0, len(allvarsL)):
attr_vals = {
- (None, u'name'): str(allvarsL[i]),
+ (None, u"name"): str(allvarsL[i]),
}
attr_qnames = {
- (None, u'name'): u'name',
+ (None, u"name"): u"name",
}
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'variable'),
- u'variable', AttributesNSImpl(attr_vals, attr_qnames))
- self.writer.endElementNS((SPARQL_XML_NAMESPACE,
- u'variable'), u'variable')
- self.writer.endElementNS((SPARQL_XML_NAMESPACE, u'head'), u'head')
+ (SPARQL_XML_NAMESPACE, u"variable"),
+ u"variable",
+ AttributesNSImpl(attr_vals, attr_qnames),
+ )
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"variable"), u"variable")
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"head"), u"head")
def write_ask(self, val):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'boolean'),
- u'boolean', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"boolean"), u"boolean", AttributesNSImpl({}, {})
+ )
self.writer.characters(str(val).lower())
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'boolean'), u'boolean')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"boolean"), u"boolean")
def write_results_header(self):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'results'),
- u'results', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"results"), u"results", AttributesNSImpl({}, {})
+ )
self._results = True
def write_start_result(self):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'result'),
- u'result', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"result"), u"result", AttributesNSImpl({}, {})
+ )
self._resultStarted = True
def write_end_result(self):
assert self._resultStarted
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'result'), u'result')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"result"), u"result")
self._resultStarted = False
def write_binding(self, name, val):
assert self._resultStarted
attr_vals = {
- (None, u'name'): str(name),
+ (None, u"name"): str(name),
}
attr_qnames = {
- (None, u'name'): u'name',
+ (None, u"name"): u"name",
}
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'binding'),
- u'binding', AttributesNSImpl(attr_vals, attr_qnames))
+ (SPARQL_XML_NAMESPACE, u"binding"),
+ u"binding",
+ AttributesNSImpl(attr_vals, attr_qnames),
+ )
if isinstance(val, URIRef):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'uri'),
- u'uri', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"uri"), u"uri", AttributesNSImpl({}, {})
+ )
self.writer.characters(val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'uri'), u'uri')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"uri"), u"uri")
elif isinstance(val, BNode):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'bnode'),
- u'bnode', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"bnode"), u"bnode", AttributesNSImpl({}, {})
+ )
self.writer.characters(val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'bnode'), u'bnode')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"bnode"), u"bnode")
elif isinstance(val, Literal):
attr_vals = {}
attr_qnames = {}
if val.language:
- attr_vals[(XML_NAMESPACE, u'lang')] = val.language
- attr_qnames[(XML_NAMESPACE, u'lang')] = u"xml:lang"
+ attr_vals[(XML_NAMESPACE, u"lang")] = val.language
+ attr_qnames[(XML_NAMESPACE, u"lang")] = u"xml:lang"
elif val.datatype:
- attr_vals[(None, u'datatype')] = val.datatype
- attr_qnames[(None, u'datatype')] = u'datatype'
+ attr_vals[(None, u"datatype")] = val.datatype
+ attr_qnames[(None, u"datatype")] = u"datatype"
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'literal'),
- u'literal', AttributesNSImpl(attr_vals, attr_qnames))
+ (SPARQL_XML_NAMESPACE, u"literal"),
+ u"literal",
+ AttributesNSImpl(attr_vals, attr_qnames),
+ )
self.writer.characters(val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'literal'), u'literal')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"literal"), u"literal")
else:
raise Exception("Unsupported RDF term: %s" % val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'binding'), u'binding')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"binding"), u"binding")
def close(self):
if self._results:
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'results'), u'results')
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'sparql'), u'sparql')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"results"), u"results")
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"sparql"), u"sparql")
self.writer.endDocument()
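# The writer is normally driven via Result.serialize; a minimal sketch (the
# stream must accept bytes):
#
#     import io
#     from rdflib import Graph
#     res = Graph().query("ASK { ?s ?p ?o }")
#     buf = io.BytesIO()
#     res.serialize(buf, format="xml")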
diff --git a/rdflib/plugins/sparql/sparql.py b/rdflib/plugins/sparql/sparql.py
index d9d781db..5b6eab2e 100644
--- a/rdflib/plugins/sparql/sparql.py
+++ b/rdflib/plugins/sparql/sparql.py
@@ -129,8 +129,7 @@ class FrozenDict(Mapping):
return self._hash
def project(self, vars):
- return FrozenDict(
- (x for x in self.items() if x[0] in vars))
+ return FrozenDict((x for x in self.items() if x[0] in vars))
def disjointDomain(self, other):
return not bool(set(self).intersection(other))
@@ -146,8 +145,7 @@ class FrozenDict(Mapping):
return True
def merge(self, other):
- res = FrozenDict(
- itertools.chain(self.items(), other.items()))
+ res = FrozenDict(itertools.chain(self.items(), other.items()))
return res
@@ -159,7 +157,6 @@ class FrozenDict(Mapping):
class FrozenBindings(FrozenDict):
-
def __init__(self, ctx, *args, **kwargs):
FrozenDict.__init__(self, *args, **kwargs)
self.ctx = ctx
@@ -178,12 +175,10 @@ class FrozenBindings(FrozenDict):
return self._d[key]
def project(self, vars):
- return FrozenBindings(
- self.ctx, (x for x in self.items() if x[0] in vars))
+ return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in vars))
def merge(self, other):
- res = FrozenBindings(
- self.ctx, itertools.chain(self.items(), other.items()))
+ res = FrozenBindings(self.ctx, itertools.chain(self.items(), other.items()))
return res
@@ -210,18 +205,23 @@ class FrozenBindings(FrozenDict):
        # bindings from initBindings are never forgotten
return FrozenBindings(
- self.ctx, (
- x for x in self.items() if (
- x[0] in _except or
- x[0] in self.ctx.initBindings or
- before[x[0]] is None)))
+ self.ctx,
+ (
+ x
+ for x in self.items()
+ if (
+ x[0] in _except
+ or x[0] in self.ctx.initBindings
+ or before[x[0]] is None
+ )
+ ),
+ )
def remember(self, these):
"""
return a frozen dict only of bindings in these
"""
- return FrozenBindings(
- self.ctx, (x for x in self.items() if x[0] in these))
+ return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in these))
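# Semantics sketch for project/merge (the bindings are illustrative):
#
#     from rdflib import Variable, URIRef
#     x, y = Variable("x"), Variable("y")
#     d = FrozenDict({x: URIRef("urn:a"), y: URIRef("urn:b")})
#     d.project([x])                             # keeps only ?x
#     d.merge(FrozenDict({y: URIRef("urn:b")}))  # union of the two mappings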
class QueryContext(object):
@@ -253,7 +253,10 @@ class QueryContext(object):
def clone(self, bindings=None):
r = QueryContext(
- self._dataset if self._dataset is not None else self.graph, bindings or self.bindings, initBindings=self.initBindings)
+ self._dataset if self._dataset is not None else self.graph,
+ bindings or self.bindings,
+ initBindings=self.initBindings,
+ )
r.prologue = self.prologue
r.graph = self.graph
r.bnodes = self.bnodes
@@ -262,30 +265,30 @@ class QueryContext(object):
def _get_dataset(self):
if self._dataset is None:
raise Exception(
- 'You performed a query operation requiring ' +
- 'a dataset (i.e. ConjunctiveGraph), but ' +
- 'operating currently on a single graph.')
+ "You performed a query operation requiring "
+ + "a dataset (i.e. ConjunctiveGraph), but "
+ + "operating currently on a single graph."
+ )
return self._dataset
dataset = property(_get_dataset, doc="current dataset")
def load(self, source, default=False, **kwargs):
-
def _load(graph, source):
try:
return graph.load(source, **kwargs)
except:
pass
try:
- return graph.load(source, format='n3', **kwargs)
+ return graph.load(source, format="n3", **kwargs)
except:
pass
try:
- return graph.load(source, format='nt', **kwargs)
+ return graph.load(source, format="nt", **kwargs)
except:
raise Exception(
- "Could not load %s as either RDF/XML, N3 or NTriples" % (
- source))
+ "Could not load %s as either RDF/XML, N3 or NTriples" % (source)
+ )
if not rdflib.plugins.sparql.SPARQL_LOAD_GRAPHS:
# we are not loading - if we already know the graph
@@ -320,9 +323,8 @@ class QueryContext(object):
"""
if vars:
return FrozenBindings(
- self, ((k, v)
- for k, v in self.bindings.items()
- if k in vars))
+ self, ((k, v) for k, v in self.bindings.items() if k in vars)
+ )
else:
return FrozenBindings(self, self.bindings.items())
@@ -366,13 +368,12 @@ class Prologue(object):
def __init__(self):
self.base = None
- self.namespace_manager = NamespaceManager(
- Graph()) # ns man needs a store
+ self.namespace_manager = NamespaceManager(Graph()) # ns man needs a store
def resolvePName(self, prefix, localname):
ns = self.namespace_manager.store.namespace(prefix or "")
if ns is None:
- raise Exception('Unknown namespace prefix : %s' % prefix)
+ raise Exception("Unknown namespace prefix : %s" % prefix)
return URIRef(ns + (localname or ""))
def bind(self, prefix, uri):
@@ -387,13 +388,13 @@ class Prologue(object):
"""
if isinstance(iri, CompValue):
- if iri.name == 'pname':
+ if iri.name == "pname":
return self.resolvePName(iri.prefix, iri.localname)
- if iri.name == 'literal':
+ if iri.name == "literal":
return Literal(
- iri.string, lang=iri.lang,
- datatype=self.absolutize(iri.datatype))
- elif isinstance(iri, URIRef) and not ':' in iri:
+ iri.string, lang=iri.lang, datatype=self.absolutize(iri.datatype)
+ )
+ elif isinstance(iri, URIRef) and not ":" in iri:
return URIRef(iri, base=self.base)
return iri
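# Sketch of prefix resolution (the prefix/namespace pair is illustrative):
#
#     p = Prologue()
#     p.bind("ex", "http://example.org/")
#     p.resolvePName("ex", "thing")  # URIRef('http://example.org/thing')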
diff --git a/rdflib/plugins/sparql/update.py b/rdflib/plugins/sparql/update.py
index 44ea40a3..f979c387 100644
--- a/rdflib/plugins/sparql/update.py
+++ b/rdflib/plugins/sparql/update.py
@@ -11,7 +11,7 @@ from rdflib.plugins.sparql.evaluate import evalBGP, evalPart
def _graphOrDefault(ctx, g):
- if g == 'DEFAULT':
+ if g == "DEFAULT":
return ctx.graph
else:
return ctx.dataset.get_context(g)
@@ -21,12 +21,13 @@ def _graphAll(ctx, g):
"""
return a list of graphs
"""
- if g == 'DEFAULT':
+ if g == "DEFAULT":
return [ctx.graph]
- elif g == 'NAMED':
- return [c for c in ctx.dataset.contexts()
- if c.identifier != ctx.graph.identifier]
- elif g == 'ALL':
+ elif g == "NAMED":
+ return [
+ c for c in ctx.dataset.contexts() if c.identifier != ctx.graph.identifier
+ ]
+ elif g == "ALL":
return list(ctx.dataset.contexts())
else:
return [ctx.dataset.get_context(g)]
@@ -280,30 +281,30 @@ def evalUpdate(graph, update, initBindings={}):
ctx.prologue = u.prologue
try:
- if u.name == 'Load':
+ if u.name == "Load":
evalLoad(ctx, u)
- elif u.name == 'Clear':
+ elif u.name == "Clear":
evalClear(ctx, u)
- elif u.name == 'Drop':
+ elif u.name == "Drop":
evalDrop(ctx, u)
- elif u.name == 'Create':
+ elif u.name == "Create":
evalCreate(ctx, u)
- elif u.name == 'Add':
+ elif u.name == "Add":
evalAdd(ctx, u)
- elif u.name == 'Move':
+ elif u.name == "Move":
evalMove(ctx, u)
- elif u.name == 'Copy':
+ elif u.name == "Copy":
evalCopy(ctx, u)
- elif u.name == 'InsertData':
+ elif u.name == "InsertData":
evalInsertData(ctx, u)
- elif u.name == 'DeleteData':
+ elif u.name == "DeleteData":
evalDeleteData(ctx, u)
- elif u.name == 'DeleteWhere':
+ elif u.name == "DeleteWhere":
evalDeleteWhere(ctx, u)
- elif u.name == 'Modify':
+ elif u.name == "Modify":
evalModify(ctx, u)
else:
- raise Exception('Unknown update operation: %s' % (u,))
+ raise Exception("Unknown update operation: %s" % (u,))
except:
if not u.silent:
raise
diff --git a/rdflib/plugins/stores/auditable.py b/rdflib/plugins/stores/auditable.py
index 7a3492b7..ff21716b 100644
--- a/rdflib/plugins/stores/auditable.py
+++ b/rdflib/plugins/stores/auditable.py
@@ -20,8 +20,8 @@ from rdflib import Graph, ConjunctiveGraph
import threading
destructiveOpLocks = {
- 'add': None,
- 'remove': None,
+ "add": None,
+ "remove": None,
}
@@ -50,59 +50,79 @@ class AuditableStore(Store):
def add(self, triple, context, quoted=False):
(s, p, o) = triple
- lock = destructiveOpLocks['add']
+ lock = destructiveOpLocks["add"]
lock = lock if lock else threading.RLock()
with lock:
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
ctxId = context.identifier if context is not None else None
if list(self.store.triples(triple, context)):
return # triple already in store, do nothing
- self.reverseOps.append((s, p, o, ctxId, 'remove'))
+ self.reverseOps.append((s, p, o, ctxId, "remove"))
try:
- self.reverseOps.remove((s, p, o, ctxId, 'add'))
+ self.reverseOps.remove((s, p, o, ctxId, "add"))
except ValueError:
pass
self.store.add((s, p, o), context, quoted)
def remove(self, spo, context=None):
subject, predicate, object_ = spo
- lock = destructiveOpLocks['remove']
+ lock = destructiveOpLocks["remove"]
lock = lock if lock else threading.RLock()
with lock:
# Need to determine which quads will be removed if any term is a
# wildcard
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
ctxId = context.identifier if context is not None else None
if None in [subject, predicate, object_, context]:
if ctxId:
for s, p, o in context.triples((subject, predicate, object_)):
try:
- self.reverseOps.remove((s, p, o, ctxId, 'remove'))
+ self.reverseOps.remove((s, p, o, ctxId, "remove"))
except ValueError:
- self.reverseOps.append((s, p, o, ctxId, 'add'))
+ self.reverseOps.append((s, p, o, ctxId, "add"))
else:
- for s, p, o, ctx in ConjunctiveGraph(self.store).quads((subject, predicate, object_)):
+ for s, p, o, ctx in ConjunctiveGraph(self.store).quads(
+ (subject, predicate, object_)
+ ):
try:
- self.reverseOps.remove((s, p, o, ctx.identifier, 'remove'))
+ self.reverseOps.remove((s, p, o, ctx.identifier, "remove"))
except ValueError:
- self.reverseOps.append((s, p, o, ctx.identifier, 'add'))
+ self.reverseOps.append((s, p, o, ctx.identifier, "add"))
else:
if not list(self.triples((subject, predicate, object_), context)):
return # triple not present in store, do nothing
try:
- self.reverseOps.remove((subject, predicate, object_, ctxId, 'remove'))
+ self.reverseOps.remove(
+ (subject, predicate, object_, ctxId, "remove")
+ )
except ValueError:
- self.reverseOps.append((subject, predicate, object_, ctxId, 'add'))
+ self.reverseOps.append((subject, predicate, object_, ctxId, "add"))
self.store.remove((subject, predicate, object_), context)
def triples(self, triple, context=None):
(su, pr, ob) = triple
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
for (s, p, o), cg in self.store.triples((su, pr, ob), context):
yield (s, p, o), cg
def __len__(self, context=None):
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
return self.store.__len__(context)
def contexts(self, triple=None):
@@ -129,11 +149,13 @@ class AuditableStore(Store):
# order
with self.rollbackLock:
for subject, predicate, obj, context, op in self.reverseOps:
- if op == 'add':
+ if op == "add":
self.store.add(
- (subject, predicate, obj), Graph(self.store, context))
+ (subject, predicate, obj), Graph(self.store, context)
+ )
else:
self.store.remove(
- (subject, predicate, obj), Graph(self.store, context))
+ (subject, predicate, obj), Graph(self.store, context)
+ )
self.reverseOps = []
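# Minimal rollback sketch (the choice of wrapped store is illustrative):
#
#     from rdflib import Graph, URIRef
#     from rdflib.plugins.memory import IOMemory
#     store = AuditableStore(IOMemory())
#     g = Graph(store)
#     g.add((URIRef("urn:a"), URIRef("urn:b"), URIRef("urn:c")))
#     store.rollback()  # replays reverseOps; the add above is undone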
diff --git a/rdflib/plugins/stores/concurrent.py b/rdflib/plugins/stores/concurrent.py
index 40747fb1..a258e778 100644
--- a/rdflib/plugins/stores/concurrent.py
+++ b/rdflib/plugins/stores/concurrent.py
@@ -4,7 +4,7 @@ from threading import Lock
class ResponsibleGenerator(object):
"""A generator that will help clean up when it is done being used."""
- __slots__ = ['cleanup', 'gen']
+ __slots__ = ["cleanup", "gen"]
def __init__(self, gen, cleanup):
self.cleanup = cleanup
@@ -21,7 +21,6 @@ class ResponsibleGenerator(object):
class ConcurrentStore(object):
-
def __init__(self, store):
self.store = store
@@ -60,9 +59,11 @@ class ConcurrentStore(object):
yield s, p, o
for (s, p, o) in self.__pending_adds:
- if (su is None or su == s) \
- and (pr is None or pr == p) \
- and (ob is None or ob == o):
+ if (
+ (su is None or su == s)
+ and (pr is None or pr == p)
+ and (ob is None or ob == o)
+ ):
yield s, p, o
def __len__(self):
diff --git a/rdflib/plugins/stores/regexmatching.py b/rdflib/plugins/stores/regexmatching.py
index 55c1e671..f890405d 100644
--- a/rdflib/plugins/stores/regexmatching.py
+++ b/rdflib/plugins/stores/regexmatching.py
@@ -12,7 +12,6 @@ from rdflib.store import Store
from rdflib.graph import Graph
-
import re
# Store is capable of doing its own REGEX matching
@@ -32,13 +31,14 @@ class REGEXTerm(str):
self.compiledExpr = re.compile(expr)
def __reduce__(self):
- return (REGEXTerm, (str(''),))
+ return (REGEXTerm, (str(""),))
def regexCompareQuad(quad, regexQuad):
for index in range(4):
- if isinstance(regexQuad[index], REGEXTerm) and not \
- regexQuad[index].compiledExpr.match(quad[index]):
+ if isinstance(regexQuad[index], REGEXTerm) and not regexQuad[
+ index
+ ].compiledExpr.match(quad[index]):
return False
return True
@@ -67,29 +67,36 @@ class REGEXMatching(Store):
def remove(self, triple, context=None):
(subject, predicate, object_) = triple
- if isinstance(subject, REGEXTerm) or \
- isinstance(predicate, REGEXTerm) or \
- isinstance(object_, REGEXTerm) or \
- (context is not None and
- isinstance(context.identifier, REGEXTerm)):
+ if (
+ isinstance(subject, REGEXTerm)
+ or isinstance(predicate, REGEXTerm)
+ or isinstance(object_, REGEXTerm)
+ or (context is not None and isinstance(context.identifier, REGEXTerm))
+ ):
# One or more of the terms is a REGEX expression, so we must
            # replace it / them with wildcard(s) and match after we query.
s = not isinstance(subject, REGEXTerm) and subject or None
p = not isinstance(predicate, REGEXTerm) and predicate or None
o = not isinstance(object_, REGEXTerm) and object_ or None
- c = (context is not None and
- not isinstance(context.identifier, REGEXTerm)) \
- and context \
+ c = (
+ (context is not None and not isinstance(context.identifier, REGEXTerm))
+ and context
or None
+ )
removeQuadList = []
for (s1, p1, o1), cg in self.storage.triples((s, p, o), c):
for ctx in cg:
ctx = ctx.identifier
if regexCompareQuad(
- (s1, p1, o1, ctx),
- (subject, predicate, object_, context
- is not None and context.identifier or context)):
+ (s1, p1, o1, ctx),
+ (
+ subject,
+ predicate,
+ object_,
+ context is not None and context.identifier or context,
+ ),
+ ):
removeQuadList.append((s1, p1, o1, ctx))
for s, p, o, c in removeQuadList:
self.storage.remove((s, p, o), c and Graph(self, c) or c)
@@ -98,37 +105,40 @@ class REGEXMatching(Store):
def triples(self, triple, context=None):
(subject, predicate, object_) = triple
- if isinstance(subject, REGEXTerm) or \
- isinstance(predicate, REGEXTerm) or \
- isinstance(object_, REGEXTerm) or \
- (context is not None and
- isinstance(context.identifier, REGEXTerm)):
+ if (
+ isinstance(subject, REGEXTerm)
+ or isinstance(predicate, REGEXTerm)
+ or isinstance(object_, REGEXTerm)
+ or (context is not None and isinstance(context.identifier, REGEXTerm))
+ ):
# One or more of the terms is a REGEX expression, so we must
# replace it / them with wildcard(s) and match after we query.
s = not isinstance(subject, REGEXTerm) and subject or None
p = not isinstance(predicate, REGEXTerm) and predicate or None
o = not isinstance(object_, REGEXTerm) and object_ or None
- c = (context is not None and
- not isinstance(context.identifier, REGEXTerm)) \
- and context \
+ c = (
+ (context is not None and not isinstance(context.identifier, REGEXTerm))
+ and context
or None
+ )
for (s1, p1, o1), cg in self.storage.triples((s, p, o), c):
matchingCtxs = []
for ctx in cg:
if c is None:
- if context is None \
- or context.identifier.compiledExpr.match(
- ctx.identifier):
+ if context is None or context.identifier.compiledExpr.match(
+ ctx.identifier
+ ):
matchingCtxs.append(ctx)
else:
matchingCtxs.append(ctx)
- if matchingCtxs \
- and regexCompareQuad((s1, p1, o1, None),
- (subject, predicate, object_, None)):
+ if matchingCtxs and regexCompareQuad(
+ (s1, p1, o1, None), (subject, predicate, object_, None)
+ ):
yield (s1, p1, o1), (c for c in matchingCtxs)
else:
for (s1, p1, o1), cg in self.storage.triples(
- (subject, predicate, object_), context):
+ (subject, predicate, object_), context
+ ):
yield (s1, p1, o1), cg
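    # Minimal usage sketch (assumes the wrapped store is handed to the
    # constructor; the terms are illustrative):
    #
    #     from rdflib import Graph, URIRef
    #     from rdflib.plugins.memory import IOMemory
    #     store = REGEXMatching(IOMemory())
    #     g = Graph(store)
    #     g.add((URIRef("urn:a1"), URIRef("urn:b"), URIRef("urn:c")))
    #     list(g.triples((REGEXTerm("urn:a.*"), None, None)))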
def __len__(self, context=None):
diff --git a/rdflib/plugins/stores/sparqlconnector.py b/rdflib/plugins/stores/sparqlconnector.py
index abb69a55..abec85a8 100644
--- a/rdflib/plugins/stores/sparqlconnector.py
+++ b/rdflib/plugins/stores/sparqlconnector.py
@@ -14,13 +14,14 @@ log = logging.getLogger(__name__)
class SPARQLConnectorException(Exception):
pass
+
# TODO: Pull in these from the result implementation plugins?
_response_mime_types = {
- 'xml': 'application/sparql-results+xml, application/rdf+xml',
- 'json': 'application/sparql-results+json',
- 'csv': 'text/csv',
- 'tsv': 'text/tab-separated-values',
- 'application/rdf+xml': 'application/rdf+xml',
+ "xml": "application/sparql-results+xml, application/rdf+xml",
+ "json": "application/sparql-results+json",
+ "csv": "text/csv",
+ "tsv": "text/tab-separated-values",
+ "application/rdf+xml": "application/rdf+xml",
}
@@ -30,7 +31,14 @@ class SPARQLConnector(object):
    this class deals with the nitty-gritty details of talking to a SPARQL server
"""
- def __init__(self, query_endpoint=None, update_endpoint=None, returnFormat='xml', method='GET', **kwargs):
+ def __init__(
+ self,
+ query_endpoint=None,
+ update_endpoint=None,
+ returnFormat="xml",
+ method="GET",
+ **kwargs
+ ):
"""
        Any additional keyword arguments will be passed to requests, and can be used to set up timeouts, basic auth, etc.
"""
@@ -48,9 +56,9 @@ class SPARQLConnector(object):
@property
def session(self):
- k = 'session_%d' % os.getpid()
+ k = "session_%d" % os.getpid()
self._session.__dict__.setdefault(k, requests.Session())
- log.debug('Session %s %s', os.getpid(), id(self._session.__dict__[k]))
+ log.debug("Session %s %s", os.getpid(), id(self._session.__dict__[k]))
return self._session.__dict__[k]
@property
@@ -59,7 +67,7 @@ class SPARQLConnector(object):
@method.setter
def method(self, method):
- if method not in ('GET', 'POST'):
+ if method not in ("GET", "POST"):
raise SPARQLConnectorException('Method must be "GET" or "POST"')
self._method = method
@@ -69,26 +77,26 @@ class SPARQLConnector(object):
if not self.query_endpoint:
raise SPARQLConnectorException("Query endpoint not set!")
- params = {'query': query}
+ params = {"query": query}
if default_graph:
params["default-graph-uri"] = default_graph
- headers = {'Accept': _response_mime_types[self.returnFormat]}
+ headers = {"Accept": _response_mime_types[self.returnFormat]}
args = dict(self.kwargs)
args.update(url=self.query_endpoint)
# merge params/headers dicts
- args.setdefault('params', {})
+ args.setdefault("params", {})
- args.setdefault('headers', {})
- args['headers'].update(headers)
+ args.setdefault("headers", {})
+ args["headers"].update(headers)
- if self.method == 'GET':
- args['params'].update(params)
- elif self.method == 'POST':
- args['headers'].update({'Content-Type': 'application/sparql-query'})
- args['data'] = params
+ if self.method == "GET":
+ args["params"].update(params)
+ elif self.method == "POST":
+ args["headers"].update({"Content-Type": "application/sparql-query"})
+ args["data"] = params
else:
raise SPARQLConnectorException("Unknown method %s" % self.method)
@@ -96,7 +104,9 @@ class SPARQLConnector(object):
res.raise_for_status()
- return Result.parse(BytesIO(res.content), content_type=res.headers['Content-type'])
+ return Result.parse(
+ BytesIO(res.content), content_type=res.headers["Content-type"]
+ )
def update(self, update, default_graph=None):
if not self.update_endpoint:
@@ -108,20 +118,19 @@ class SPARQLConnector(object):
params["using-graph-uri"] = default_graph
headers = {
- 'Accept': _response_mime_types[self.returnFormat],
- 'Content-Type': 'application/sparql-update',
+ "Accept": _response_mime_types[self.returnFormat],
+ "Content-Type": "application/sparql-update",
}
args = dict(self.kwargs)
- args.update(url=self.update_endpoint,
- data=update.encode('utf-8'))
+ args.update(url=self.update_endpoint, data=update.encode("utf-8"))
# merge params/headers dicts
- args.setdefault('params', {})
- args['params'].update(params)
- args.setdefault('headers', {})
- args['headers'].update(headers)
+ args.setdefault("params", {})
+ args["params"].update(params)
+ args.setdefault("headers", {})
+ args["headers"].update(headers)
res = self.session.post(**args)
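# Minimal usage sketch (the endpoint URL is hypothetical):
#
#     c = SPARQLConnector(query_endpoint="http://example.org/sparql",
#                         returnFormat="json")
#     res = c.query("SELECT * WHERE { ?s ?p ?o } LIMIT 5")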
diff --git a/rdflib/plugins/stores/sparqlstore.py b/rdflib/plugins/stores/sparqlstore.py
index 989b0126..63e92f54 100644
--- a/rdflib/plugins/stores/sparqlstore.py
+++ b/rdflib/plugins/stores/sparqlstore.py
@@ -7,9 +7,9 @@ This was first done in layer-cake, and then ported to RDFLib
"""
# Defines some SPARQL keywords
-LIMIT = 'LIMIT'
-OFFSET = 'OFFSET'
-ORDERBY = 'ORDER BY'
+LIMIT = "LIMIT"
+OFFSET = "OFFSET"
+ORDERBY = "ORDER BY"
import re
import collections
@@ -24,7 +24,7 @@ from rdflib.graph import DATASET_DEFAULT_GRAPH_ID
from rdflib.term import Node
-BNODE_IDENT_PATTERN = re.compile('(?P<label>_\:[^\s]+)')
+BNODE_IDENT_PATTERN = re.compile(r"(?P<label>_\:[^\s]+)")  # raw string: "\:" is not a valid str escape
def _node_to_sparql(node):
@@ -87,21 +87,26 @@ class SPARQLStore(SPARQLConnector, Store):
will use HTTP basic auth.
"""
+
formula_aware = False
transaction_aware = False
graph_aware = True
regex_matching = NATIVE_REGEX
- def __init__(self,
- endpoint=None,
- sparql11=True, context_aware=True,
- node_to_sparql=_node_to_sparql,
- returnFormat='xml',
- **sparqlconnector_kwargs):
+ def __init__(
+ self,
+ endpoint=None,
+ sparql11=True,
+ context_aware=True,
+ node_to_sparql=_node_to_sparql,
+ returnFormat="xml",
+ **sparqlconnector_kwargs
+ ):
"""
"""
super(SPARQLStore, self).__init__(
- endpoint, returnFormat=returnFormat, **sparqlconnector_kwargs)
+ endpoint, returnFormat=returnFormat, **sparqlconnector_kwargs
+ )
self.node_to_sparql = node_to_sparql
self.nsBindings = {}
@@ -112,7 +117,7 @@ class SPARQLStore(SPARQLConnector, Store):
# Database Management Methods
def create(self, configuration):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def open(self, configuration, create=False):
"""
@@ -125,23 +130,23 @@ class SPARQLStore(SPARQLConnector, Store):
self.query_endpoint = configuration
def destroy(self, configuration):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
# Transactional interfaces
def commit(self):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def rollback(self):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def add(self, _, context=None, quoted=False):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def addN(self, quads):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def remove(self, _, context):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def _query(self, *args, **kwargs):
self._queries += 1
@@ -152,20 +157,18 @@ class SPARQLStore(SPARQLConnector, Store):
bindings = list(self.nsBindings.items()) + list(extra_bindings.items())
if not bindings:
return query
- return '\n'.join([
- '\n'.join(['PREFIX %s: <%s>' % (k, v) for k, v in bindings]),
- '', # separate ns_bindings from query with an empty line
- query
- ])
+ return "\n".join(
+ [
+ "\n".join(["PREFIX %s: <%s>" % (k, v) for k, v in bindings]),
+ "", # separate ns_bindings from query with an empty line
+ query,
+ ]
+ )
def _preprocess_query(self, query):
return self._inject_prefixes(query)
- def query(self, query,
- initNs={},
- initBindings={},
- queryGraph=None,
- DEBUG=False):
+ def query(self, query, initNs={}, initBindings={}, queryGraph=None, DEBUG=False):
self.debug = DEBUG
assert isinstance(query, str)
@@ -173,17 +176,18 @@ class SPARQLStore(SPARQLConnector, Store):
if initBindings:
if not self.sparql11:
- raise Exception(
- "initBindings not supported for SPARQL 1.0 Endpoints.")
+ raise Exception("initBindings not supported for SPARQL 1.0 Endpoints.")
v = list(initBindings)
# VALUES was added to SPARQL 1.1 on 2012/07/24
- query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
- % (" ".join("?" + str(x) for x in v),
- " ".join(self.node_to_sparql(initBindings[x]) for x in v))
+ query += "\nVALUES ( %s )\n{ ( %s ) }\n" % (
+ " ".join("?" + str(x) for x in v),
+ " ".join(self.node_to_sparql(initBindings[x]) for x in v),
+ )
- return self._query(query,
- default_graph=queryGraph if self._is_contextual(queryGraph) else None)
+ return self._query(
+ query, default_graph=queryGraph if self._is_contextual(queryGraph) else None
+ )
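    # For example, initBindings={Variable('s'): URIRef('urn:a')} is expected
    # to append roughly this to the query text:
    #
    #     VALUES ( ?s )
    #     { ( <urn:a> ) }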
def triples(self, spo, context=None):
"""
@@ -225,28 +229,31 @@ class SPARQLStore(SPARQLConnector, Store):
vars = []
if not s:
- s = Variable('s')
+ s = Variable("s")
vars.append(s)
if not p:
- p = Variable('p')
+ p = Variable("p")
vars.append(p)
if not o:
- o = Variable('o')
+ o = Variable("o")
vars.append(o)
if vars:
- v = ' '.join([term.n3() for term in vars])
- verb = 'SELECT %s ' % v
+ v = " ".join([term.n3() for term in vars])
+ verb = "SELECT %s " % v
else:
- verb = 'ASK'
+ verb = "ASK"
nts = self.node_to_sparql
query = "%s { %s %s %s }" % (verb, nts(s), nts(p), nts(o))
# The ORDER BY is necessary
- if hasattr(context, LIMIT) or hasattr(context, OFFSET) \
- or hasattr(context, ORDERBY):
+ if (
+ hasattr(context, LIMIT)
+ or hasattr(context, OFFSET)
+ or hasattr(context, ORDERBY)
+ ):
var = None
if isinstance(s, Variable):
var = s
@@ -254,28 +261,33 @@ class SPARQLStore(SPARQLConnector, Store):
var = p
elif isinstance(o, Variable):
var = o
- elif hasattr(context, ORDERBY) \
- and isinstance(getattr(context, ORDERBY), Variable):
+ elif hasattr(context, ORDERBY) and isinstance(
+ getattr(context, ORDERBY), Variable
+ ):
var = getattr(context, ORDERBY)
- query = query + ' %s %s' % (ORDERBY, var.n3())
+ query = query + " %s %s" % (ORDERBY, var.n3())
try:
- query = query + ' LIMIT %s' % int(getattr(context, LIMIT))
+ query = query + " LIMIT %s" % int(getattr(context, LIMIT))
except (ValueError, TypeError, AttributeError):
pass
try:
- query = query + ' OFFSET %s' % int(getattr(context, OFFSET))
+ query = query + " OFFSET %s" % int(getattr(context, OFFSET))
except (ValueError, TypeError, AttributeError):
pass
- result = self._query(query,
- default_graph=context.identifier if self._is_contextual(context) else None)
+ result = self._query(
+ query,
+ default_graph=context.identifier if self._is_contextual(context) else None,
+ )
if vars:
for row in result:
- yield (row.get(s, s),
- row.get(p, p),
- row.get(o, o)), None # why is the context here not the passed in graph 'context'?
+ yield (
+ row.get(s, s),
+ row.get(p, p),
+ row.get(o, o),
+ ), None # why is the context here not the passed in graph 'context'?
else:
if result.askAnswer:
yield (s, p, o), None
@@ -288,18 +300,23 @@ class SPARQLStore(SPARQLConnector, Store):
which will iterate over each term in the list and dispatch to
triples.
"""
- raise NotImplementedError('Triples choices currently not supported')
+ raise NotImplementedError("Triples choices currently not supported")
def __len__(self, context=None):
if not self.sparql11:
raise NotImplementedError(
- "For performance reasons, this is not" +
- "supported for sparql1.0 endpoints")
+ "For performance reasons, this is not"
+ + "supported for sparql1.0 endpoints"
+ )
else:
q = "SELECT (count(*) as ?c) WHERE {?s ?p ?o .}"
- result = self._query(q,
- default_graph=context.identifier if self._is_contextual(context) else None)
+ result = self._query(
+ q,
+ default_graph=context.identifier
+ if self._is_contextual(context)
+ else None,
+ )
return int(next(iter(result)).c)
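
# A usage sketch, assuming SPARQLStore takes the query endpoint as its
# first constructor argument (the URLs below are placeholders):
from rdflib import Graph
from rdflib.plugins.stores.sparqlstore import SPARQLStore

store = SPARQLStore("http://example.org/sparql")
g = Graph(store, identifier="http://example.org/g")
print(len(g))  # int(?c) from: SELECT (count(*) as ?c) WHERE {?s ?p ?o .}
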
@@ -321,12 +338,14 @@ class SPARQLStore(SPARQLConnector, Store):
if triple:
nts = self.node_to_sparql
s, p, o = triple
- params = (nts(s if s else Variable('s')),
- nts(p if p else Variable('p')),
- nts(o if o else Variable('o')))
- q = 'SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params
+ params = (
+ nts(s if s else Variable("s")),
+ nts(p if p else Variable("p")),
+ nts(o if o else Variable("o")),
+ )
+ q = "SELECT ?name WHERE { GRAPH ?name { %s %s %s }}" % params
else:
- q = 'SELECT ?name WHERE { GRAPH ?name {} }'
+ q = "SELECT ?name WHERE { GRAPH ?name {} }"
result = self._query(q)
@@ -338,9 +357,7 @@ class SPARQLStore(SPARQLConnector, Store):
def prefix(self, namespace):
""" """
- return dict(
- [(v, k) for k, v in self.nsBindings.items()]
- ).get(namespace)
+ return dict([(v, k) for k, v in self.nsBindings.items()]).get(namespace)
def namespace(self, prefix):
return self.nsBindings.get(prefix)
@@ -350,10 +367,10 @@ class SPARQLStore(SPARQLConnector, Store):
yield prefix, ns
def add_graph(self, graph):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def remove_graph(self, graph):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def _is_contextual(self, graph):
""" Returns `True` if the "GRAPH" keyword must appear
@@ -362,7 +379,7 @@ class SPARQLStore(SPARQLConnector, Store):
if (not self.context_aware) or (graph is None):
return False
if isinstance(graph, str):
- return graph != '__UNION__'
+ return graph != "__UNION__"
else:
return graph.identifier != DATASET_DEFAULT_GRAPH_ID
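
# The decision above, summarised:
#   context_aware is False                        -> False (never emit GRAPH)
#   graph is None                                 -> False (default graph)
#   graph == "__UNION__"                          -> False (union of all graphs)
#   graph.identifier == DATASET_DEFAULT_GRAPH_ID  -> False (dataset default)
#   any other graph or identifier                 -> True  (wrap in GRAPH ... { })
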
@@ -414,19 +431,27 @@ class SPARQLUpdateStore(SPARQLStore):
STRING_LITERAL2 = u'"([^"\\\\]|\\\\.)*"'
STRING_LITERAL_LONG1 = u"'''(('|'')?([^'\\\\]|\\\\.))*'''"
STRING_LITERAL_LONG2 = u'"""(("|"")?([^"\\\\]|\\\\.))*"""'
- String = u'(%s)|(%s)|(%s)|(%s)' % (STRING_LITERAL1, STRING_LITERAL2,
- STRING_LITERAL_LONG1, STRING_LITERAL_LONG2)
+ String = u"(%s)|(%s)|(%s)|(%s)" % (
+ STRING_LITERAL1,
+ STRING_LITERAL2,
+ STRING_LITERAL_LONG1,
+ STRING_LITERAL_LONG2,
+ )
IRIREF = u'<([^<>"{}|^`\\]\\\\\[\\x00-\\x20])*>'
- COMMENT = u'#[^\\x0D\\x0A]*([\\x0D\\x0A]|\\Z)'
+ COMMENT = u"#[^\\x0D\\x0A]*([\\x0D\\x0A]|\\Z)"
# Simplified grammar to find { at beginning and } at end of blocks
- BLOCK_START = u'{'
- BLOCK_END = u'}'
- ESCAPED = u'\\\\.'
+ BLOCK_START = u"{"
+ BLOCK_END = u"}"
+ ESCAPED = u"\\\\."
# Match anything that doesn't start or end a block:
- BlockContent = u'(%s)|(%s)|(%s)|(%s)' % (String, IRIREF, COMMENT, ESCAPED)
- BlockFinding = u'(?P<block_start>%s)|(?P<block_end>%s)|(?P<block_content>%s)' % (BLOCK_START, BLOCK_END, BlockContent)
+ BlockContent = u"(%s)|(%s)|(%s)|(%s)" % (String, IRIREF, COMMENT, ESCAPED)
+ BlockFinding = u"(?P<block_start>%s)|(?P<block_end>%s)|(?P<block_content>%s)" % (
+ BLOCK_START,
+ BLOCK_END,
+ BlockContent,
+ )
BLOCK_FINDING_PATTERN = re.compile(BlockFinding)
# Note that BLOCK_FINDING_PATTERN.finditer() will not cover the whole
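
# A self-contained sketch of how this tokenizer skips braces inside
# string literals, reduced to the short double-quoted case (the class
# attributes above also cover long strings, IRIs, comments and escapes):
import re

STRING_LITERAL2 = '"([^"\\\\]|\\\\.)*"'
BlockFinding = "(?P<block_start>{)|(?P<block_end>})|(?P<block_content>%s)" % STRING_LITERAL2
update = 'INSERT DATA { <s> <p> "a } inside a string" }'
for m in re.finditer(BlockFinding, update):
    if m.group("block_start") is not None:
        print("block opens at", m.start())
    elif m.group("block_end") is not None:
        print("block closes at", m.start())
# only the real braces are reported; the "}" inside the literal is
# consumed as block_content and never closes the block
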
@@ -435,15 +460,17 @@ class SPARQLUpdateStore(SPARQLStore):
##################################################################
- def __init__(self,
- queryEndpoint=None, update_endpoint=None,
- sparql11=True,
- context_aware=True,
- postAsEncoded=True,
- autocommit=True,
- dirty_reads=False,
- **kwds
- ):
+ def __init__(
+ self,
+ queryEndpoint=None,
+ update_endpoint=None,
+ sparql11=True,
+ context_aware=True,
+ postAsEncoded=True,
+ autocommit=True,
+ dirty_reads=False,
+ **kwds
+ ):
"""
        :param autocommit: if set, the store will commit after every
        writing operation. If False, we only make queries on the
@@ -521,7 +548,7 @@ class SPARQLUpdateStore(SPARQLStore):
        and reads can degenerate to the call-per-triple behaviour that existed originally.
"""
if self._edits and len(self._edits) > 0:
- self._update('\n;\n'.join(self._edits))
+ self._update("\n;\n".join(self._edits))
self._edits = None
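
# A sketch of the single request a batched commit sends (the update
# strings are illustrative):
edits = [
    'INSERT DATA { <http://example.org/s> <http://example.org/p> "1" }',
    'DELETE DATA { <http://example.org/s> <http://example.org/p> "0" }',
]
print("\n;\n".join(edits))
# both operations reach the update endpoint in one request, joined by
# the ";" separator SPARQL 1.1 Update allows between operations
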
def rollback(self):
@@ -539,8 +566,7 @@ class SPARQLUpdateStore(SPARQLStore):
nts = self.node_to_sparql
triple = "%s %s %s ." % (nts(subject), nts(predicate), nts(obj))
if self._is_contextual(context):
- q = "INSERT DATA { GRAPH %s { %s } }" % (
- nts(context.identifier), triple)
+ q = "INSERT DATA { GRAPH %s { %s } }" % (nts(context.identifier), triple)
else:
q = "INSERT DATA { %s }" % triple
self._transaction().append(q)
@@ -559,12 +585,13 @@ class SPARQLUpdateStore(SPARQLStore):
nts = self.node_to_sparql
for context in contexts:
triples = [
- "%s %s %s ." % (
- nts(subject), nts(predicate), nts(obj)
- ) for subject, predicate, obj in contexts[context]
+ "%s %s %s ." % (nts(subject), nts(predicate), nts(obj))
+ for subject, predicate, obj in contexts[context]
]
- data.append("INSERT DATA { GRAPH %s { %s } }\n" % (
- nts(context.identifier), '\n'.join(triples)))
+ data.append(
+ "INSERT DATA { GRAPH %s { %s } }\n"
+ % (nts(context.identifier), "\n".join(triples))
+ )
self._transaction().extend(data)
if self.autocommit:
self.commit()
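
# Sketch of one grouped update addN() builds for two triples in the same
# context (all IRIs are placeholders):
triples = [
    "<http://example.org/s> <http://example.org/p> <http://example.org/o1> .",
    "<http://example.org/s> <http://example.org/p> <http://example.org/o2> .",
]
print("INSERT DATA { GRAPH %s { %s } }\n" % ("<http://example.org/g>", "\n".join(triples)))
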
@@ -586,7 +613,10 @@ class SPARQLUpdateStore(SPARQLStore):
triple = "%s %s %s ." % (nts(subject), nts(predicate), nts(obj))
if self._is_contextual(context):
cid = nts(context.identifier)
- q = "WITH %(graph)s DELETE { %(triple)s } WHERE { %(triple)s }" % { 'graph': cid, 'triple': triple }
+ q = "WITH %(graph)s DELETE { %(triple)s } WHERE { %(triple)s }" % {
+ "graph": cid,
+ "triple": triple,
+ }
else:
q = "DELETE { %s } WHERE { %s } " % (triple, triple)
self._transaction().append(q)
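
# The contextual branch above yields, for a fully bound triple in a
# placeholder graph:
cid = "<http://example.org/g>"
triple = "<http://example.org/s> <http://example.org/p> <http://example.org/o> ."
print("WITH %(graph)s DELETE { %(triple)s } WHERE { %(triple)s }" % {
    "graph": cid,
    "triple": triple,
})
# WITH <http://example.org/g> DELETE { ... } WHERE { ... }
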
@@ -602,11 +632,7 @@ class SPARQLUpdateStore(SPARQLStore):
SPARQLConnector.update(self, update)
- def update(self, query,
- initNs={},
- initBindings={},
- queryGraph=None,
- DEBUG=False):
+ def update(self, query, initNs={}, initBindings={}, queryGraph=None, DEBUG=False):
"""
        Perform a SPARQL Update query (INSERT, LOAD, DELETE, etc.)
        against the endpoint.
@@ -656,9 +682,10 @@ class SPARQLUpdateStore(SPARQLStore):
# have a WHERE clause. This also works for updates with
# more than one INSERT/DELETE.
v = list(initBindings)
- values = "\nVALUES ( %s )\n{ ( %s ) }\n"\
- % (" ".join("?" + str(x) for x in v),
- " ".join(self.node_to_sparql(initBindings[x]) for x in v))
+ values = "\nVALUES ( %s )\n{ ( %s ) }\n" % (
+ " ".join("?" + str(x) for x in v),
+ " ".join(self.node_to_sparql(initBindings[x]) for x in v),
+ )
query = self.where_pattern.sub("WHERE { " + values, query)
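
# Updates cannot simply have VALUES appended the way queries can, so the
# bindings are spliced in after each "WHERE {". A sketch, with an assumed
# shape for where_pattern:
import re

where_pattern = re.compile(r"(?P<where>WHERE\s*{)", re.IGNORECASE)  # assumed
values = "\nVALUES ( ?x )\n{ ( <http://example.org/a> ) }\n"
update = "DELETE { ?s ?p ?x } WHERE { ?s ?p ?x }"
print(where_pattern.sub("WHERE { " + values, update))
# -> DELETE { ?s ?p ?x } WHERE { VALUES ( ?x ) { ( <http://example.org/a> ) }
#    ?s ?p ?x }   (modulo whitespace)
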
@@ -677,7 +704,7 @@ class SPARQLUpdateStore(SPARQLStore):
if isinstance(query_graph, Node):
query_graph = self.node_to_sparql(query_graph)
else:
- query_graph = '<%s>' % query_graph
+ query_graph = "<%s>" % query_graph
graph_block_open = " GRAPH %s {" % query_graph
graph_block_close = "} "
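
# Sketch of the rewriting the loop below performs, for a placeholder
# queryGraph <http://example.org/g>:
#   before: DELETE { ?s ?p ?o } WHERE { ?s ?p ?o }
#   after:  DELETE { GRAPH <http://example.org/g> { ?s ?p ?o } }
#           WHERE { GRAPH <http://example.org/g> { ?s ?p ?o } }
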
@@ -696,16 +723,18 @@ class SPARQLUpdateStore(SPARQLStore):
modified_query = []
pos = 0
for match in self.BLOCK_FINDING_PATTERN.finditer(query):
- if match.group('block_start') is not None:
+ if match.group("block_start") is not None:
level += 1
if level == 1:
- modified_query.append(query[pos:match.end()])
+ modified_query.append(query[pos : match.end()])
modified_query.append(graph_block_open)
pos = match.end()
- elif match.group('block_end') is not None:
+ elif match.group("block_end") is not None:
if level == 1:
- since_previous_pos = query[pos:match.start()]
- if modified_query[-1] is graph_block_open and (since_previous_pos == "" or since_previous_pos.isspace()):
+ since_previous_pos = query[pos : match.start()]
+ if modified_query[-1] is graph_block_open and (
+ since_previous_pos == "" or since_previous_pos.isspace()
+ ):
                    # In this case, adding graph_block_open and
                    # graph_block_close results in an empty GRAPH block. Some
                    # endpoints (e.g. TDB) cannot handle this. Therefore
@@ -725,8 +754,7 @@ class SPARQLUpdateStore(SPARQLStore):
if not self.graph_aware:
Store.add_graph(self, graph)
elif graph.identifier != DATASET_DEFAULT_GRAPH_ID:
- self.update(
- "CREATE GRAPH %s" % self.node_to_sparql(graph.identifier))
+ self.update("CREATE GRAPH %s" % self.node_to_sparql(graph.identifier))
def close(self, commit_pending_transaction=False):
@@ -741,5 +769,4 @@ class SPARQLUpdateStore(SPARQLStore):
elif graph.identifier == DATASET_DEFAULT_GRAPH_ID:
self.update("DROP DEFAULT")
else:
- self.update(
- "DROP GRAPH %s" % self.node_to_sparql(graph.identifier))
+ self.update("DROP GRAPH %s" % self.node_to_sparql(graph.identifier))