author     Nicholas Car <nicholas.car@surroundaustralia.com>  2020-05-17 16:38:31 +1000
committer  Nicholas Car <nicholas.car@surroundaustralia.com>  2020-05-17 16:38:31 +1000
commit     78e99a408859f0749ddfc628cbad60f4c5c93aea (patch)
tree       faa3857d33bdb59a59558569c5705c60fdc355e6
parent     dac682fcbf8ea708175519e4824be7a937073749 (diff)
download   rdflib-78e99a408859f0749ddfc628cbad60f4c5c93aea.tar.gz
changes for flake8
-rw-r--r--  rdflib/__init__.py  2
-rw-r--r--  rdflib/collection.py  2
-rw-r--r--  rdflib/compare.py  6
-rw-r--r--  rdflib/events.py  4
-rw-r--r--  rdflib/extras/infixowl.py  6
-rw-r--r--  rdflib/graph.py  69
-rw-r--r--  rdflib/namespace.py  2
-rw-r--r--  rdflib/plugins/memory.py  4
-rwxr-xr-x  rdflib/plugins/parsers/notation3.py  117
-rw-r--r--  rdflib/plugins/parsers/ntriples.py  8
-rw-r--r--  rdflib/plugins/parsers/rdfxml.py  4
-rw-r--r--  rdflib/plugins/parsers/trig.py  6
-rw-r--r--  rdflib/plugins/parsers/trix.py  2
-rw-r--r--  rdflib/plugins/serializers/nt.py  22
-rw-r--r--  rdflib/plugins/serializers/rdfxml.py  6
-rw-r--r--  rdflib/plugins/serializers/trig.py  4
-rw-r--r--  rdflib/plugins/serializers/turtle.py  20
-rw-r--r--  rdflib/plugins/serializers/xmlwriter.py  6
-rw-r--r--  rdflib/plugins/sleepycat.py  4
-rw-r--r--  rdflib/plugins/sparql/algebra.py  20
-rw-r--r--  rdflib/plugins/sparql/evaluate.py  1
-rw-r--r--  rdflib/plugins/sparql/evalutils.py  2
-rw-r--r--  rdflib/plugins/sparql/operators.py  52
-rw-r--r--  rdflib/plugins/sparql/parser.py  8
-rw-r--r--  rdflib/plugins/sparql/parserutils.py  4
-rw-r--r--  rdflib/plugins/sparql/results/graph.py  2
-rw-r--r--  rdflib/plugins/sparql/results/tsvresults.py  2
-rw-r--r--  rdflib/plugins/sparql/results/xmlresults.py  3
-rw-r--r--  rdflib/plugins/sparql/sparql.py  2
-rw-r--r--  rdflib/plugins/stores/sparqlstore.py  20
-rw-r--r--  rdflib/term.py  15
-rw-r--r--  rdflib/tools/csv2rdf.py  22
-rw-r--r--  rdflib/tools/rdfpipe.py  1
-rw-r--r--  rdflib/tools/rdfs2dot.py  14
-rw-r--r--  rdflib/util.py  4
35 files changed, 216 insertions, 250 deletions
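
Most of the hunks below apply a handful of recurring flake8 fixes: renaming the ambiguous single-letter variable l to l_ (E741), replacing comparisons against None with is / is not None (E711), rewriting "not x in y" as "x not in y" (E713), dropping unused imports and assignments (F401, F841), and removing the space before the colon in slice expressions (E203). A minimal before/after sketch of these patterns follows; it is illustrative only and not code taken from the repository.

    # before: the style flake8 complains about
    def first_label(labels, l):               # E741: ambiguous variable name 'l'
        if l != None and not l in labels:     # E711, E713
            return labels[0 : 1]              # E203: whitespace before ':'
        return l

    # after: the style used throughout this commit
    def first_label(labels, l_):
        if l_ is not None and l_ not in labels:
            return labels[0:1]
        return l_
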
diff --git a/rdflib/__init__.py b/rdflib/__init__.py
index a6c6c39a..837764a3 100644
--- a/rdflib/__init__.py
+++ b/rdflib/__init__.py
@@ -92,7 +92,7 @@ _interactive_mode = False
try:
import __main__
- if not hasattr(__main__, "__file__") and sys.stdout != None and sys.stderr.isatty():
+ if not hasattr(__main__, "__file__") and sys.stdout is not None and sys.stderr.isatty():
# show log messages in interactive mode
_interactive_mode = True
logger.setLevel(logging.INFO)
diff --git a/rdflib/collection.py b/rdflib/collection.py
index 6f715b15..8b667a23 100644
--- a/rdflib/collection.py
+++ b/rdflib/collection.py
@@ -272,7 +272,7 @@ if __name__ == "__main__":
try:
del c[500]
- except IndexError as i:
+ except IndexError:
pass
c.append(Literal("5"))
diff --git a/rdflib/compare.py b/rdflib/compare.py
index 7fe93f52..839cfbb0 100644
--- a/rdflib/compare.py
+++ b/rdflib/compare.py
@@ -315,7 +315,6 @@ class _TripleCanonicalizer(object):
return c
def _get_candidates(self, coloring):
- candidates = [c for c in coloring if not c.discrete()]
for c in [c for c in coloring if not c.discrete()]:
for node in c.nodes:
yield node, c
@@ -336,7 +335,7 @@ class _TripleCanonicalizer(object):
coloring.extend(colors)
try:
si = sequence.index(c)
- sequence = sequence[:si] + colors + sequence[si + 1 :]
+ sequence = sequence[:si] + colors + sequence[si + 1:]
except ValueError:
sequence = colors[1:] + sequence
combined_colors = []
@@ -422,7 +421,6 @@ class _TripleCanonicalizer(object):
if best_score is None or best_score < color_score:
best = [refined_coloring]
best_score = color_score
- best_experimental = experimental
best_experimental_score = experimental_score
elif best_score > color_score:
# prune this branch.
@@ -451,8 +449,6 @@ class _TripleCanonicalizer(object):
def canonical_triples(self, stats=None):
if stats is not None:
- start_canonicalization = datetime.now()
- if stats is not None:
start_coloring = datetime.now()
coloring = self._initial_color()
if stats is not None:
diff --git a/rdflib/events.py b/rdflib/events.py
index 5451c6be..816925ed 100644
--- a/rdflib/events.py
+++ b/rdflib/events.py
@@ -84,8 +84,8 @@ class Dispatcher(object):
lst = self._dispatch_map.get(type(event), None)
if lst is None:
raise ValueError("unknown event type: %s" % type(event))
- for l in lst:
- l(event)
+ for l_ in lst:
+ l_(event)
def test():
diff --git a/rdflib/extras/infixowl.py b/rdflib/extras/infixowl.py
index 16dc616a..c043675a 100644
--- a/rdflib/extras/infixowl.py
+++ b/rdflib/extras/infixowl.py
@@ -597,8 +597,8 @@ class AnnotatableTerms(Individual):
if isinstance(label, Identifier):
self.graph.add((self.identifier, RDFS.label, label))
else:
- for l in label:
- self.graph.add((self.identifier, RDFS.label, l))
+ for l_ in label:
+ self.graph.add((self.identifier, RDFS.label, l_))
@TermDeletionHelper(RDFS.label)
def _delete_label(self):
@@ -1900,7 +1900,7 @@ class Restriction(Class):
return manchesterSyntax(self.identifier, self.graph)
-### Infix Operators ###
+# Infix Operators #
some = Infix(
diff --git a/rdflib/graph.py b/rdflib/graph.py
index 77d1fbc7..12d18dce 100644
--- a/rdflib/graph.py
+++ b/rdflib/graph.py
@@ -3,11 +3,35 @@ from __future__ import division
from __future__ import print_function
from rdflib.term import Literal # required for doctests
-
-assert Literal # avoid warning
from rdflib.namespace import Namespace # required for doctests
+import logging
+
+import random
+from rdflib.namespace import RDF, RDFS, SKOS
+from rdflib import plugin, exceptions, query
+from rdflib.term import Node, URIRef, Genid
+from rdflib.term import BNode
+import rdflib.term
+from rdflib.paths import Path
+from rdflib.store import Store
+from rdflib.serializer import Serializer
+from rdflib.parser import Parser
+from rdflib.parser import create_input_source
+from rdflib.namespace import NamespaceManager
+from rdflib.resource import Resource
+from rdflib.collection import Collection
+
+import os
+import shutil
+import tempfile
+
+from io import BytesIO
+from urllib.parse import urlparse
+
+assert Literal # avoid warning
assert Namespace # avoid warning
+logger = logging.getLogger(__name__)
__doc__ = """\
@@ -235,31 +259,6 @@ Using Namespace class:
"""
-import logging
-
-logger = logging.getLogger(__name__)
-
-import random
-from rdflib.namespace import RDF, RDFS, SKOS
-from rdflib import plugin, exceptions, query
-from rdflib.term import Node, URIRef, Genid
-from rdflib.term import BNode
-import rdflib.term
-from rdflib.paths import Path
-from rdflib.store import Store
-from rdflib.serializer import Serializer
-from rdflib.parser import Parser
-from rdflib.parser import create_input_source
-from rdflib.namespace import NamespaceManager
-from rdflib.resource import Resource
-from rdflib.collection import Collection
-
-import os
-import shutil
-import tempfile
-
-from io import BytesIO
-from urllib.parse import urlparse
__all__ = [
"Graph",
@@ -775,18 +774,14 @@ class Graph(Node):
# setup the language filtering
if lang is not None:
if lang == "": # we only want not language-tagged literals
-
- def langfilter(l):
- return l.language is None
-
+ def langfilter(l_):
+ return l_.language is None
else:
-
- def langfilter(l):
- return l.language == lang
+ def langfilter(l_):
+ return l_.language == lang
else: # we don't care about language tags
-
- def langfilter(l):
+ def langfilter(l_):
return True
for labelProp in labelProperties:
@@ -794,7 +789,7 @@ class Graph(Node):
if len(labels) == 0:
continue
else:
- return [(labelProp, l) for l in labels]
+ return [(labelProp, l_) for l_ in labels]
return default
def comment(self, subject, default=""):
diff --git a/rdflib/namespace.py b/rdflib/namespace.py
index 7e8f053b..091fbc60 100644
--- a/rdflib/namespace.py
+++ b/rdflib/namespace.py
@@ -618,7 +618,7 @@ class NamespaceManager(object):
pl_namespace = get_longest_namespace(self.__strie[namespace], uri)
if pl_namespace is not None:
namespace = pl_namespace
- name = uri[len(namespace) :]
+ name = uri[len(namespace):]
namespace = URIRef(namespace)
prefix = self.store.prefix(namespace) # warning multiple prefixes problem
diff --git a/rdflib/plugins/memory.py b/rdflib/plugins/memory.py
index c102799f..6cedc2b3 100644
--- a/rdflib/plugins/memory.py
+++ b/rdflib/plugins/memory.py
@@ -4,7 +4,7 @@ from __future__ import print_function
import random
-from rdflib.store import Store, NO_STORE, VALID_STORE
+from rdflib.store import Store
__all__ = ["Memory", "IOMemory"]
@@ -295,7 +295,7 @@ class IOMemory(Store):
del self.__tripleContexts[enctriple]
if (
- not req_cid is None
+ req_cid is not None
and req_cid in self.__contextTriples
and len(self.__contextTriples[req_cid]) == 0
):
diff --git a/rdflib/plugins/parsers/notation3.py b/rdflib/plugins/parsers/notation3.py
index 3bc2169f..c427f153 100755
--- a/rdflib/plugins/parsers/notation3.py
+++ b/rdflib/plugins/parsers/notation3.py
@@ -32,7 +32,6 @@ from __future__ import division
from __future__ import print_function
# Python standard libraries
-import types
import sys
import os
import re
@@ -140,13 +139,10 @@ def join(here, there):
return here + frag
# join('mid:foo@example', '../foo') bzzt
- if here[bcolonl + 1 : bcolonl + 2] != "/":
- raise ValueError(
- ("Base <%s> has no slash after " "colon - with relative '%s'.")
- % (here, there)
- )
+ if here[bcolonl + 1: bcolonl + 2] != "/":
+ raise ValueError("Base <%s> has no slash after " "colon - with relative '%s'." % (here, there))
- if here[bcolonl + 1 : bcolonl + 3] == "//":
+ if here[bcolonl + 1: bcolonl + 3] == "//":
bpath = here.find("/", bcolonl + 3)
else:
bpath = bcolonl + 1
@@ -247,7 +243,7 @@ runNamespaceValue = None
def runNamespace():
- "Return a URI suitable as a namespace for run-local objects"
+ """Returns a URI suitable as a namespace for run-local objects"""
# @@@ include hostname (privacy?) (hash it?)
global runNamespaceValue
if runNamespaceValue is None:
@@ -259,10 +255,9 @@ nextu = 0
def uniqueURI():
- "A unique URI"
+ """A unique URI"""
global nextu
nextu += 1
- # return runNamespace() + "u_" + `nextu`
return runNamespace() + "u_" + str(nextu)
@@ -323,20 +318,6 @@ def unicodeExpand(m):
raise Exception("Invalid unicode code point: " + m.group(1))
-if narrow_build:
-
- def unicodeExpand(m):
- try:
- return chr(int(m.group(1), 16))
- except ValueError:
- warnings.warn(
- "Encountered a unicode char > 0xFFFF in a narrow python build. "
- "Trying to degrade gracefully, but this can cause problems "
- "later when working with the string:\n%s" % m.group(0)
- )
- return codecs.decode(m.group(0), "unicode_escape")
-
-
unicodeEscape4 = re.compile(r"\\u([0-9a-fA-F]{4})")
unicodeEscape8 = re.compile(r"\\U([0-9a-fA-F]{8})")
@@ -521,14 +502,14 @@ class SinkParser:
"""
assert tok[0] not in _notNameChars # not for punctuation
- if argstr[i : i + 1] == "@":
+ if argstr[i: i + 1] == "@":
i = i + 1
else:
if tok not in self.keywords:
return -1 # No, this has neither keywords declaration nor "@"
if (
- argstr[i : i + len(tok)] == tok
+ argstr[i: i + len(tok)] == tok
and (argstr[i + len(tok)] in _notKeywordsChars)
or (colon and argstr[i + len(tok)] == ":")
):
@@ -545,7 +526,7 @@ class SinkParser:
assert tok[0] not in _notNameChars # not for punctuation
- if argstr[i : i + len(tok)].lower() == tok.lower() and (
+ if argstr[i: i + len(tok)].lower() == tok.lower() and (
argstr[i + len(tok)] in _notQNameChars
):
i = i + len(tok)
@@ -725,7 +706,7 @@ class SinkParser:
self._store.bind(qn, uri)
def setKeywords(self, k):
- "Takes a list of strings"
+ """Takes a list of strings"""
if k is None:
self.keywordsSet = 0
else:
@@ -813,23 +794,23 @@ class SinkParser:
res.append(("->", RDF_type))
return j
- if argstr[i : i + 2] == "<=":
+ if argstr[i: i + 2] == "<=":
if self.turtle:
self.BadSyntax(argstr, i, "Found '<=' in Turtle mode. ")
res.append(("<-", self._store.newSymbol(Logic_NS + "implies")))
return i + 2
- if argstr[i : i + 1] == "=":
+ if argstr[i: i + 1] == "=":
if self.turtle:
self.BadSyntax(argstr, i, "Found '=' in Turtle mode")
- if argstr[i + 1 : i + 2] == ">":
+ if argstr[i + 1: i + 2] == ">":
res.append(("->", self._store.newSymbol(Logic_NS + "implies")))
return i + 2
res.append(("->", DAML_sameAs))
return i + 1
- if argstr[i : i + 2] == ":=":
+ if argstr[i: i + 2] == ":=":
if self.turtle:
self.BadSyntax(argstr, i, "Found ':=' in Turtle mode")
@@ -842,7 +823,7 @@ class SinkParser:
res.append(("->", r[0]))
return j
- if argstr[i : i + 2] == ">-" or argstr[i : i + 2] == "<-":
+ if argstr[i: i + 2] == ">-" or argstr[i: i + 2] == "<-":
self.BadSyntax(argstr, j, ">- ... -> syntax is obsolete.")
return -1
@@ -863,8 +844,8 @@ class SinkParser:
if j < 0:
return j # nope
- while argstr[j : j + 1] in "!^": # no spaces, must follow exactly (?)
- ch = argstr[j : j + 1]
+ while argstr[j: j + 1] in "!^": # no spaces, must follow exactly (?)
+ ch = argstr[j: j + 1]
subj = res.pop()
obj = self.blankNode(uri=self.here(j))
j = self.node(argstr, j + 1, res)
@@ -898,7 +879,7 @@ class SinkParser:
if j < 0:
return j # eof
i = j
- ch = argstr[i : i + 1] # Quick 1-character checks first:
+ ch = argstr[i: i + 1] # Quick 1-character checks first:
if ch == "[":
bnodeID = self.here(i)
@@ -906,7 +887,7 @@ class SinkParser:
if j < 0:
self.BadSyntax(argstr, i, "EOF after '['")
# Hack for "is" binding name to anon node
- if argstr[j : j + 1] == "=":
+ if argstr[j: j + 1] == "=":
if self.turtle:
self.BadSyntax(
argstr, j, "Found '[=' or '[ =' when in turtle mode."
@@ -924,7 +905,7 @@ class SinkParser:
self.BadSyntax(
argstr, i, "EOF when objectList expected after [ = "
)
- if argstr[j : j + 1] == ";":
+ if argstr[j: j + 1] == ";":
j = j + 1
else:
self.BadSyntax(argstr, i, "objectList expected after [= ")
@@ -941,7 +922,7 @@ class SinkParser:
self.BadSyntax(
argstr, i, "EOF when ']' expected after [ <propertyList>"
)
- if argstr[j : j + 1] != "]":
+ if argstr[j: j + 1] != "]":
self.BadSyntax(argstr, j, "']' expected")
res.append(subj)
return j + 1
@@ -950,7 +931,7 @@ class SinkParser:
# if self.turtle:
# self.BadSyntax(argstr, i,
# "found '{' while in Turtle mode, Formulas not supported!")
- ch2 = argstr[i + 1 : i + 2]
+ ch2 = argstr[i + 1: i + 2]
if ch2 == "$":
# a set
i += 1
@@ -961,12 +942,12 @@ class SinkParser:
i = self.skipSpace(argstr, j)
if i < 0:
self.BadSyntax(argstr, i, "needed '$}', found end.")
- if argstr[i : i + 2] == "$}":
+ if argstr[i: i + 2] == "$}":
j = i + 2
break
if not first_run:
- if argstr[i : i + 1] == ",":
+ if argstr[i: i + 1] == ",":
i += 1
else:
self.BadSyntax(argstr, i, "expected: ','")
@@ -1001,7 +982,7 @@ class SinkParser:
if i < 0:
self.BadSyntax(argstr, i, "needed '}', found end.")
- if argstr[i : i + 1] == "}":
+ if argstr[i: i + 1] == "}":
j = i + 1
break
@@ -1020,7 +1001,7 @@ class SinkParser:
if ch == "(":
thing_type = self._store.newList
- ch2 = argstr[i + 1 : i + 2]
+ ch2 = argstr[i + 1: i + 2]
if ch2 == "$":
thing_type = self._store.newSet
i += 1
@@ -1031,7 +1012,7 @@ class SinkParser:
i = self.skipSpace(argstr, j)
if i < 0:
self.BadSyntax(argstr, i, "needed ')', found end.")
- if argstr[i : i + 1] == ")":
+ if argstr[i: i + 1] == ")":
j = i + 1
break
@@ -1084,7 +1065,7 @@ class SinkParser:
break
i = j + 1
- if argstr[j : j + 2] == ":-":
+ if argstr[j: j + 2] == ":-":
if self.turtle:
self.BadSyntax(argstr, j, "Found in ':-' in Turtle mode")
i = j + 2
@@ -1114,7 +1095,7 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
self.BadSyntax(argstr, j, "EOF found in list of objects")
- if argstr[i : i + 1] != ";":
+ if argstr[i: i + 1] != ";":
return i
i = i + 1 # skip semicolon and continue
@@ -1135,7 +1116,7 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
- ch = argstr[j : j + 1]
+ ch = argstr[j: j + 1]
if ch != ",":
if ch != ".":
return -1
@@ -1152,7 +1133,7 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
self.BadSyntax(argstr, j, "EOF found after object")
- if argstr[j : j + 1] != ",":
+ if argstr[j: j + 1] != ",":
return j # Found something else!
i = self.object(argstr, j + 1, res)
if i < 0:
@@ -1162,11 +1143,11 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
- if argstr[j : j + 1] == ".":
+ if argstr[j: j + 1] == ".":
return j + 1 # skip
- if argstr[j : j + 1] == "}":
+ if argstr[j: j + 1] == "}":
return j # don't skip it
- if argstr[j : j + 1] == "]":
+ if argstr[j: j + 1] == "]":
return j
self.BadSyntax(argstr, j, "expected '.' or '}' or ']' at end of statement")
@@ -1231,7 +1212,7 @@ class SinkParser:
assert (
":" in uref
), "With no base URI, cannot deal with relative URIs"
- if argstr[i - 1 : i] == "#" and not uref[-1:] == "#":
+ if argstr[i - 1: i] == "#" and not uref[-1:] == "#":
uref = uref + "#" # She meant it! Weirdness in urlparse?
symb = self._store.newSymbol(uref)
if symb in self._variables:
@@ -1280,7 +1261,7 @@ class SinkParser:
if j < 0:
return -1
- if argstr[j : j + 1] != "?":
+ if argstr[j: j + 1] != "?":
return -1
j = j + 1
i = j
@@ -1438,7 +1419,7 @@ class SinkParser:
i = j
if argstr[i] in self.string_delimiters:
- if argstr[i : i + 3] == argstr[i] * 3:
+ if argstr[i: i + 3] == argstr[i] * 3:
delim = argstr[i] * 3
else:
delim = argstr[i]
@@ -1486,7 +1467,7 @@ class SinkParser:
# return -1 ## or fall through?
if argstr[i] in self.string_delimiters:
- if argstr[i : i + 3] == argstr[i] * 3:
+ if argstr[i: i + 3] == argstr[i] * 3:
delim = argstr[i] * 3
else:
delim = argstr[i]
@@ -1495,7 +1476,7 @@ class SinkParser:
dt = None
j, s = self.strconst(argstr, i, delim)
lang = None
- if argstr[j : j + 1] == "@": # Language?
+ if argstr[j: j + 1] == "@": # Language?
m = langcode.match(argstr, j + 1)
if m is None:
raise BadSyntax(
@@ -1506,9 +1487,9 @@ class SinkParser:
"Bad language code syntax on string " + "literal, after @",
)
i = m.end()
- lang = argstr[j + 1 : i]
+ lang = argstr[j + 1: i]
j = i
- if argstr[j : j + 2] == "^^":
+ if argstr[j: j + 2] == "^^":
res2 = []
j = self.uri_ref2(argstr, j + 2, res2) # Read datatype URI
dt = res2[0]
@@ -1541,15 +1522,15 @@ class SinkParser:
if (
delim == delim3
): # done when delim is """ or ''' and, respectively ...
- if argstr[j : j + 5] == delim5: # ... we have "" or '' before
+ if argstr[j: j + 5] == delim5: # ... we have "" or '' before
i = j + 5
ustr = ustr + delim2
return i, ustr
- if argstr[j : j + 4] == delim4: # ... we have " or ' before
+ if argstr[j: j + 4] == delim4: # ... we have " or ' before
i = j + 4
ustr = ustr + delim1
return i, ustr
- if argstr[j : j + 3] == delim3: # current " or ' is part of delim
+ if argstr[j: j + 3] == delim3: # current " or ' is part of delim
i = j + 3
return i, ustr
@@ -1561,8 +1542,8 @@ class SinkParser:
m = interesting.search(argstr, j) # was argstr[j:].
# Note for pos param to work, MUST be compiled ... re bug?
assert m, "Quote expected in string at ^ in %s^%s" % (
- argstr[j - 20 : j],
- argstr[j : j + 20],
+ argstr[j - 20: j],
+ argstr[j: j + 20],
) # at least need a quote
i = m.start()
@@ -1608,7 +1589,7 @@ class SinkParser:
elif ch == "\\":
j = i + 1
- ch = argstr[j : j + 1] # Will be empty if string ends
+ ch = argstr[j: j + 1] # Will be empty if string ends
if not ch:
raise BadSyntax(
self._thisDoc,
@@ -1639,14 +1620,14 @@ class SinkParser:
self._thisDoc, startline, argstr, i, "unterminated string literal(3)"
)
try:
- return i + n, reg.sub(unicodeExpand, "\\" + prefix + argstr[i : i + n])
+ return i + n, reg.sub(unicodeExpand, "\\" + prefix + argstr[i: i + n])
except:
raise BadSyntax(
self._thisDoc,
startline,
argstr,
i,
- "bad string literal hex escape: " + argstr[i : i + n],
+ "bad string literal hex escape: " + argstr[i: i + n],
)
def uEscape(self, argstr, i, startline):
@@ -1691,7 +1672,7 @@ class BadSyntax(SyntaxError):
self._why,
pre,
argstr[st:i],
- argstr[i : i + 60],
+ argstr[i: i + 60],
post,
)
diff --git a/rdflib/plugins/parsers/ntriples.py b/rdflib/plugins/parsers/ntriples.py
index 9398c8de..41c529a7 100644
--- a/rdflib/plugins/parsers/ntriples.py
+++ b/rdflib/plugins/parsers/ntriples.py
@@ -78,7 +78,7 @@ def unquote(s):
while s:
m = r_safe.match(s)
if m:
- s = s[m.end() :]
+ s = s[m.end():]
result.append(m.group(1))
continue
@@ -90,7 +90,7 @@ def unquote(s):
m = r_uniquot.match(s)
if m:
- s = s[m.end() :]
+ s = s[m.end():]
u, U = m.groups()
codepoint = int(u or U, 16)
if codepoint > 0x10FFFF:
@@ -172,7 +172,7 @@ class NTriplesParser(object):
while True:
m = r_line.match(self.buffer)
if m: # the more likely prospect
- self.buffer = self.buffer[m.end() :]
+ self.buffer = self.buffer[m.end():]
return m.group(1)
else:
buffer = self.file.read(bufsiz)
@@ -210,7 +210,7 @@ class NTriplesParser(object):
# print(dir(pattern))
# print repr(self.line), type(self.line)
raise ParseError("Failed to eat %s at %s" % (pattern.pattern, self.line))
- self.line = self.line[m.end() :]
+ self.line = self.line[m.end():]
return m
def subject(self):
diff --git a/rdflib/plugins/parsers/rdfxml.py b/rdflib/plugins/parsers/rdfxml.py
index 976edf2c..76761edd 100644
--- a/rdflib/plugins/parsers/rdfxml.py
+++ b/rdflib/plugins/parsers/rdfxml.py
@@ -50,10 +50,10 @@ OLD_TERMS = [
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#bagID"),
]
-NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.li,] + OLD_TERMS
+NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.li, ] + OLD_TERMS
NODE_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.nodeID, RDF.about]
-PROPERTY_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.Description,] + OLD_TERMS
+PROPERTY_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.Description, ] + OLD_TERMS
PROPERTY_ATTRIBUTE_EXCEPTIONS = (
CORE_SYNTAX_TERMS + [RDF.Description, RDF.li] + OLD_TERMS
)
diff --git a/rdflib/plugins/parsers/trig.py b/rdflib/plugins/parsers/trig.py
index 96c94503..8f270de0 100644
--- a/rdflib/plugins/parsers/trig.py
+++ b/rdflib/plugins/parsers/trig.py
@@ -82,7 +82,7 @@ class TrigSinkParser(SinkParser):
if j < 0:
self.BadSyntax(argstr, i, "EOF found when expected graph")
- if argstr[j : j + 1] == "=": # optional = for legacy support
+ if argstr[j: j + 1] == "=": # optional = for legacy support
i = self.skipSpace(argstr, j + 1)
if i < 0:
@@ -90,7 +90,7 @@ class TrigSinkParser(SinkParser):
else:
i = j
- if argstr[i : i + 1] != "{":
+ if argstr[i: i + 1] != "{":
return -1 # the node wasn't part of a graph
j = i + 1
@@ -106,7 +106,7 @@ class TrigSinkParser(SinkParser):
if i < 0:
self.BadSyntax(argstr, i, "needed '}', found end.")
- if argstr[i : i + 1] == "}":
+ if argstr[i: i + 1] == "}":
j = i + 1
break
diff --git a/rdflib/plugins/parsers/trix.py b/rdflib/plugins/parsers/trix.py
index 56819514..3286aaeb 100644
--- a/rdflib/plugins/parsers/trix.py
+++ b/rdflib/plugins/parsers/trix.py
@@ -5,7 +5,7 @@ from rdflib.namespace import Namespace
from rdflib.term import URIRef
from rdflib.term import BNode
from rdflib.term import Literal
-from rdflib.graph import Graph, ConjunctiveGraph
+from rdflib.graph import Graph
from rdflib.exceptions import ParserError
from rdflib.parser import Parser
diff --git a/rdflib/plugins/serializers/nt.py b/rdflib/plugins/serializers/nt.py
index 94632155..9ff72539 100644
--- a/rdflib/plugins/serializers/nt.py
+++ b/rdflib/plugins/serializers/nt.py
@@ -54,25 +54,25 @@ def _nt_row(triple):
return u"%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), triple[2].n3())
-def _quoteLiteral(l):
+def _quoteLiteral(l_):
"""
a simpler version of term.Literal.n3()
"""
- encoded = _quote_encode(l)
+ encoded = _quote_encode(l_)
- if l.language:
- if l.datatype:
+ if l_.language:
+ if l_.datatype:
raise Exception("Literal has datatype AND language!")
- return "%s@%s" % (encoded, l.language)
- elif l.datatype:
- return "%s^^<%s>" % (encoded, l.datatype)
+ return "%s@%s" % (encoded, l_.language)
+ elif l_.datatype:
+ return "%s^^<%s>" % (encoded, l_.datatype)
else:
return "%s" % encoded
-def _quote_encode(l):
- return '"%s"' % l.replace("\\", "\\\\").replace("\n", "\\n").replace(
+def _quote_encode(l_):
+ return '"%s"' % l_.replace("\\", "\\\\").replace("\n", "\\n").replace(
'"', '\\"'
).replace("\r", "\\r")
@@ -87,8 +87,8 @@ def _nt_unicode_error_resolver(err):
fmt = u"\\u%04X" if c <= 0xFFFF else u"\\U%08X"
return fmt % c
- string = err.object[err.start : err.end]
- return ("".join(_replace_single(c) for c in string), err.end)
+ string = err.object[err.start: err.end]
+ return "".join(_replace_single(c) for c in string), err.end
codecs.register_error("_rdflib_nt_escape", _nt_unicode_error_resolver)
diff --git a/rdflib/plugins/serializers/rdfxml.py b/rdflib/plugins/serializers/rdfxml.py
index 425c0a7e..3f3c840d 100644
--- a/rdflib/plugins/serializers/rdfxml.py
+++ b/rdflib/plugins/serializers/rdfxml.py
@@ -87,7 +87,7 @@ class XMLSerializer(Serializer):
del self.__serialized
def subject(self, subject, depth=1):
- if not subject in self.__serialized:
+ if subject not in self.__serialized:
self.__serialized[subject] = 1
if isinstance(subject, (BNode, URIRef)):
@@ -230,7 +230,7 @@ class PrettyXMLSerializer(Serializer):
writer.pop(RDF.Description)
self.forceRDFAbout.remove(subject)
- elif not subject in self.__serialized:
+ elif subject not in self.__serialized:
self.__serialized[subject] = 1
type = first(store.objects(subject, RDF.type))
@@ -340,7 +340,7 @@ class PrettyXMLSerializer(Serializer):
elif isinstance(object, BNode):
if (
- not object in self.__serialized
+ object not in self.__serialized
and (object, None, None) in store
and len(list(store.subjects(object=object))) == 1
):
diff --git a/rdflib/plugins/serializers/trig.py b/rdflib/plugins/serializers/trig.py
index 432224e0..abf8d33e 100644
--- a/rdflib/plugins/serializers/trig.py
+++ b/rdflib/plugins/serializers/trig.py
@@ -5,8 +5,8 @@ See <http://www.w3.org/TR/trig/> for syntax specification.
from collections import defaultdict
-from rdflib.plugins.serializers.turtle import TurtleSerializer, _GEN_QNAME_FOR_DT, VERB
-from rdflib.term import BNode, Literal
+from rdflib.plugins.serializers.turtle import TurtleSerializer
+from rdflib.term import BNode
__all__ = ["TrigSerializer"]
diff --git a/rdflib/plugins/serializers/turtle.py b/rdflib/plugins/serializers/turtle.py
index 52693a8c..a2270674 100644
--- a/rdflib/plugins/serializers/turtle.py
+++ b/rdflib/plugins/serializers/turtle.py
@@ -391,28 +391,28 @@ class TurtleSerializer(RecursiveSerializer):
return True
- def isValidList(self, l):
+ def isValidList(self, l_):
"""
Checks if l is a valid RDF list, i.e. no nodes have other properties.
"""
try:
- if self.store.value(l, RDF.first) is None:
+ if self.store.value(l_, RDF.first) is None:
return False
except:
return False
- while l:
- if l != RDF.nil and len(list(self.store.predicate_objects(l))) != 2:
+ while l_:
+ if l_ != RDF.nil and len(list(self.store.predicate_objects(l_))) != 2:
return False
- l = self.store.value(l, RDF.rest)
+ l_ = self.store.value(l_, RDF.rest)
return True
- def doList(self, l):
- while l:
- item = self.store.value(l, RDF.first)
+ def doList(self, l_):
+ while l_:
+ item = self.store.value(l_, RDF.first)
if item is not None:
self.path(item, OBJECT)
- self.subjectDone(l)
- l = self.store.value(l, RDF.rest)
+ self.subjectDone(l_)
+ l_ = self.store.value(l_, RDF.rest)
def predicateList(self, subject, newline=False):
properties = self.buildPredicateHash(subject)
diff --git a/rdflib/plugins/serializers/xmlwriter.py b/rdflib/plugins/serializers/xmlwriter.py
index b6f0acb5..99d1e767 100644
--- a/rdflib/plugins/serializers/xmlwriter.py
+++ b/rdflib/plugins/serializers/xmlwriter.py
@@ -86,7 +86,7 @@ class XMLWriter(object):
def text(self, text):
self.__close_start_tag()
- if "<" in text and ">" in text and not "]]>" in text:
+ if "<" in text and ">" in text and "]]>" not in text:
self.stream.write("<![CDATA[")
self.stream.write(text)
self.stream.write("]]>")
@@ -100,8 +100,8 @@ class XMLWriter(object):
for pre, ns in self.extra_ns.items():
if uri.startswith(ns):
if pre != "":
- return ":".join(pre, uri[len(ns) :])
+ return ":".join(pre, uri[len(ns):])
else:
- return uri[len(ns) :]
+ return uri[len(ns):]
return self.nm.qname_strict(uri)
diff --git a/rdflib/plugins/sleepycat.py b/rdflib/plugins/sleepycat.py
index 7729969e..735d3c3a 100644
--- a/rdflib/plugins/sleepycat.py
+++ b/rdflib/plugins/sleepycat.py
@@ -110,8 +110,8 @@ class Sleepycat(Store):
dbsetflags = 0
# create and open the DBs
- self.__indicies = [None,] * 3
- self.__indicies_info = [None,] * 3
+ self.__indicies = [None, ] * 3
+ self.__indicies_info = [None, ] * 3
for i in range(0, 3):
index_name = to_key_func(i)(
("s".encode("latin-1"), "p".encode("latin-1"), "o".encode("latin-1")),
diff --git a/rdflib/plugins/sparql/algebra.py b/rdflib/plugins/sparql/algebra.py
index f84e51c9..cfd4f410 100644
--- a/rdflib/plugins/sparql/algebra.py
+++ b/rdflib/plugins/sparql/algebra.py
@@ -95,7 +95,7 @@ def _knownTerms(triple, varsknown, varscount):
)
-def reorderTriples(l):
+def reorderTriples(l_):
"""
Reorder triple patterns so that we execute the
ones with most bindings first
@@ -105,10 +105,10 @@ def reorderTriples(l):
if isinstance(term, (Variable, BNode)):
varsknown.add(term)
- l = [(None, x) for x in l]
+ l_ = [(None, x) for x in l_]
varsknown = set()
varscount = collections.defaultdict(int)
- for t in l:
+ for t in l_:
for c in t[1]:
if isinstance(c, (Variable, BNode)):
varscount[c] += 1
@@ -121,17 +121,17 @@ def reorderTriples(l):
# we sort by decorate/undecorate, since we need the value of the sort keys
- while i < len(l):
- l[i:] = sorted((_knownTerms(x[1], varsknown, varscount), x[1]) for x in l[i:])
- t = l[i][0][0] # top block has this many terms bound
+ while i < len(l_):
+ l_[i:] = sorted((_knownTerms(x[1], varsknown, varscount), x[1]) for x in l_[i:])
+ t = l_[i][0][0] # top block has this many terms bound
j = 0
- while i + j < len(l) and l[i + j][0][0] == t:
- for c in l[i + j][1]:
+ while i + j < len(l_) and l_[i + j][0][0] == t:
+ for c in l_[i + j][1]:
_addvar(c, varsknown)
j += 1
i += 1
- return [x[1] for x in l]
+ return [x[1] for x in l_]
def triples(l):
@@ -826,7 +826,7 @@ if __name__ == "__main__":
import os.path
if os.path.exists(sys.argv[1]):
- q = file(sys.argv[1])
+ q = open(sys.argv[1]).read()
else:
q = sys.argv[1]
diff --git a/rdflib/plugins/sparql/evaluate.py b/rdflib/plugins/sparql/evaluate.py
index 43b3d0b0..6c47eec3 100644
--- a/rdflib/plugins/sparql/evaluate.py
+++ b/rdflib/plugins/sparql/evaluate.py
@@ -42,7 +42,6 @@ from rdflib.plugins.sparql.evalutils import (
)
from rdflib.plugins.sparql.aggregates import Aggregator
-from rdflib.plugins.sparql.algebra import Join, ToMultiSet, Values
from rdflib.plugins.sparql import parser
diff --git a/rdflib/plugins/sparql/evalutils.py b/rdflib/plugins/sparql/evalutils.py
index 8bf1981d..72b767a1 100644
--- a/rdflib/plugins/sparql/evalutils.py
+++ b/rdflib/plugins/sparql/evalutils.py
@@ -3,7 +3,7 @@ import collections
from rdflib.term import Variable, Literal, BNode, URIRef
from rdflib.plugins.sparql.operators import EBV
-from rdflib.plugins.sparql.parserutils import Expr, CompValue, value
+from rdflib.plugins.sparql.parserutils import Expr, CompValue
from rdflib.plugins.sparql.sparql import SPARQLError, NotBoundError
diff --git a/rdflib/plugins/sparql/operators.py b/rdflib/plugins/sparql/operators.py
index 8f644a1f..ef995ce0 100644
--- a/rdflib/plugins/sparql/operators.py
+++ b/rdflib/plugins/sparql/operators.py
@@ -168,16 +168,16 @@ def Builtin_CEIL(expr, ctx):
http://www.w3.org/TR/sparql11-query/#func-ceil
"""
- l = expr.arg
- return Literal(int(math.ceil(numeric(l))), datatype=l.datatype)
+ l_ = expr.arg
+ return Literal(int(math.ceil(numeric(l_))), datatype=l_.datatype)
def Builtin_FLOOR(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-floor
"""
- l = expr.arg
- return Literal(int(math.floor(numeric(l))), datatype=l.datatype)
+ l_ = expr.arg
+ return Literal(int(math.floor(numeric(l_))), datatype=l_.datatype)
def Builtin_ROUND(expr, ctx):
@@ -189,10 +189,10 @@ def Builtin_ROUND(expr, ctx):
# but in py3k bound was changed to
# "round-to-even" behaviour
# this is an ugly work-around
- l = expr.arg
- v = numeric(l)
+ l_ = expr.arg
+ v = numeric(l_)
v = int(Decimal(v).quantize(1, ROUND_HALF_UP))
- return Literal(v, datatype=l.datatype)
+ return Literal(v, datatype=l_.datatype)
def Builtin_REGEX(expr, ctx):
@@ -371,7 +371,7 @@ def Builtin_STRAFTER(expr, ctx):
if i == -1:
return Literal("")
else:
- return Literal(a[i + len(b) :], lang=a.language, datatype=a.datatype)
+ return Literal(a[i + len(b):], lang=a.language, datatype=a.datatype)
def Builtin_CONTAINS(expr, ctx):
@@ -407,9 +407,9 @@ def Builtin_SUBSTR(expr, ctx):
def Builtin_STRLEN(e, ctx):
- l = string(e.arg)
+ l_ = string(e.arg)
- return Literal(len(l))
+ return Literal(len(l_))
def Builtin_STR(e, ctx):
@@ -420,9 +420,9 @@ def Builtin_STR(e, ctx):
def Builtin_LCASE(e, ctx):
- l = string(e.arg)
+ l_ = string(e.arg)
- return Literal(l.lower(), datatype=l.datatype, lang=l.language)
+ return Literal(l_.lower(), datatype=l_.datatype, lang=l_.language)
def Builtin_LANGMATCHES(e, ctx):
@@ -528,9 +528,9 @@ def Builtin_TZ(e, ctx):
def Builtin_UCASE(e, ctx):
- l = string(e.arg)
+ l_ = string(e.arg)
- return Literal(l.upper(), datatype=l.datatype, lang=l.language)
+ return Literal(l_.upper(), datatype=l_.datatype, lang=l_.language)
def Builtin_LANG(e, ctx):
@@ -542,19 +542,19 @@ def Builtin_LANG(e, ctx):
with an empty language tag.
"""
- l = literal(e.arg)
- return Literal(l.language or "")
+ l_ = literal(e.arg)
+ return Literal(l_.language or "")
def Builtin_DATATYPE(e, ctx):
- l = e.arg
- if not isinstance(l, Literal):
- raise SPARQLError("Can only get datatype of literal: %r" % l)
- if l.language:
+ l_ = e.arg
+ if not isinstance(l_, Literal):
+ raise SPARQLError("Can only get datatype of literal: %r" % l_)
+ if l_.language:
return RDF_langString
- if not l.datatype and not l.language:
+ if not l_.datatype and not l_.language:
return XSD.string
- return l.datatype
+ return l_.datatype
def Builtin_sameTerm(e, ctx):
@@ -825,7 +825,7 @@ def RelationalExpression(e, ctx):
else:
raise error
- if not op in ("=", "!=", "IN", "NOT IN"):
+ if op not in ("=", "!=", "IN", "NOT IN"):
if not isinstance(expr, Literal):
raise SPARQLError(
"Compare other than =, != of non-literals is an error: %r" % expr
@@ -1062,15 +1062,15 @@ def _lang_range_check(range, lang):
"""
- def _match(r, l):
+ def _match(r, l_):
"""
Matching of a range and language item: either range is a wildcard
or the two are equal
@param r: language range item
- @param l: language tag item
+ @param l_: language tag item
@rtype: boolean
"""
- return r == "*" or r == l
+ return r == "*" or r == l_
rangeList = range.strip().lower().split("-")
langList = lang.strip().lower().split("-")
diff --git a/rdflib/plugins/sparql/parser.py b/rdflib/plugins/sparql/parser.py
index e8c37a2e..2124aad2 100644
--- a/rdflib/plugins/sparql/parser.py
+++ b/rdflib/plugins/sparql/parser.py
@@ -61,7 +61,7 @@ def expandTriples(terms):
res = []
if DEBUG:
print("Terms", terms)
- l = len(terms)
+ l_ = len(terms)
for i, t in enumerate(terms):
if t == ",":
res.extend([res[-3], res[-2]])
@@ -78,7 +78,7 @@ def expandTriples(terms):
if len(t) > 1:
res += t
# is this bnode the subject of more triples?
- if i + 1 < l and terms[i + 1] not in ".,;":
+ if i + 1 < l_ and terms[i + 1] not in ".,;":
res.append(t[0])
elif isinstance(t, ParseResults):
res += t.asList()
@@ -1058,9 +1058,9 @@ MultiplicativeExpression = Comp(
# [116] AdditiveExpression ::= MultiplicativeExpression ( '+' MultiplicativeExpression | '-' MultiplicativeExpression | ( NumericLiteralPositive | NumericLiteralNegative ) ( ( '*' UnaryExpression ) | ( '/' UnaryExpression ) )* )*
# NOTE: The second part of this production is there because:
-### "In signed numbers, no white space is allowed between the sign and the number. The AdditiveExpression grammar rule allows for this by covering the two cases of an expression followed by a signed number. These produce an addition or subtraction of the unsigned number as appropriate."
+# "In signed numbers, no white space is allowed between the sign and the number. The AdditiveExpression grammar rule allows for this by covering the two cases of an expression followed by a signed number. These produce an addition or subtraction of the unsigned number as appropriate."
-# Here (I think) this is not nescessary since pyparsing doesn't separate
+# Here (I think) this is not necessary since pyparsing doesn't separate
# tokenizing and parsing
diff --git a/rdflib/plugins/sparql/parserutils.py b/rdflib/plugins/sparql/parserutils.py
index e67b754b..c30e10d6 100644
--- a/rdflib/plugins/sparql/parserutils.py
+++ b/rdflib/plugins/sparql/parserutils.py
@@ -4,7 +4,7 @@ from collections import OrderedDict
from pyparsing import TokenConverter, ParseResults, originalTextFor
-from rdflib import BNode, Variable, URIRef
+from rdflib import BNode, Variable
DEBUG = True
DEBUG = False
@@ -235,7 +235,7 @@ class Comp(TokenConverter):
for t in tokenList:
if isinstance(t, ParamValue):
if t.isList:
- if not t.name in res:
+ if t.name not in res:
res[t.name] = plist()
res[t.name].append(t.tokenList)
else:
diff --git a/rdflib/plugins/sparql/results/graph.py b/rdflib/plugins/sparql/results/graph.py
index 13e256bb..77715d07 100644
--- a/rdflib/plugins/sparql/results/graph.py
+++ b/rdflib/plugins/sparql/results/graph.py
@@ -1,6 +1,6 @@
from rdflib import Graph
-from rdflib.query import Result, ResultParser, ResultSerializer, ResultException
+from rdflib.query import Result, ResultParser
class GraphResultParser(ResultParser):
diff --git a/rdflib/plugins/sparql/results/tsvresults.py b/rdflib/plugins/sparql/results/tsvresults.py
index bdfa2d4a..2406cf4e 100644
--- a/rdflib/plugins/sparql/results/tsvresults.py
+++ b/rdflib/plugins/sparql/results/tsvresults.py
@@ -108,7 +108,7 @@ class TSVResultParser(ResultParser):
if __name__ == "__main__":
import sys
- r = Result.parse(file(sys.argv[1]), format="tsv")
+ r = Result.parse(source=sys.argv[1], format="tsv")
print(r.vars)
print(r.bindings)
# print r.serialize(format='json')
diff --git a/rdflib/plugins/sparql/results/xmlresults.py b/rdflib/plugins/sparql/results/xmlresults.py
index aa4f796f..1511783f 100644
--- a/rdflib/plugins/sparql/results/xmlresults.py
+++ b/rdflib/plugins/sparql/results/xmlresults.py
@@ -1,5 +1,4 @@
import logging
-from io import BytesIO
from xml.sax.saxutils import XMLGenerator
from xml.dom import XML_NAMESPACE
@@ -7,7 +6,7 @@ from xml.sax.xmlreader import AttributesNSImpl
from rdflib.compat import etree
-from rdflib import Literal, URIRef, BNode, Graph, Variable
+from rdflib import Literal, URIRef, BNode, Variable
from rdflib.query import Result, ResultParser, ResultSerializer, ResultException
diff --git a/rdflib/plugins/sparql/sparql.py b/rdflib/plugins/sparql/sparql.py
index 5b6eab2e..417edc03 100644
--- a/rdflib/plugins/sparql/sparql.py
+++ b/rdflib/plugins/sparql/sparql.py
@@ -287,7 +287,7 @@ class QueryContext(object):
return graph.load(source, format="nt", **kwargs)
except:
raise Exception(
- "Could not load %s as either RDF/XML, N3 or NTriples" % (source)
+ "Could not load %s as either RDF/XML, N3 or NTriples" % source
)
if not rdflib.plugins.sparql.SPARQL_LOAD_GRAPHS:
diff --git a/rdflib/plugins/stores/sparqlstore.py b/rdflib/plugins/stores/sparqlstore.py
index 63e92f54..1bdf2d32 100644
--- a/rdflib/plugins/stores/sparqlstore.py
+++ b/rdflib/plugins/stores/sparqlstore.py
@@ -5,12 +5,6 @@ This is an RDFLib store around Ivan Herman et al.'s SPARQL service wrapper.
This was first done in layer-cake, and then ported to RDFLib
"""
-
-# Defines some SPARQL keywords
-LIMIT = "LIMIT"
-OFFSET = "OFFSET"
-ORDERBY = "ORDER BY"
-
import re
import collections
@@ -23,6 +17,10 @@ from rdflib import Variable, BNode
from rdflib.graph import DATASET_DEFAULT_GRAPH_ID
from rdflib.term import Node
+# Defines some SPARQL keywords
+LIMIT = "LIMIT"
+OFFSET = "OFFSET"
+ORDERBY = "ORDER BY"
BNODE_IDENT_PATTERN = re.compile("(?P<label>_\:[^\s]+)")
@@ -406,9 +404,9 @@ class SPARQLUpdateStore(SPARQLStore):
where_pattern = re.compile(r"""(?P<where>WHERE\s*\{)""", re.IGNORECASE)
- ##################################################################
- ### Regex for injecting GRAPH blocks into updates on a context ###
- ##################################################################
+ ##############################################################
+ # Regex for injecting GRAPH blocks into updates on a context #
+ ##############################################################
# Observations on the SPARQL grammar (http://www.w3.org/TR/2013/REC-sparql11-query-20130321/):
# 1. Only the terminals STRING_LITERAL1, STRING_LITERAL2,
@@ -726,12 +724,12 @@ class SPARQLUpdateStore(SPARQLStore):
if match.group("block_start") is not None:
level += 1
if level == 1:
- modified_query.append(query[pos : match.end()])
+ modified_query.append(query[pos: match.end()])
modified_query.append(graph_block_open)
pos = match.end()
elif match.group("block_end") is not None:
if level == 1:
- since_previous_pos = query[pos : match.start()]
+ since_previous_pos = query[pos: match.start()]
if modified_query[-1] is graph_block_open and (
since_previous_pos == "" or since_previous_pos.isspace()
):
diff --git a/rdflib/term.py b/rdflib/term.py
index c2833fd6..a93e9c50 100644
--- a/rdflib/term.py
+++ b/rdflib/term.py
@@ -39,8 +39,6 @@ __all__ = [
]
import logging
-
-logger = logging.getLogger(__name__)
import warnings
import math
@@ -68,6 +66,9 @@ from urllib.parse import urldefrag
from urllib.parse import urljoin
from urllib.parse import urlparse
+from decimal import Decimal
+
+logger = logging.getLogger(__name__)
skolem_genid = "/.well-known/genid/"
rdflib_skolem_genid = "/.well-known/genid/rdflib/"
skolems = {}
@@ -77,7 +78,7 @@ _invalid_uri_chars = '<>" {}|\\^`'
def _is_valid_uri(uri):
- return all(map(lambda c: ord(c) > 256 or not c in _invalid_uri_chars, uri))
+ return all(map(lambda c: ord(c) > 256 or c not in _invalid_uri_chars, uri))
_lang_tag_regex = compile("^[a-zA-Z]+(?:-[a-zA-Z0-9]+)*$")
@@ -302,7 +303,7 @@ class URIRef(Identifier):
"""
if isinstance(self, RDFLibGenid):
parsed_uri = urlparse("%s" % self)
- return BNode(value=parsed_uri.path[len(rdflib_skolem_genid) :])
+ return BNode(value=parsed_uri.path[len(rdflib_skolem_genid):])
elif isinstance(self, Genid):
bnode_id = "%s" % self
if bnode_id in skolems:
@@ -918,8 +919,8 @@ class Literal(Identifier):
if self.datatype and other.datatype:
# two datatyped literals
if (
- not self.datatype in XSDToPython
- or not other.datatype in XSDToPython
+ self.datatype not in XSDToPython
+ or other.datatype not in XSDToPython
):
# non XSD DTs must match
if self.datatype != other.datatype:
@@ -1525,8 +1526,6 @@ def _castPythonToLiteral(obj, datatype):
return obj, None # TODO: is this right for the fall through case?
-from decimal import Decimal
-
# Mappings from Python types to XSD datatypes and back (borrowed from sparta)
# datetime instances are also instances of date... so we need to order these.
diff --git a/rdflib/tools/csv2rdf.py b/rdflib/tools/csv2rdf.py
index cec1005c..812ffadc 100644
--- a/rdflib/tools/csv2rdf.py
+++ b/rdflib/tools/csv2rdf.py
@@ -114,12 +114,12 @@ def toPropertyLabel(label):
return label
-def index(l, i):
+def index(l_, i):
"""return a set of indexes from a list
>>> index([1,2,3],(0,2))
(1, 3)
"""
- return tuple([l[x] for x in i])
+ return tuple([l_[x] for x in i])
def csv_reader(csv_data, dialect=csv.excel, **kwargs):
@@ -357,20 +357,20 @@ class CSV2RDF(object):
# output class/property definitions
self.triple(self.CLASS, RDF.type, RDFS.Class)
for i in range(len(headers)):
- h, l = headers[i], header_labels[i]
- if h == "" or l == "":
+ h, l_ = headers[i], header_labels[i]
+ if h == "" or l_ == "":
continue
if self.COLUMNS.get(i, self.DEFAULT) == "ignore":
continue
self.triple(h, RDF.type, RDF.Property)
- self.triple(h, RDFS.label, rdflib.Literal(toPropertyLabel(l)))
+ self.triple(h, RDFS.label, rdflib.Literal(toPropertyLabel(l_)))
self.triple(h, RDFS.domain, self.CLASS)
self.triple(
h, RDFS.range, self.COLUMNS.get(i, default_node_make).range()
)
rows = 0
- for l in csvreader:
+ for l_ in csvreader:
try:
if self.IDENT == "auto":
uri = self.BASE["%d" % rows]
@@ -379,21 +379,21 @@ class CSV2RDF(object):
"_".join(
[
quote(x.encode("utf8").replace(" ", "_"), safe="")
- for x in index(l, self.IDENT)
+ for x in index(l_, self.IDENT)
]
)
]
if self.LABEL:
self.triple(
- uri, RDFS.label, rdflib.Literal(" ".join(index(l, self.LABEL)))
+ uri, RDFS.label, rdflib.Literal(" ".join(index(l_, self.LABEL)))
)
if self.CLASS:
# type triple
self.triple(uri, RDF.type, self.CLASS)
- for i, x in enumerate(l):
+ for i, x in enumerate(l_):
x = x.strip()
if x != "":
if self.COLUMNS.get(i, self.DEFAULT) == "ignore":
@@ -425,9 +425,9 @@ class CSV2RDF(object):
# output types/labels for generated URIs
classes = set()
- for l, x in uris.items():
+ for l_, x in uris.items():
u, c = x
- self.triple(u, RDFS.label, rdflib.Literal(l))
+ self.triple(u, RDFS.label, rdflib.Literal(l_))
if c:
c = rdflib.URIRef(c)
classes.add(c)
diff --git a/rdflib/tools/rdfpipe.py b/rdflib/tools/rdfpipe.py
index 4be352a0..6ec9e6c2 100644
--- a/rdflib/tools/rdfpipe.py
+++ b/rdflib/tools/rdfpipe.py
@@ -15,7 +15,6 @@ import rdflib
from rdflib import plugin
from rdflib.store import Store
from rdflib.graph import ConjunctiveGraph
-from rdflib.namespace import RDF, RDFS, OWL, XSD
from rdflib.parser import Parser
from rdflib.serializer import Serializer
diff --git a/rdflib/tools/rdfs2dot.py b/rdflib/tools/rdfs2dot.py
index 4c31516f..e8cf5813 100644
--- a/rdflib/tools/rdfs2dot.py
+++ b/rdflib/tools/rdfs2dot.py
@@ -90,13 +90,13 @@ def rdfs2dot(g, stream, opts={}):
def label(x, g):
- l = g.value(x, RDFS.label)
- if l is None:
+ l_ = g.value(x, RDFS.label)
+ if l_ is None:
try:
- l = g.namespace_manager.compute_qname(x)[2]
+ l_ = g.namespace_manager.compute_qname(x)[2]
except:
pass # bnodes and some weird URIs cannot be split
- return l
+ return l_
stream.write(u'digraph { \n node [ fontname="DejaVu Sans" ] ; \n')
@@ -113,10 +113,10 @@ def rdfs2dot(g, stream, opts={}):
g.objects(x, RDFS.domain), g.objects(x, RDFS.range)
):
if b in XSDTERMS or b == RDFS.Literal:
- l = label(b, g)
+ l_ = label(b, g)
if b == RDFS.Literal:
- l = "literal"
- fields[node(a)].add((label(x, g), l))
+ l_ = "literal"
+ fields[node(a)].add((label(x, g), l_))
else:
# if a in nodes and b in nodes:
stream.write(
diff --git a/rdflib/util.py b/rdflib/util.py
index 38faf06f..57b20915 100644
--- a/rdflib/util.py
+++ b/rdflib/util.py
@@ -180,7 +180,7 @@ def from_n3(s, default=None, backend=None, nsm=None):
else:
quotes = '"'
value, rest = s.rsplit(quotes, 1)
- value = value[len(quotes) :] # strip leading quotes
+ value = value[len(quotes):] # strip leading quotes
datatype = None
language = None
@@ -191,7 +191,7 @@ def from_n3(s, default=None, backend=None, nsm=None):
# datatype has to come after lang-tag so ignore everything before
# see: http://www.w3.org/TR/2011/WD-turtle-20110809/
# #prod-turtle2-RDFLiteral
- datatype = from_n3(rest[dtoffset + 2 :], default, backend, nsm)
+ datatype = from_n3(rest[dtoffset + 2:], default, backend, nsm)
else:
if rest.startswith("@"):
language = rest[1:] # strip leading at sign