From 16e6f7dee7f02bb81aa6b385b982dcdda5b99286 Mon Sep 17 00:00:00 2001
From: Steve Dower
Date: Thu, 7 Mar 2019 08:02:26 -0800
Subject: bpo-36216: Add check for characters in netloc that normalize to
 separators (GH-12201)

---
 Lib/urllib/parse.py | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

(limited to 'Lib/urllib/parse.py')

diff --git a/Lib/urllib/parse.py b/Lib/urllib/parse.py
index dc2171144f..8b6c9b1060 100644
--- a/Lib/urllib/parse.py
+++ b/Lib/urllib/parse.py
@@ -396,6 +396,21 @@ def _splitnetloc(url, start=0):
             delim = min(delim, wdelim)     # use earliest delim position
     return url[start:delim], url[delim:]   # return (domain, rest)
 
+def _checknetloc(netloc):
+    if not netloc or netloc.isascii():
+        return
+    # looking for characters like \u2100 that expand to 'a/c'
+    # IDNA uses NFKC equivalence, so normalize for this check
+    import unicodedata
+    netloc2 = unicodedata.normalize('NFKC', netloc)
+    if netloc == netloc2:
+        return
+    _, _, netloc = netloc.rpartition('@') # anything to the left of '@' is okay
+    for c in '/?#@:':
+        if c in netloc2:
+            raise ValueError("netloc '" + netloc2 + "' contains invalid " +
+                             "characters under NFKC normalization")
+
 def urlsplit(url, scheme='', allow_fragments=True):
     """Parse a URL into 5 components:
     <scheme>://<netloc>/<path>?<query>#<fragment>
@@ -424,6 +439,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
                 url, fragment = url.split('#', 1)
             if '?' in url:
                 url, query = url.split('?', 1)
+            _checknetloc(netloc)
             v = SplitResult('http', netloc, url, query, fragment)
             _parse_cache[key] = v
             return _coerce_result(v)
@@ -447,6 +463,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
         url, fragment = url.split('#', 1)
     if '?' in url:
         url, query = url.split('?', 1)
+    _checknetloc(netloc)
     v = SplitResult(scheme, netloc, url, query, fragment)
     _parse_cache[key] = v
     return _coerce_result(v)
--
cgit v1.2.1
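
Illustrative usage sketch (not part of the patch): assuming a Python build that
includes this change, the new _checknetloc() helper makes urlsplit() reject a
netloc containing characters whose NFKC normalization introduces URL separators,
for example U+2100 (ACCOUNT OF), which normalizes to 'a/c'. The hostnames and
URLs below are placeholders chosen only for illustration.

    from urllib.parse import urlsplit

    # Plain ASCII netlocs are unaffected by the new check.
    print(urlsplit('https://example.com/path'))
    # SplitResult(scheme='https', netloc='example.com', path='/path', query='', fragment='')

    # U+2100 in the host NFKC-normalizes to 'a/c', which would smuggle a '/'
    # into the netloc, so the patched urlsplit() now raises ValueError.
    try:
        urlsplit('https://example.\u2100.com/path')
    except ValueError as exc:
        print(exc)
        # netloc 'example.a/c.com' contains invalid characters under NFKC normalization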