author     Hye-Shik Chang <hyeshik@gmail.com>   2005-12-10 17:44:27 +0000
committer  Hye-Shik Chang <hyeshik@gmail.com>   2005-12-10 17:44:27 +0000
commit     3a5583fa4df80c52c72475dbb2254b5da50ca4a4 (patch)
tree       a98267568ca588ac27df1d54b5e32bb7265e1c47 /Lib/test/test_normalization.py
parent     44b95154c769b005c3d87224887d1b66d5b9ee81 (diff)
download   cpython-3a5583fa4df80c52c72475dbb2254b5da50ca4a4.tar.gz
Patch #1276356: Implement new resource "urlfetch" for regrtest.
This enables even impatient people to run tests that require remote files such as test_normalization and test_codecmaps_*.
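
The helper used below, open_urlresource(), is exported from test.test_support and is gated behind the new "urlfetch" resource. As a rough sketch of the idea only (the real helper lives in Lib/test/test_support.py and may differ in detail; requires() is the pre-existing test_support resource check, everything else here is illustrative), in Python 2 of that era:

    # Sketch of what an open_urlresource() helper can look like; not the
    # actual implementation from this patch.
    import os
    import urllib
    import urlparse

    from test.test_support import requires

    def open_urlresource(url):
        """Return an open file for a remote test data file, fetching it on demand."""
        # Skip the test unless the "urlfetch" resource was enabled on the
        # regrtest command line.
        requires('urlfetch')
        filename = urlparse.urlparse(url)[2].split('/')[-1]  # last URL path component
        # Reuse a copy that already sits next to the test sources, if any.
        for path in (os.path.curdir, os.path.pardir):
            fn = os.path.join(path, filename)
            if os.path.exists(fn):
                return open(fn)
        # Otherwise download the file once into the current directory and open it.
        fn, _ = urllib.urlretrieve(url, filename)
        return open(fn)
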
Diffstat (limited to 'Lib/test/test_normalization.py')
-rw-r--r--   Lib/test/test_normalization.py   22
1 file changed, 4 insertions(+), 18 deletions(-)
diff --git a/Lib/test/test_normalization.py b/Lib/test/test_normalization.py
index 0cbc2b49e9..7c86f75ed9 100644
--- a/Lib/test/test_normalization.py
+++ b/Lib/test/test_normalization.py
@@ -1,21 +1,11 @@
-from test.test_support import verbose, TestFailed, TestSkipped, verify
+from test.test_support import (verbose, TestFailed, TestSkipped, verify,
+                               open_urlresource)
import sys
import os
from unicodedata import normalize
TESTDATAFILE = "NormalizationTest-3.2.0" + os.extsep + "txt"
-
-# This search allows using a build directory just inside the source
-# directory, and saving just one copy of the test data in the source
-# tree, rather than having a copy in each build directory.
-# There might be a better way to do this.
-
-for path in [os.path.curdir, os.path.pardir]:
-    fn = os.path.join(path, TESTDATAFILE)
-    skip_expected = not os.path.exists(fn)
-    if not skip_expected:
-        TESTDATAFILE = fn
-        break
+TESTDATAURL = "http://www.unicode.org/Public/3.2-Update/" + TESTDATAFILE
class RangeError:
    pass
@@ -40,12 +30,8 @@ def unistr(data):
    return u"".join([unichr(x) for x in data])
def test_main():
-    if skip_expected:
-        raise TestSkipped(TESTDATAFILE + " not found, download from " +
-                          "http://www.unicode.org/Public/3.2-Update/" + TESTDATAFILE)
-
    part1_data = {}
-    for line in open(TESTDATAFILE):
+    for line in open_urlresource(TESTDATAURL):
        if '#' in line:
            line = line.split('#')[0]
        line = line.strip()
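
With this change the data file is fetched on demand rather than looked up next to the test sources. Because the download is gated behind the new "urlfetch" resource, a run that allows fetching looks something like "python Lib/test/regrtest.py -u urlfetch test_normalization"; without -u urlfetch the download is not attempted and the test is skipped.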