author     Charles Harris <charlesr.harris@gmail.com>      2014-07-30 18:06:28 -0600
committer  Julian Taylor <jtaylor.debian@googlemail.com>   2014-07-31 21:21:17 +0200
commit     01b0d7e82211b581aaff925e3ccc36cff9ac1895 (patch)
tree       8ec68353d5f09b9f0411948f1345ec79f5443b4c /numpy
parent     dec6658cdc10a23ad0e733fb52a814306033d88c (diff)
download   numpy-01b0d7e82211b581aaff925e3ccc36cff9ac1895.tar.gz
STY: Make files in numpy/lib PEP8 compliant.
The rules enforced are the same as those used for scipy.
Diffstat (limited to 'numpy')
-rw-r--r--  numpy/lib/_datasource.py    129
-rw-r--r--  numpy/lib/_iotools.py       169
-rw-r--r--  numpy/lib/_version.py         1
-rw-r--r--  numpy/lib/arraypad.py         1
-rw-r--r--  numpy/lib/arraysetops.py     14
-rw-r--r--  numpy/lib/arrayterator.py    13
-rw-r--r--  numpy/lib/financial.py       62
-rw-r--r--  numpy/lib/format.py         102
-rw-r--r--  numpy/lib/function_base.py   12
-rw-r--r--  numpy/lib/index_tricks.py    80
-rw-r--r--  numpy/lib/npyio.py           29
-rw-r--r--  numpy/lib/polynomial.py      67
-rw-r--r--  numpy/lib/recfunctions.py   116
-rw-r--r--  numpy/lib/scimath.py         16
-rw-r--r--  numpy/lib/setup.py            3
-rw-r--r--  numpy/lib/shape_base.py      28
-rw-r--r--  numpy/lib/stride_tricks.py   22
-rw-r--r--  numpy/lib/type_check.py      62
-rw-r--r--  numpy/lib/user_array.py     139
-rw-r--r--  numpy/lib/utils.py          130
20 files changed, 678 insertions(+), 517 deletions(-)
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 495bec49e..338c8b331 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -1,19 +1,21 @@
"""A file interface for handling local and remote data files.
-The goal of datasource is to abstract some of the file system operations when
-dealing with data files so the researcher doesn't have to know all the
+
+The goal of datasource is to abstract some of the file system operations
+when dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
-It should work seemlessly with standard file IO operations and the os module.
+It should work seamlessly with standard file IO operations and the os
+module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
-DataSource files can also be compressed or uncompressed. Currently only gzip
-and bz2 are supported.
+DataSource files can also be compressed or uncompressed. Currently only
+gzip and bz2 are supported.
Example::
@@ -50,9 +52,9 @@ class _FileOpeners(object):
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
- supported file format. Attribute lookup is implemented in such a way that
- an instance of `_FileOpeners` itself can be indexed with the keys of that
- dictionary. Currently uncompressed files as well as files
+ supported file format. Attribute lookup is implemented in such a way
+ that an instance of `_FileOpeners` itself can be indexed with the keys
+ of that dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
@@ -68,9 +70,11 @@ class _FileOpeners(object):
True
"""
+
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
+
def _load(self):
if self._loaded:
return
@@ -104,6 +108,7 @@ class _FileOpeners(object):
"""
self._load()
return list(self._file_openers.keys())
+
def __getitem__(self, key):
self._load()
return self._file_openers[key]
@@ -114,8 +119,8 @@ def open(path, mode='r', destpath=os.curdir):
"""
Open `path` with `mode` and return the file object.
- If ``path`` is an URL, it will be downloaded, stored in the `DataSource`
- `destpath` directory and opened from there.
+ If ``path`` is an URL, it will be downloaded, stored in the
+ `DataSource` `destpath` directory and opened from there.
Parameters
----------
@@ -123,12 +128,12 @@ def open(path, mode='r', destpath=os.curdir):
Local file path or URL to open.
mode : str, optional
Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
- append. Available modes depend on the type of object specified by path.
- Default is 'r'.
+ append. Available modes depend on the type of object specified by
+ path. Default is 'r'.
destpath : str, optional
- Path to the directory where the source file gets downloaded to for use.
- If `destpath` is None, a temporary directory will be created. The
- default path is the current directory.
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
Returns
-------
@@ -153,15 +158,15 @@ class DataSource (object):
A generic data source file (file, http, ftp, ...).
DataSources can be local files or remote files/URLs. The files may
- also be compressed or uncompressed. DataSource hides some of the low-level
- details of downloading the file, allowing you to simply pass in a valid
- file path (or URL) and obtain a file object.
+ also be compressed or uncompressed. DataSource hides some of the
+ low-level details of downloading the file, allowing you to simply pass
+ in a valid file path (or URL) and obtain a file object.
Parameters
----------
destpath : str or None, optional
- Path to the directory where the source file gets downloaded to for use.
- If `destpath` is None, a temporary directory will be created.
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Notes
@@ -201,7 +206,7 @@ class DataSource (object):
self._destpath = os.path.abspath(destpath)
self._istmpdest = False
else:
- import tempfile # deferring import to improve startup time
+ import tempfile # deferring import to improve startup time
self._destpath = tempfile.mkdtemp()
self._istmpdest = True
@@ -212,6 +217,7 @@ class DataSource (object):
def _iszip(self, filename):
"""Test if the filename is a zip file by looking at the file extension.
+
"""
fname, ext = os.path.splitext(filename)
return ext in _file_openers.keys()
@@ -306,13 +312,12 @@ class DataSource (object):
def _findfile(self, path):
"""Searches for ``path`` and returns full path if found.
- If path is an URL, _findfile will cache a local copy and return
- the path to the cached file.
- If path is a local file, _findfile will return a path to that local
- file.
+ If path is an URL, _findfile will cache a local copy and return the
+ path to the cached file. If path is a local file, _findfile will
+ return a path to that local file.
- The search will include possible compressed versions of the file and
- return the first occurence found.
+ The search will include possible compressed versions of the file
+ and return the first occurrence found.
"""
@@ -391,7 +396,7 @@ class DataSource (object):
# Note: os.path.join treats '/' as os.sep on Windows
path = path.lstrip(os.sep).lstrip('/')
path = path.lstrip(os.pardir).lstrip('..')
- drive, path = os.path.splitdrive(path) # for Windows
+ drive, path = os.path.splitdrive(path) # for Windows
return path
def exists(self, path):
@@ -403,7 +408,8 @@ class DataSource (object):
- a local file.
- a remote URL that has been downloaded and stored locally in the
`DataSource` directory.
- - a remote URL that has not been downloaded, but is valid and accessible.
+ - a remote URL that has not been downloaded, but is valid and
+ accessible.
Parameters
----------
@@ -417,10 +423,10 @@ class DataSource (object):
Notes
-----
- When `path` is an URL, `exists` will return True if it's either stored
- locally in the `DataSource` directory, or is a valid remote URL.
- `DataSource` does not discriminate between the two, the file is accessible
- if it exists in either location.
+ When `path` is an URL, `exists` will return True if it's either
+ stored locally in the `DataSource` directory, or is a valid remote
+ URL. `DataSource` does not discriminate between the two, the file
+ is accessible if it exists in either location.
"""
# We import this here because importing urllib2 is slow and
@@ -456,17 +462,17 @@ class DataSource (object):
"""
Open and return file-like object.
- If `path` is an URL, it will be downloaded, stored in the `DataSource`
- directory and opened from there.
+ If `path` is an URL, it will be downloaded, stored in the
+ `DataSource` directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open.
mode : {'r', 'w', 'a'}, optional
- Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
- append. Available modes depend on the type of object specified by
- `path`. Default is 'r'.
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+ 'a' to append. Available modes depend on the type of object
+ specified by `path`. Default is 'r'.
Returns
-------
@@ -499,12 +505,14 @@ class Repository (DataSource):
"""
Repository(baseurl, destpath='.')
- A data repository where multiple DataSource's share a base URL/directory.
+ A data repository where multiple DataSource's share a base
+ URL/directory.
- `Repository` extends `DataSource` by prepending a base URL (or directory)
- to all the files it handles. Use `Repository` when you will be working
- with multiple files from one base URL. Initialize `Repository` with the
- base URL, then refer to each file by its filename only.
+ `Repository` extends `DataSource` by prepending a base URL (or
+ directory) to all the files it handles. Use `Repository` when you will
+ be working with multiple files from one base URL. Initialize
+ `Repository` with the base URL, then refer to each file by its filename
+ only.
Parameters
----------
@@ -512,8 +520,8 @@ class Repository (DataSource):
Path to the local directory or remote location that contains the
data files.
destpath : str or None, optional
- Path to the directory where the source file gets downloaded to for use.
- If `destpath` is None, a temporary directory will be created.
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
The default path is the current directory.
Examples
@@ -565,8 +573,9 @@ class Repository (DataSource):
Parameters
----------
path : str
- Can be a local file or a remote URL. This may, but does not have
- to, include the `baseurl` with which the `Repository` was initialized.
+ Can be a local file or a remote URL. This may, but does not
+ have to, include the `baseurl` with which the `Repository` was
+ initialized.
Returns
-------
@@ -591,8 +600,9 @@ class Repository (DataSource):
Parameters
----------
path : str
- Can be a local file or a remote URL. This may, but does not have
- to, include the `baseurl` with which the `Repository` was initialized.
+ Can be a local file or a remote URL. This may, but does not
+ have to, include the `baseurl` with which the `Repository` was
+ initialized.
Returns
-------
@@ -601,10 +611,10 @@ class Repository (DataSource):
Notes
-----
- When `path` is an URL, `exists` will return True if it's either stored
- locally in the `DataSource` directory, or is a valid remote URL.
- `DataSource` does not discriminate between the two, the file is accessible
- if it exists in either location.
+ When `path` is an URL, `exists` will return True if it's either
+ stored locally in the `DataSource` directory, or is a valid remote
+ URL. `DataSource` does not discriminate between the two, the file
+ is accessible if it exists in either location.
"""
return DataSource.exists(self, self._fullpath(path))
@@ -613,18 +623,19 @@ class Repository (DataSource):
"""
Open and return file-like object prepending Repository base URL.
- If `path` is an URL, it will be downloaded, stored in the DataSource
- directory and opened from there.
+ If `path` is an URL, it will be downloaded, stored in the
+ DataSource directory and opened from there.
Parameters
----------
path : str
Local file path or URL to open. This may, but does not have to,
- include the `baseurl` with which the `Repository` was initialized.
+ include the `baseurl` with which the `Repository` was
+ initialized.
mode : {'r', 'w', 'a'}, optional
- Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
- append. Available modes depend on the type of object specified by
- `path`. Default is 'r'.
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+ 'a' to append. Available modes depend on the type of object
+ specified by `path`. Default is 'r'.
Returns
-------
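
For orientation, the `DataSource` API whose docstrings are rewrapped above
is used roughly as follows; a minimal sketch, with an illustrative local
path rather than anything from the commit::

    import numpy as np

    # destpath=None caches downloaded files in a temporary directory.
    ds = np.DataSource(destpath=None)

    # Local paths open directly; URLs are downloaded into destpath first,
    # and .gz/.bz2 files are decompressed transparently on open().
    if ds.exists('/tmp/data.txt'):
        fh = ds.open('/tmp/data.txt')   # mode='r' by default
        print(fh.read())
        fh.close()
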
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index f54f2196c..1b1180893 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -11,7 +11,7 @@ import numpy.core.numeric as nx
from numpy.compat import asbytes, bytes, asbytes_nested, basestring
if sys.version_info[0] >= 3:
- from builtins import bool, int, float, complex, object, str
+ from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
@@ -20,6 +20,7 @@ else:
if sys.version_info[0] >= 3:
def _bytes_to_complex(s):
return complex(s.decode('ascii'))
+
def _bytes_to_name(s):
return s.decode('ascii')
else:
@@ -148,10 +149,6 @@ def flatten_dtype(ndtype, flatten_base=False):
return types
-
-
-
-
class LineSplitter(object):
"""
Object to split a string at a given delimiter or at given places.
@@ -188,6 +185,7 @@ class LineSplitter(object):
"""
return lambda input: [_.strip() for _ in method(input)]
#
+
def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True):
self.comments = comments
# Delimiter is a character
@@ -203,7 +201,8 @@ class LineSplitter(object):
delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
# Delimiter is a single integer
elif int(delimiter):
- (_handyman, delimiter) = (self._fixedwidth_splitter, int(delimiter))
+ (_handyman, delimiter) = (
+ self._fixedwidth_splitter, int(delimiter))
else:
(_handyman, delimiter) = (self._delimited_splitter, None)
self.delimiter = delimiter
@@ -212,6 +211,7 @@ class LineSplitter(object):
else:
self._handyman = _handyman
#
+
def _delimited_splitter(self, line):
if self.comments is not None:
line = line.split(self.comments)[0]
@@ -220,6 +220,7 @@ class LineSplitter(object):
return []
return line.split(self.delimiter)
#
+
def _fixedwidth_splitter(self, line):
if self.comments is not None:
line = line.split(self.comments)[0]
@@ -230,6 +231,7 @@ class LineSplitter(object):
slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
return [line[s] for s in slices]
#
+
def _variablewidth_splitter(self, line):
if self.comments is not None:
line = line.split(self.comments)[0]
@@ -238,32 +240,32 @@ class LineSplitter(object):
slices = self.delimiter
return [line[s] for s in slices]
#
+
def __call__(self, line):
return self._handyman(line)
-
class NameValidator(object):
"""
Object to validate a list of strings to use as field names.
The strings are stripped of any non alphanumeric character, and spaces
- are replaced by '_'. During instantiation, the user can define a list of
- names to exclude, as well as a list of invalid characters. Names in the
- exclusion list are appended a '_' character.
+ are replaced by '_'. During instantiation, the user can define a list
+ of names to exclude, as well as a list of invalid characters. Names in
+ the exclusion list are appended a '_' character.
- Once an instance has been created, it can be called with a list of names,
- and a list of valid names will be created.
- The `__call__` method accepts an optional keyword "default" that sets
- the default name in case of ambiguity. By default this is 'f', so
- that names will default to `f0`, `f1`, etc.
+ Once an instance has been created, it can be called with a list of
+ names, and a list of valid names will be created. The `__call__`
+ method accepts an optional keyword "default" that sets the default name
+ in case of ambiguity. By default this is 'f', so that names will
+ default to `f0`, `f1`, etc.
Parameters
----------
excludelist : sequence, optional
- A list of names to exclude. This list is appended to the default list
- ['return', 'file', 'print']. Excluded names are appended an underscore:
- for example, `file` becomes `file_` if supplied.
+ A list of names to exclude. This list is appended to the default
+ list ['return', 'file', 'print']. Excluded names are appended an
+ underscore: for example, `file` becomes `file_` if supplied.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
@@ -278,8 +280,8 @@ class NameValidator(object):
Notes
-----
- Calling an instance of `NameValidator` is the same as calling its method
- `validate`.
+ Calling an instance of `NameValidator` is the same as calling its
+ method `validate`.
Examples
--------
@@ -298,6 +300,7 @@ class NameValidator(object):
defaultexcludelist = ['return', 'file', 'print']
defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
#
+
def __init__(self, excludelist=None, deletechars=None,
case_sensitive=None, replace_space='_'):
# Process the exclusion list ..
@@ -326,18 +329,18 @@ class NameValidator(object):
def validate(self, names, defaultfmt="f%i", nbfields=None):
"""
- Validate a list of strings to use as field names for a structured array.
+ Validate a list of strings as field names for a structured array.
Parameters
----------
names : sequence of str
Strings to be validated.
defaultfmt : str, optional
- Default format string, used if validating a given string reduces its
- length to zero.
+ Default format string, used if validating a given string
+ reduces its length to zero.
nboutput : integer, optional
- Final number of validated names, used to expand or shrink the initial
- list of names.
+ Final number of validated names, used to expand or shrink the
+ initial list of names.
Returns
-------
@@ -346,8 +349,8 @@ class NameValidator(object):
Notes
-----
- A `NameValidator` instance can be called directly, which is the same as
- calling `validate`. For examples, see `NameValidator`.
+ A `NameValidator` instance can be called directly, which is the
+ same as calling `validate`. For examples, see `NameValidator`.
"""
# Initial checks ..............
@@ -394,11 +397,11 @@ class NameValidator(object):
seen[item] = cnt + 1
return tuple(validatednames)
#
+
def __call__(self, names, defaultfmt="f%i", nbfields=None):
return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
-
def str2bool(value):
"""
Tries to transform a string supposed to represent a boolean to a boolean.
@@ -462,22 +465,22 @@ class ConversionWarning(UserWarning):
pass
-
class StringConverter(object):
"""
- Factory class for function transforming a string into another object (int,
- float).
+ Factory class for function transforming a string into another object
+ (int, float).
After initialization, an instance can be called to transform a string
- into another object. If the string is recognized as representing a missing
- value, a default value is returned.
+ into another object. If the string is recognized as representing a
+ missing value, a default value is returned.
Attributes
----------
func : function
Function used for the conversion.
default : any
- Default value to return when the input corresponds to a missing value.
+ Default value to return when the input corresponds to a missing
+ value.
type : type
Type of the output.
_status : int
@@ -494,14 +497,13 @@ class StringConverter(object):
If a `dtype`, specifies the input data type, used to define a basic
function and a default value for missing data. For example, when
`dtype` is float, the `func` attribute is set to `float` and the
- default value to `np.nan`.
- If a function, this function is used to convert a string to another
- object. In this case, it is recommended to give an associated default
- value as input.
+ default value to `np.nan`. If a function, this function is used to
+ convert a string to another object. In this case, it is recommended
+ to give an associated default value as input.
default : any, optional
- Value to return by default, that is, when the string to be converted
- is flagged as missing. If not given, `StringConverter` tries to supply
- a reasonable default value.
+ Value to return by default, that is, when the string to be
+ converted is flagged as missing. If not given, `StringConverter`
+ tries to supply a reasonable default value.
missing_values : sequence of str, optional
Sequence of strings indicating a missing value.
locked : bool, optional
@@ -517,19 +519,23 @@ class StringConverter(object):
(nx.string_, bytes, asbytes('???'))]
(_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
#
+
@classmethod
def _getdtype(cls, val):
"""Returns the dtype of the input variable."""
return np.array(val).dtype
#
+
@classmethod
def _getsubdtype(cls, val):
"""Returns the type of the dtype of the input variable."""
return np.array(val).dtype.type
#
- # This is a bit annoying. We want to return the "general" type in most cases
- # (ie. "string" rather than "S10"), but we want to return the specific type
- # for datetime64 (ie. "datetime64[us]" rather than "datetime64").
+ # This is a bit annoying. We want to return the "general" type in most
+ # cases (ie. "string" rather than "S10"), but we want to return the
+ # specific type for datetime64 (ie. "datetime64[us]" rather than
+ # "datetime64").
+
@classmethod
def _dtypeortype(cls, dtype):
"""Returns dtype for datetime64 and type of dtype otherwise."""
@@ -537,15 +543,17 @@ class StringConverter(object):
return dtype
return dtype.type
#
+
@classmethod
def upgrade_mapper(cls, func, default=None):
"""
- Upgrade the mapper of a StringConverter by adding a new function and its
- corresponding default.
+ Upgrade the mapper of a StringConverter by adding a new function and
+ its corresponding default.
- The input function (or sequence of functions) and its associated default
- value (if any) is inserted in penultimate position of the mapper.
- The corresponding type is estimated from the dtype of the default value.
+ The input function (or sequence of functions) and its associated
+ default value (if any) is inserted in penultimate position of the
+ mapper. The corresponding type is estimated from the dtype of the
+ default value.
Parameters
----------
@@ -577,6 +585,7 @@ class StringConverter(object):
for (fct, dft) in zip(func, default):
cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
#
+
def __init__(self, dtype_or_func=None, default=None, missing_values=None,
locked=False):
# Convert unicode (for Py3)
@@ -600,12 +609,13 @@ class StringConverter(object):
except TypeError:
# dtype_or_func must be a function, then
if not hasattr(dtype_or_func, '__call__'):
- errmsg = "The input argument `dtype` is neither a function"\
- " or a dtype (got '%s' instead)"
+ errmsg = ("The input argument `dtype` is neither a"
+ " function nor a dtype (got '%s' instead)")
raise TypeError(errmsg % type(dtype_or_func))
# Set the function
self.func = dtype_or_func
- # If we don't have a default, try to guess it or set it to None
+ # If we don't have a default, try to guess it or set it to
+ # None
if default is None:
try:
default = self.func(asbytes('0'))
@@ -638,7 +648,7 @@ class StringConverter(object):
elif issubclass(dtype.type, np.int64):
self.func = np.int64
else:
- self.func = lambda x : int(float(x))
+ self.func = lambda x: int(float(x))
# Store the list of strings corresponding to missing values.
if missing_values is None:
self.missing_values = set([asbytes('')])
@@ -652,12 +662,14 @@ class StringConverter(object):
self._checked = False
self._initial_default = default
#
+
def _loose_call(self, value):
try:
return self.func(value)
except ValueError:
return self.default
#
+
def _strict_call(self, value):
try:
return self.func(value)
@@ -668,18 +680,20 @@ class StringConverter(object):
return self.default
raise ValueError("Cannot convert string '%s'" % value)
#
+
def __call__(self, value):
return self._callingfunction(value)
#
+
def upgrade(self, value):
"""
- Try to find the best converter for a given string, and return the result.
+ Find the best converter for a given string, and return the result.
The supplied string `value` is converted by testing different
- converters in order. First the `func` method of the `StringConverter`
- instance is tried, if this fails other available converters are tried.
- The order in which these other converters are tried is determined by the
- `_status` attribute of the instance.
+ converters in order. First the `func` method of the
+ `StringConverter` instance is tried, if this fails other available
+ converters are tried. The order in which these other converters
+ are tried is determined by the `_status` attribute of the instance.
Parameters
----------
@@ -733,7 +747,9 @@ class StringConverter(object):
# Complains if we try to upgrade by the maximum
_status = self._status
if _status == _statusmax:
- raise ConverterError("Could not find a valid conversion function")
+ raise ConverterError(
+ "Could not find a valid conversion function"
+ )
elif _status < _statusmax - 1:
_status += 1
(self.type, self.func, default) = self._mapper[_status]
@@ -754,23 +770,24 @@ class StringConverter(object):
func : function
Conversion function.
default : any, optional
- Value to return by default, that is, when the string to be converted
- is flagged as missing. If not given, `StringConverter` tries to supply
- a reasonable default value.
+ Value to return by default, that is, when the string to be
+ converted is flagged as missing. If not given,
+ `StringConverter` tries to supply a reasonable default value.
testing_value : str, optional
A string representing a standard input value of the converter.
- This string is used to help defining a reasonable default value.
+ This string is used to help defining a reasonable default
+ value.
missing_values : sequence of str, optional
Sequence of strings indicating a missing value.
locked : bool, optional
- Whether the StringConverter should be locked to prevent automatic
- upgrade or not. Default is False.
+ Whether the StringConverter should be locked to prevent
+ automatic upgrade or not. Default is False.
Notes
-----
- `update` takes the same parameters as the constructor of `StringConverter`,
- except that `func` does not accept a `dtype` whereas `dtype_or_func` in
- the constructor does.
+ `update` takes the same parameters as the constructor of
+ `StringConverter`, except that `func` does not accept a `dtype`
+ whereas `dtype_or_func` in the constructor does.
"""
self.func = func
@@ -796,7 +813,6 @@ class StringConverter(object):
self.missing_values = []
-
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
"""
Convenience function to create a `np.dtype` object.
@@ -807,17 +823,18 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
Parameters
----------
ndtype : var
- Definition of the dtype. Can be any string or dictionary
- recognized by the `np.dtype` function, or a sequence of types.
+ Definition of the dtype. Can be any string or dictionary recognized
+ by the `np.dtype` function, or a sequence of types.
names : str or sequence, optional
Sequence of strings to use as field names for a structured dtype.
- For convenience, `names` can be a string of a comma-separated list of
- names.
+ For convenience, `names` can be a string of a comma-separated list
+ of names.
defaultfmt : str, optional
Format string used to define missing names, such as ``"f%i"``
(default) or ``"fields_%02i"``.
validationargs : optional
- A series of optional arguments used to initialize a `NameValidator`.
+ A series of optional arguments used to initialize a
+ `NameValidator`.
Examples
--------
@@ -865,8 +882,8 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
elif (nbtypes > 0):
validate = NameValidator(**validationargs)
# Default initial names : should we change the format ?
- if (ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and \
- (defaultfmt != "f%i"):
+ if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and
+ (defaultfmt != "f%i")):
ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
# Explicit initial names : just validate
else:
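
`numpy.lib._iotools` is private, but the `NameValidator` behavior described
in the hunks above can be exercised directly; a sketch relying on the
private import path::

    from numpy.lib._iotools import NameValidator

    validator = NameValidator()
    # Invalid characters are deleted, spaces become '_', and excluded
    # names ('return', 'file', 'print') get a trailing underscore.
    print(validator(['file', 'with space', 'x!y']))
    # expected: ('file_', 'with_space', 'xy')
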
diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py
index 4eaabd0ff..54b9c1dc7 100644
--- a/numpy/lib/_version.py
+++ b/numpy/lib/_version.py
@@ -52,6 +52,7 @@ class NumpyVersion():
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
+
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
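
`NumpyVersion` itself exists for version gating in downstream code; typical
use looks like::

    import numpy as np
    from numpy.lib import NumpyVersion

    # Version strings must be full 'X.Y.Z' releases (optionally with
    # alpha/beta/rc/dev suffixes); comparison against plain strings works.
    if NumpyVersion(np.__version__) < '1.7.0':
        raise RuntimeError('this code requires numpy >= 1.7.0')
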
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index befa9839e..bbfdce794 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -244,7 +244,6 @@ def _prepend_ramp(arr, pad_amt, end, axis=-1):
ramp_arr = _arange_ndarray(arr, padshape, axis,
reverse=True).astype(np.float64)
-
# Appropriate slicing to extract n-dimensional edge along `axis`
edge_slice = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 42555d30f..2d98c35d2 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -245,7 +245,7 @@ def intersect1d(ar1, ar2, assume_unique=False):
# Might be faster than unique( intersect1d( ar1, ar2 ) )?
ar1 = unique(ar1)
ar2 = unique(ar2)
- aux = np.concatenate( (ar1, ar2) )
+ aux = np.concatenate((ar1, ar2))
aux.sort()
return aux[:-1][aux[1:] == aux[:-1]]
@@ -282,13 +282,13 @@ def setxor1d(ar1, ar2, assume_unique=False):
ar1 = unique(ar1)
ar2 = unique(ar2)
- aux = np.concatenate( (ar1, ar2) )
+ aux = np.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
- flag = np.concatenate( ([True], aux[1:] != aux[:-1], [True] ) )
+ flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
# flag2 = ediff1d( flag ) == 0
flag2 = flag[1:] == flag[:-1]
return aux[flag2]
@@ -371,7 +371,7 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
- ar = np.concatenate( (ar1, ar2) )
+ ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
@@ -381,8 +381,8 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
- flag = np.concatenate( (bool_ar, [invert]) )
- indx = order.argsort(kind='mergesort')[:len( ar1 )]
+ flag = np.concatenate((bool_ar, [invert]))
+ indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
@@ -417,7 +417,7 @@ def union1d(ar1, ar2):
array([-2, -1, 0, 1, 2])
"""
- return unique( np.concatenate( (ar1, ar2) ) )
+ return unique(np.concatenate((ar1, ar2)))
def setdiff1d(ar1, ar2, assume_unique=False):
"""
diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py
index 8ac720ccd..d9839feeb 100644
--- a/numpy/lib/arrayterator.py
+++ b/numpy/lib/arrayterator.py
@@ -104,7 +104,8 @@ class Arrayterator(object):
"""
# Fix index, handling ellipsis and incomplete slices.
- if not isinstance(index, tuple): index = (index,)
+ if not isinstance(index, tuple):
+ index = (index,)
fixed = []
length, dims = len(index), len(self.shape)
for slice_ in index:
@@ -180,7 +181,8 @@ class Arrayterator(object):
def __iter__(self):
# Skip arrays with degenerate dimensions
- if [dim for dim in self.shape if dim <= 0]: raise StopIteration
+ if [dim for dim in self.shape if dim <= 0]:
+ raise StopIteration
start = self.start[:]
stop = self.stop[:]
@@ -199,12 +201,13 @@ class Arrayterator(object):
# along higher dimensions, so we read only a single position
if count == 0:
stop[i] = start[i]+1
- elif count <= self.shape[i]: # limit along this dimension
+ elif count <= self.shape[i]:
+ # limit along this dimension
stop[i] = start[i] + count*step[i]
rundim = i
else:
- stop[i] = self.stop[i] # read everything along this
- # dimension
+ # read everything along this dimension
+ stop[i] = self.stop[i]
stop[i] = min(self.stop[i], stop[i])
count = count//self.shape[i]
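
The loop reindented above drives `Arrayterator`'s buffered iteration; a
small usage sketch::

    import numpy as np
    from numpy.lib import Arrayterator

    a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
    # Yield sub-arrays of at most 8 elements apiece, in C order.
    for chunk in Arrayterator(a, 8):
        print(chunk.shape)
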
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index e54614483..5b96e5b8e 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -119,7 +119,8 @@ def fv(rate, nper, pmt, pv, when='end'):
temp = (1+rate)**nper
miter = np.broadcast(rate, nper, pmt, pv, when)
zer = np.zeros(miter.shape)
- fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
+ fact = np.where(rate == zer, nper + zer,
+ (1 + rate*when)*(temp - 1)/rate + zer)
return -(pv*temp + pmt*fact)
def pmt(rate, nper, pv, fv=0, when='end'):
@@ -210,7 +211,8 @@ def pmt(rate, nper, pv, fv=0, when='end'):
temp = (1+rate)**nper
miter = np.broadcast(rate, nper, pv, fv, when)
zer = np.zeros(miter.shape)
- fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
+ fact = np.where(rate == zer, nper + zer,
+ (1 + rate*when)*(temp - 1)/rate + zer)
return -(fv + pv*temp) / fact
def nper(rate, pmt, pv, fv=0, when='end'):
@@ -279,7 +281,7 @@ def nper(rate, pmt, pv, fv=0, when='end'):
B = np.log((-fv+z) / (pv+z))/np.log(1.0+rate)
miter = np.broadcast(rate, pmt, pv, fv, when)
zer = np.zeros(miter.shape)
- return np.where(rate==zer, A+zer, B+zer) + 0.0
+ return np.where(rate == zer, A + zer, B + zer) + 0.0
def ipmt(rate, per, nper, pv, fv=0.0, when='end'):
"""
@@ -365,7 +367,8 @@ def ipmt(rate, per, nper, pv, fv=0.0, when='end'):
"""
when = _convert_when(when)
- rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper, pv, fv, when)
+ rate, per, nper, pv, fv, when = np.broadcast_arrays(rate, per, nper,
+ pv, fv, when)
total_pmt = pmt(rate, nper, pv, fv, when)
ipmt = _rbl(rate, per, total_pmt, pv, when)*rate
try:
@@ -507,12 +510,16 @@ def pv(rate, nper, pmt, fv=0.0, when='end'):
return -(fv + pmt*fact)/temp
# Computed with Sage
-# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + p*((r + 1)^n - 1)*w/r)
+# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x -
+# p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r +
+# p*((r + 1)^n - 1)*w/r)
def _g_div_gp(r, n, p, x, y, w):
t1 = (r+1)**n
t2 = (r+1)**(n-1)
- return (y + t1*x + p*(t1 - 1)*(r*w + 1)/r)/(n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + p*(t1 - 1)*w/r)
+ return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) /
+ (n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r +
+ p*(t1 - 1)*w/r))
# Use Newton's iteration until the change is less than 1e-6
# for all values or a maximum of 100 iterations is reached.
@@ -572,7 +579,7 @@ def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100):
while (iter < maxiter) and not close:
rnp1 = rn - _g_div_gp(rn, nper, pmt, pv, fv, when)
diff = abs(rnp1-rn)
- close = np.all(diff<tol)
+ close = np.all(diff < tol)
iter += 1
rn = rnp1
if not close:
@@ -593,9 +600,9 @@ def irr(values):
----------
values : array_like, shape(N,)
Input cash flows per time period. By convention, net "deposits"
- are negative and net "withdrawals" are positive. Thus, for example,
- at least the first element of `values`, which represents the initial
- investment, will typically be negative.
+ are negative and net "withdrawals" are positive. Thus, for
+ example, at least the first element of `values`, which represents
+ the initial investment, will typically be negative.
Returns
-------
@@ -605,13 +612,13 @@ def irr(values):
Notes
-----
The IRR is perhaps best understood through an example (illustrated
- using np.irr in the Examples section below). Suppose one invests
- 100 units and then makes the following withdrawals at regular
- (fixed) intervals: 39, 59, 55, 20. Assuming the ending value is 0,
- one's 100 unit investment yields 173 units; however, due to the
- combination of compounding and the periodic withdrawals, the
- "average" rate of return is neither simply 0.73/4 nor (1.73)^0.25-1.
- Rather, it is the solution (for :math:`r`) of the equation:
+ using np.irr in the Examples section below). Suppose one invests 100
+ units and then makes the following withdrawals at regular (fixed)
+ intervals: 39, 59, 55, 20. Assuming the ending value is 0, one's 100
+ unit investment yields 173 units; however, due to the combination of
+ compounding and the periodic withdrawals, the "average" rate of return
+ is neither simply 0.73/4 nor (1.73)^0.25-1. Rather, it is the solution
+ (for :math:`r`) of the equation:
.. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2}
+ \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0
@@ -663,18 +670,18 @@ def npv(rate, values):
The discount rate.
values : array_like, shape(M, )
The values of the time series of cash flows. The (fixed) time
- interval between cash flow "events" must be the same as that
- for which `rate` is given (i.e., if `rate` is per year, then
- precisely a year is understood to elapse between each cash flow
- event). By convention, investments or "deposits" are negative,
- income or "withdrawals" are positive; `values` must begin with
- the initial investment, thus `values[0]` will typically be
- negative.
+ interval between cash flow "events" must be the same as that for
+ which `rate` is given (i.e., if `rate` is per year, then precisely
+ a year is understood to elapse between each cash flow event). By
+ convention, investments or "deposits" are negative, income or
+ "withdrawals" are positive; `values` must begin with the initial
+ investment, thus `values[0]` will typically be negative.
Returns
-------
out : float
- The NPV of the input cash flow series `values` at the discount `rate`.
+ The NPV of the input cash flow series `values` at the discount
+ `rate`.
Notes
-----
@@ -705,8 +712,9 @@ def mirr(values, finance_rate, reinvest_rate):
Parameters
----------
values : array_like
- Cash flows (must contain at least one positive and one negative value)
- or nan is returned. The first value is considered a sunk cost at time zero.
+ Cash flows (must contain at least one positive and one negative
+ value) or nan is returned. The first value is considered a sunk
+ cost at time zero.
finance_rate : scalar
Interest rate paid on the cash flows
reinvest_rate : scalar
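
The `irr` docstring's worked example (the commit only rewraps its text) can
be checked numerically: `np.irr` returns the rate at which the net present
value of the cash flows is zero::

    import numpy as np

    # Invest 100, then withdraw 39, 59, 55, 20 at regular intervals.
    r = np.irr([-100, 39, 59, 55, 20])          # roughly 0.281 per period
    print(np.npv(r, [-100, 39, 59, 55, 20]))    # ~0.0 by construction
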
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 6083312de..98743b6ad 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -150,7 +150,7 @@ else:
MAGIC_PREFIX = asbytes('\x93NUMPY')
MAGIC_LEN = len(MAGIC_PREFIX) + 2
-BUFFER_SIZE = 2 ** 18 #size of buffer for reading npz files in bytes
+BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
@@ -231,10 +231,10 @@ def dtype_to_descr(dtype):
"""
if dtype.names is not None:
- # This is a record array. The .descr is fine.
- # XXX: parts of the record array with an empty name, like padding bytes,
- # still get fiddled with. This needs to be fixed in the C implementation
- # of dtype().
+ # This is a record array. The .descr is fine. XXX: parts of the
+ # record array with an empty name, like padding bytes, still get
+ # fiddled with. This needs to be fixed in the C implementation of
+ # dtype().
return dtype.descr
else:
return dtype.str
@@ -293,9 +293,9 @@ def _write_array_header(fp, d, version=None):
header.append("}")
header = "".join(header)
# Pad the header with spaces and a final newline such that the magic
- # string, the header-length short and the header are aligned on a 16-byte
- # boundary. Hopefully, some system, possibly memory-mapping, can take
- # advantage of our premature optimization.
+ # string, the header-length short and the header are aligned on a
+ # 16-byte boundary. Hopefully, some system, possibly memory-mapping,
+ # can take advantage of our premature optimization.
current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline
topad = 16 - (current_header_len % 16)
header = asbytes(header + ' '*topad + '\n')
@@ -325,8 +325,8 @@ def write_array_header_1_0(fp, d):
----------
fp : filelike object
d : dict
- This has the appropriate entries for writing its string representation
- to the header of the file.
+ This has the appropriate entries for writing its string
+ representation to the header of the file.
"""
_write_array_header(fp, d, (1, 0))
@@ -341,8 +341,8 @@ def write_array_header_2_0(fp, d):
----------
fp : filelike object
d : dict
- This has the appropriate entries for writing its string representation
- to the header of the file.
+ This has the appropriate entries for writing its string
+ representation to the header of the file.
"""
_write_array_header(fp, d, (2, 0))
@@ -363,9 +363,9 @@ def read_array_header_1_0(fp):
shape : tuple of int
The shape of the array.
fortran_order : bool
- The array data will be written out directly if it is either C-contiguous
- or Fortran-contiguous. Otherwise, it will be made contiguous before
- writing it out.
+ The array data will be written out directly if it is either
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
+ contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
@@ -396,9 +396,9 @@ def read_array_header_2_0(fp):
shape : tuple of int
The shape of the array.
fortran_order : bool
- The array data will be written out directly if it is either C-contiguous
- or Fortran-contiguous. Otherwise, it will be made contiguous before
- writing it out.
+ The array data will be written out directly if it is either
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
+ contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
@@ -428,9 +428,9 @@ def _read_array_header(fp, version):
else:
raise ValueError("Invalid version %r" % version)
- # The header is a pretty-printed string representation of a literal Python
- # dictionary with trailing newlines padded to a 16-byte boundary. The keys
- # are strings.
+ # The header is a pretty-printed string representation of a literal
+ # Python dictionary with trailing newlines padded to a 16-byte
+ # boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
# "descr" : dtype.descr
@@ -449,7 +449,7 @@ def _read_array_header(fp, version):
# Sanity-check the values.
if (not isinstance(d['shape'], tuple) or
- not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
+ not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
msg = "shape is not valid: %r"
raise ValueError(msg % (d['shape'],))
if not isinstance(d['fortran_order'], bool):
@@ -474,13 +474,13 @@ def write_array(fp, array, version=None):
Parameters
----------
fp : file_like object
- An open, writable file object, or similar object with a ``.write()``
- method.
+ An open, writable file object, or similar object with a
+ ``.write()`` method.
array : ndarray
The array to write to disk.
version : (int, int) or None, optional
- The version number of the format. None means use the oldest supported
- version that is able to store the data. Default: None
+ The version number of the format. None means use the oldest
+ supported version that is able to store the data. Default: None
Raises
------
@@ -504,8 +504,9 @@ def write_array(fp, array, version=None):
buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
if array.dtype.hasobject:
- # We contain Python objects so we cannot write out the data directly.
- # Instead, we will pickle it out with version 2 of the pickle protocol.
+ # We contain Python objects so we cannot write out the data
+ # directly. Instead, we will pickle it out with version 2 of the
+ # pickle protocol.
pickle.dump(array, fp, protocol=2)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
@@ -563,13 +564,13 @@ def read_array(fp):
# We can use the fast fromfile() function.
array = numpy.fromfile(fp, dtype=dtype, count=count)
else:
- # This is not a real file. We have to read it the memory-intensive
- # way.
- # crc32 module fails on reads greater than 2 ** 32 bytes, breaking
- # large reads from gzip streams. Chunk reads to BUFFER_SIZE bytes to
- # avoid issue and reduce memory overhead of the read. In
- # non-chunked case count < max_read_count, so only one read is
- # performed.
+ # This is not a real file. We have to read it the
+ # memory-intensive way.
+ # crc32 module fails on reads greater than 2 ** 32 bytes,
+ # breaking large reads from gzip streams. Chunk reads to
+ # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
+ # of the read. In non-chunked case count < max_read_count, so
+ # only one read is performed.
max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
@@ -604,25 +605,24 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
object.
mode : str, optional
The mode in which to open the file; the default is 'r+'. In
- addition to the standard file modes, 'c' is also accepted to
- mean "copy on write." See `memmap` for the available mode strings.
+ addition to the standard file modes, 'c' is also accepted to mean
+ "copy on write." See `memmap` for the available mode strings.
dtype : data-type, optional
The data type of the array if we are creating a new file in "write"
- mode, if not, `dtype` is ignored. The default value is None,
- which results in a data-type of `float64`.
+ mode, if not, `dtype` is ignored. The default value is None, which
+ results in a data-type of `float64`.
shape : tuple of int
The shape of the array if we are creating a new file in "write"
mode, in which case this parameter is required. Otherwise, this
parameter is ignored and is thus optional.
fortran_order : bool, optional
Whether the array should be Fortran-contiguous (True) or
- C-contiguous (False, the default) if we are creating a new file
- in "write" mode.
+ C-contiguous (False, the default) if we are creating a new file in
+ "write" mode.
version : tuple of int (major, minor) or None
If the mode is a "write" mode, then this is the version of the file
- format used to create the file.
- None means use the oldest supported version that is able to store the
- data. Default: None
+ format used to create the file. None means use the oldest
+ supported version that is able to store the data. Default: None
Returns
-------
@@ -642,15 +642,15 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
"""
if not isinstance(filename, basestring):
- raise ValueError("Filename must be a string. Memmap cannot use" \
+ raise ValueError("Filename must be a string. Memmap cannot use"
" existing file handles.")
if 'w' in mode:
# We are creating the file, not reading it.
# Check if we ought to create the file.
_check_version(version)
- # Ensure that the given dtype is an authentic dtype object rather than
- # just something that can be interpreted as a dtype object.
+ # Ensure that the given dtype is an authentic dtype object rather
+ # than just something that can be interpreted as a dtype object.
dtype = numpy.dtype(dtype)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
@@ -713,9 +713,9 @@ def _read_bytes(fp, size, error_template="ran out of data"):
"""
data = bytes()
while True:
- # io files (default in python3) return None or raise on would-block,
- # python2 file will truncate, probably nothing can be done about that.
- # note that regular files can't be non-blocking
+ # io files (default in python3) return None or raise on
+ # would-block, python2 file will truncate, probably nothing can be
+ # done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
@@ -725,6 +725,6 @@ def _read_bytes(fp, size, error_template="ran out of data"):
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
- raise ValueError(msg %(error_template, size, len(data)))
+ raise ValueError(msg % (error_template, size, len(data)))
else:
return data
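
As a sketch of the format API documented above, an array can be
round-tripped through any file-like object::

    import io
    import numpy as np
    from numpy.lib import format

    arr = np.arange(6).reshape(2, 3)
    buf = io.BytesIO()
    format.write_array(buf, arr)   # magic string, header, then raw data
    buf.seek(0)
    out = format.read_array(buf)
    assert (out == arr).all()
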
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 3d8ffc586..0a1d05f77 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -367,8 +367,8 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
- # For the rightmost bin, we want values equal to the right
- # edge to be counted in the last bin, and not as an outlier.
+ # For the rightmost bin, we want values equal to the right edge to be
+ # counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
@@ -376,7 +376,8 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
- on_edge = (around(sample[:, i], decimal) == around(edges[i][-1], decimal))
+ on_edge = (around(sample[:, i], decimal) ==
+ around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
@@ -1622,6 +1623,7 @@ class vectorize(object):
further degrades performance.
"""
+
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
@@ -3375,7 +3377,7 @@ def meshgrid(*xi, **kwargs):
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
- if not indexing in ['xy', 'ij']:
+ if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
@@ -3436,7 +3438,7 @@ def delete(arr, obj, axis=None):
Notes
-----
Often it is preferable to use a boolean mask. For example:
-
+
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
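
The rightmost-edge rule restated in the `histogramdd` hunk above means a
sample exactly equal to the last edge lands in the last bin rather than
being dropped as an outlier; a sketch, with the expected counts given in
the comment::

    import numpy as np

    sample = np.array([[0.0], [0.5], [1.0]])
    hist, edges = np.histogramdd(sample, bins=2, range=[(0.0, 1.0)])
    # edges[0] is [0., 0.5, 1.]; the value 1.0 sits on the rightmost
    # edge and is shifted into the last bin, so hist is [1., 2.]
    print(hist)
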
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index f0066be81..98c6b291b 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -1,28 +1,30 @@
from __future__ import division, absolute_import, print_function
-__all__ = ['ravel_multi_index',
- 'unravel_index',
- 'mgrid',
- 'ogrid',
- 'r_', 'c_', 's_',
- 'index_exp', 'ix_',
- 'ndenumerate', 'ndindex',
- 'fill_diagonal', 'diag_indices', 'diag_indices_from']
-
import sys
+import math
+
import numpy.core.numeric as _nx
-from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod,
- arange )
+from numpy.core.numeric import (
+ asarray, ScalarType, array, alltrue, cumprod, arange
+ )
from numpy.core.numerictypes import find_common_type
-import math
from . import function_base
import numpy.matrixlib as matrix
from .function_base import diff
from numpy.lib._compiled_base import ravel_multi_index, unravel_index
from numpy.lib.stride_tricks import as_strided
+
makemat = matrix.matrix
+
+__all__ = [
+ 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
+ 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
+ 'diag_indices', 'diag_indices_from'
+ ]
+
+
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
@@ -142,8 +144,10 @@ class nd_grid(object):
[4]]), array([[0, 1, 2, 3, 4]])]
"""
+
def __init__(self, sparse=False):
self.sparse = sparse
+
def __getitem__(self, key):
try:
size = []
@@ -151,16 +155,19 @@ class nd_grid(object):
for k in range(len(key)):
step = key[k].step
start = key[k].start
- if start is None: start=0
- if step is None: step=1
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
if isinstance(step, complex):
size.append(int(abs(step)))
typ = float
else:
- size.append(int(math.ceil((key[k].stop - start)/(step*1.0))))
- if isinstance(step, float) or \
- isinstance(start, float) or \
- isinstance(key[k].stop, float):
+ size.append(
+ int(math.ceil((key[k].stop - start)/(step*1.0))))
+ if (isinstance(step, float) or
+ isinstance(start, float) or
+ isinstance(key[k].stop, float)):
typ = float
if self.sparse:
nn = [_nx.arange(_x, dtype=_t)
@@ -170,8 +177,10 @@ class nd_grid(object):
for k in range(len(size)):
step = key[k].step
start = key[k].start
- if start is None: start=0
- if step is None: step=1
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
if isinstance(step, complex):
step = int(abs(step))
if step != 1:
@@ -188,13 +197,14 @@ class nd_grid(object):
step = key.step
stop = key.stop
start = key.start
- if start is None: start = 0
+ if start is None:
+ start = 0
if isinstance(step, complex):
step = abs(step)
length = int(step)
if step != 1:
step = (key.stop-start)/float(step-1)
- stop = key.stop+step
+ stop = key.stop + step
return _nx.arange(0, length, 1, float)*step + start
else:
return _nx.arange(start, stop, step)
@@ -207,8 +217,8 @@ class nd_grid(object):
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
-mgrid.__doc__ = None # set in numpy.add_newdocs
-ogrid.__doc__ = None # set in numpy.add_newdocs
+mgrid.__doc__ = None # set in numpy.add_newdocs
+ogrid.__doc__ = None # set in numpy.add_newdocs
class AxisConcatenator(object):
"""
@@ -217,6 +227,7 @@ class AxisConcatenator(object):
For detailed documentation on usage, see `r_`.
"""
+
def _retval(self, res):
if self.matrix:
oldndim = res.ndim
@@ -256,7 +267,8 @@ class AxisConcatenator(object):
step = key[k].step
start = key[k].start
stop = key[k].stop
- if start is None: start = 0
+ if start is None:
+ start = 0
if step is None:
step = 1
if isinstance(step, complex):
@@ -431,6 +443,7 @@ class RClass(AxisConcatenator):
matrix([[1, 2, 3, 4, 5, 6]])
"""
+
def __init__(self):
AxisConcatenator.__init__(self, 0)
@@ -453,6 +466,7 @@ class CClass(AxisConcatenator):
array([[1, 2, 3, 0, 0, 4, 5, 6]])
"""
+
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
@@ -484,6 +498,7 @@ class ndenumerate(object):
(1, 1) 4
"""
+
def __init__(self, arr):
self.iter = asarray(arr).flat
@@ -536,10 +551,12 @@ class ndindex(object):
(2, 1, 0)
"""
+
def __init__(self, *shape):
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
- x = as_strided(_nx.zeros(1), shape=shape, strides=_nx.zeros_like(shape))
+ x = as_strided(_nx.zeros(1), shape=shape,
+ strides=_nx.zeros_like(shape))
self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
order='C')
@@ -556,18 +573,20 @@ class ndindex(object):
def __next__(self):
"""
- Standard iterator method, updates the index and returns the index tuple.
+ Standard iterator method, updates the index and returns the index
+ tuple.
Returns
-------
val : tuple of ints
- Returns a tuple containing the indices of the current iteration.
+ Returns a tuple containing the indices of the current
+ iteration.
"""
next(self._it)
return self._it.multi_index
- next = __next__
+ next = __next__
# You can do all this with slice() plus a few special objects,
@@ -624,6 +643,7 @@ class IndexExpression(object):
array([2, 4])
"""
+
def __init__(self, maketuple):
self.maketuple = maketuple
@@ -743,7 +763,7 @@ def fill_diagonal(a, val, wrap=False):
else:
# For more than d=2, the strided formula is only valid for arrays with
# all dimensions equal, so we check first.
- if not alltrue(diff(a.shape)==0):
+ if not alltrue(diff(a.shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
step = 1 + (cumprod(a.shape[:-1])).sum()
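
For orientation, the index helpers touched in this file; a quick sketch::

    import numpy as np

    np.r_[1:4, 0, 4]       # array([1, 2, 3, 0, 4])
    np.mgrid[0:2, 0:2]     # dense grid: one (2, 2, 2) array
    np.ogrid[0:2, 0:2]     # open grid: arrays of shape (2, 1) and (1, 2)

    a = np.zeros((3, 3), dtype=int)
    np.fill_diagonal(a, 5)   # writes 5 along the main diagonal, in place
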
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 479e9c099..fe855a71a 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -112,6 +112,7 @@ class BagObj(object):
"Doesn't matter what you want, you're gonna get this"
"""
+
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
@@ -185,6 +186,7 @@ class NpzFile(object):
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
+
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
@@ -868,7 +870,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
- if not ndmin in [0, 1, 2]:
+ if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
@@ -1392,7 +1394,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
first_line = next(fhd)
if names is True:
if comments in first_line:
- first_line = asbytes('').join(first_line.split(comments)[1:])
+ first_line = (
+ asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
@@ -1689,23 +1692,17 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if usemask:
masks = masks[:-skip_footer]
-
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
- #
- # if loose:
- # conversionfuncs = [conv._loose_call for conv in converters]
- # else:
- # conversionfuncs = [conv._strict_call for conv in converters]
- # for (i, vals) in enumerate(rows):
- # rows[i] = tuple([convert(val)
- # for (convert, val) in zip(conversionfuncs, vals)])
if loose:
- rows = list(zip(*[[converter._loose_call(_r) for _r in map(itemgetter(i), rows)]
- for (i, converter) in enumerate(converters)]))
+ rows = list(
+ zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, conv) in enumerate(converters)]))
else:
- rows = list(zip(*[[converter._strict_call(_r) for _r in map(itemgetter(i), rows)]
- for (i, converter) in enumerate(converters)]))
+ rows = list(
+ zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, conv) in enumerate(converters)]))
+
# Reset the dtype
data = rows
if dtype is None:
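
A small illustration of the converter pass rewritten above, with
hypothetical rows and converters: column i is pulled out with
``itemgetter(i)``, run through its converter, and the converted
columns are zipped back into row tuples.

    from operator import itemgetter

    rows = [("1", "2.5"), ("3", "4.5")]  # stand-ins for parsed lines
    converters = [int, float]            # stand-ins for column converters
    cols = [[conv(v) for v in map(itemgetter(i), rows)]
            for i, conv in enumerate(converters)]
    print(list(zip(*cols)))              # [(1, 2.5), (3, 4.5)]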
@@ -1767,7 +1764,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if user_converters:
ishomogeneous = True
descr = []
- for (i, ttype) in enumerate([conv.type for conv in converters]):
+ for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 10ae32a60..6a1adc773 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -143,7 +143,7 @@ def poly(seq_of_zeros):
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
- NX.alltrue(neg_roots == pos_roots)):
+ NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
@@ -412,15 +412,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
deg : int
Degree of the fitting polynomial
rcond : float, optional
- Relative condition number of the fit. Singular values smaller than this
- relative to the largest singular value will be ignored. The default
- value is len(x)*eps, where eps is the relative precision of the float
- type, about 2e-16 in most cases.
+ Relative condition number of the fit. Singular values smaller than
+ this relative to the largest singular value will be ignored. The
+ default value is len(x)*eps, where eps is the relative precision of
+ the float type, about 2e-16 in most cases.
full : bool, optional
- Switch determining nature of return value. When it is
- False (the default) just the coefficients are returned, when True
- diagnostic information from the singular value decomposition is also
- returned.
+ Switch determining nature of return value. When it is False (the
+ default) just the coefficients are returned, when True diagnostic
+ information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
@@ -430,18 +429,21 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
Returns
-------
p : ndarray, shape (M,) or (M, K)
- Polynomial coefficients, highest power first.
- If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
+ Polynomial coefficients, highest power first. If `y` was 2-D, the
+ coefficients for `k`-th data set are in ``p[:,k]``.
- residuals, rank, singular_values, rcond : present only if `full` = True
- Residuals of the least-squares fit, the effective rank of the scaled
- Vandermonde coefficient matrix, its singular values, and the specified
- value of `rcond`. For more details, see `linalg.lstsq`.
+ residuals, rank, singular_values, rcond :
+ Present only if `full` = True. Residuals of the least-squares fit,
+ the effective rank of the scaled Vandermonde coefficient matrix,
+ its singular values, and the specified value of `rcond`. For more
+ details, see `linalg.lstsq`.
- V : ndaray, shape (M,M) or (M,M,K) : present only if `full` = False and `cov`=True
- The covariance matrix of the polynomial coefficient estimates. The diagonal
- of this matrix are the variance estimates for each coefficient. If y is a 2-d
- array, then the covariance matrix for the `k`-th data set are in ``V[:,:,k]``
+ V : ndarray, shape (M,M) or (M,M,K)
+ Present only if `full` = False and `cov`=True. The covariance
+ matrix of the polynomial coefficient estimates. The diagonal of
+ this matrix contains the variance estimates for each coefficient.
+ If y is a 2-D array, then the covariance matrix for the `k`-th
+ data set is in ``V[:,:,k]``.
Warns
@@ -531,8 +533,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
- >>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
- [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+ >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
@@ -543,19 +544,19 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
y = NX.asarray(y) + 0.0
# check arguments.
- if deg < 0 :
+ if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
- if y.ndim < 1 or y.ndim > 2 :
+ if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
- if x.shape[0] != y.shape[0] :
+ if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
- if rcond is None :
+ if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
@@ -567,7 +568,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
- if w.shape[0] != y.shape[0] :
+ if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
@@ -586,9 +587,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
- if full :
+ if full:
return c, resids, rank, s, rcond
- elif cov :
+ elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
@@ -600,7 +601,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
- else :
+ else:
return c
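
A short usage sketch of the ``cov=True`` branch above: the diagonal
of the returned matrix holds per-coefficient variance estimates, so
their square roots are the standard errors.

    import numpy as np

    rng = np.random.RandomState(0)
    x = np.linspace(0, 1, 20)
    y = 3 * x + 1 + rng.normal(scale=0.1, size=x.size)
    c, V = np.polyfit(x, y, 1, cov=True)
    print(c)                    # roughly [ 3.,  1.]
    print(np.sqrt(np.diag(V)))  # standard errors of the two coefficients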
@@ -917,8 +918,8 @@ def _raise_power(astr, wrap=70):
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
- if ((len(line2)+len(toadd2) > wrap) or \
- (len(line1)+len(toadd1) > wrap)):
+ if ((len(line2) + len(toadd2) > wrap) or
+ (len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
@@ -1126,7 +1127,6 @@ class poly1d(object):
thestr = newstr
return _raise_power(thestr)
-
def __call__(self, val):
return polyval(self.coeffs, val)
@@ -1214,7 +1214,8 @@ class poly1d(object):
try:
return self.__dict__[key]
except KeyError:
- raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
+ raise AttributeError(
+ "'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
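
The convention behind ``ind = self.order - val``: coefficients are
stored highest power first, so indexing a poly1d by power k returns
the coefficient of ``x**k``.

    import numpy as np

    p = np.poly1d([1, 2, 3])  # x**2 + 2*x + 3
    print(p[0], p[1], p[2])   # 3 2 1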
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index 6ae8b8445..a61b1749b 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -1,8 +1,8 @@
"""
Collection of utilities to manipulate structured arrays.
-Most of these functions were initially implemented by John Hunter for matplotlib.
-They have been rewritten and extended for convenience.
+Most of these functions were initially implemented by John Hunter for
+matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
@@ -70,7 +70,6 @@ def recursive_fill_fields(input, output):
return output
-
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
@@ -187,7 +186,7 @@ def zip_descr(seqarrays, flatten=False):
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
- Returns a dictionary with fields as keys and a list of parent fields as values.
+ Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
@@ -225,8 +224,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,):
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
-# if (lastparent[-1] != lastname):
- lastparent.append(lastname)
+ lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
@@ -237,6 +235,7 @@ def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
+
"""
for element in iterable:
if isinstance(element, np.void):
@@ -249,9 +248,11 @@ def _izip_fields_flat(iterable):
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
+
"""
for element in iterable:
- if hasattr(element, '__iter__') and not isinstance(element, basestring):
+ if (hasattr(element, '__iter__') and
+ not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
@@ -325,9 +326,8 @@ def _fix_defaults(output, defaults=None):
return output
-
-def merge_arrays(seqarrays,
- fill_value= -1, flatten=False, usemask=False, asrecarray=False):
+def merge_arrays(seqarrays, fill_value=-1, flatten=False,
+ usemask=False, asrecarray=False):
"""
Merge arrays field by field.
@@ -463,7 +463,6 @@ def merge_arrays(seqarrays,
return output
-
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
@@ -475,13 +474,14 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
base : array
Input array
drop_names : string or sequence
- String or sequence of strings corresponding to the names of the fields
- to drop.
+ String or sequence of strings corresponding to the names of the
+ fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
- asrecarray : string or sequence
+ asrecarray : string or sequence, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
- a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
+ a plain ndarray or masked array with flexible dtype. The default
+ is False.
Examples
--------
@@ -502,7 +502,7 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
- #
+
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
@@ -517,11 +517,11 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
else:
newdtype.append((name, current))
return newdtype
- #
+
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
- #
+
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
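
A usage sketch for ``drop_fields`` as documented above, on a small
nested dtype:

    import numpy as np
    from numpy.lib import recfunctions as rfn

    a = np.array([(1, (2.0, 3))],
                 dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
    print(rfn.drop_fields(a, 'a'))           # keeps only 'b'
    print(rfn.drop_fields(a, ['ba', 'bb']))  # dropping all of 'b' drops 'b'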
@@ -534,7 +534,6 @@ def rec_drop_fields(base, drop_names):
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
-
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
@@ -564,8 +563,9 @@ def rename_fields(base, namemapper):
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
- newdtype.append((newname,
- _recursive_rename_fields(current, namemapper)))
+ newdtype.append(
+ (newname, _recursive_rename_fields(current, namemapper))
+ )
else:
newdtype.append((newname, current))
return newdtype
@@ -574,7 +574,7 @@ def rename_fields(base, namemapper):
def append_fields(base, names, data, dtypes=None,
- fill_value= -1, usemask=True, asrecarray=False):
+ fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
@@ -615,7 +615,7 @@ def append_fields(base, names, data, dtypes=None,
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
- else :
+ else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
@@ -642,7 +642,6 @@ def append_fields(base, names, data, dtypes=None,
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
-
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
@@ -677,7 +676,6 @@ def rec_append_fields(base, names, data, dtypes=None):
asrecarray=True, usemask=False)
-
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
@@ -690,11 +688,11 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
- Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`)
- or a ndarray.
+ Whether to return a MaskedArray (or MaskedRecords if
+ `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
- Whether to return a recarray (or MaskedRecords if `usemask==True`) or
- just a flexible-type ndarray.
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
+ or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether to automatically cast the type of the field to the maximum.
@@ -744,7 +742,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
- raise TypeError("Incompatible type '%s' <> '%s'" % \
+ raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
@@ -768,7 +766,6 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
usemask=usemask, asrecarray=asrecarray)
-
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
@@ -823,19 +820,17 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False):
return duplicates
-
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of strings corresponding
- to the fields used to join the array.
- An exception is raised if the `key` field cannot be found in the two input
- arrays.
- Neither `r1` nor `r2` should have any duplicates along `key`: the presence
- of duplicates will make the output quite unreliable. Note that duplicates
- are not looked for by the algorithm.
+ to the fields used to join the array. An exception is raised if the
+ `key` field cannot be found in the two input arrays. Neither `r1` nor
+ `r2` should have any duplicates along `key`: the presence of duplicates
+ will make the output quite unreliable. Note that duplicates are not
+ looked for by the algorithm.
Parameters
----------
@@ -846,39 +841,41 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
- If 'outer', returns the common elements as well as the elements of r1
- not in r2 and the elements of not in r2.
- If 'leftouter', returns the common elements and the elements of r1 not
- in r2.
+ If 'outer', returns the common elements as well as the elements of
+ r1 not in r2 and the elements of r2 not in r1.
+ If 'leftouter', returns the common elements and the elements of r1
+ not in r2.
r1postfix : string, optional
- String appended to the names of the fields of r1 that are present in r2
- but absent of the key.
+ String appended to the names of the fields of r1 that are present
+ in r2 but absent from the key.
r2postfix : string, optional
- String appended to the names of the fields of r2 that are present in r1
- but absent of the key.
+ String appended to the names of the fields of r2 that are present
+ in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
- Whether to return a MaskedArray (or MaskedRecords is `asrecarray==True`)
- or a ndarray.
+ Whether to return a MaskedArray (or MaskedRecords if
+ `asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
- Whether to return a recarray (or MaskedRecords if `usemask==True`) or
- just a flexible-type ndarray.
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
+ or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
- * A temporary array is formed by dropping the fields not in the key for the
- two arrays and concatenating the result. This array is then sorted, and
- the common entries selected. The output is constructed by filling the fields
- with the selected entries. Matching is not preserved if there are some
- duplicates...
+ * A temporary array is formed by dropping the fields not in the key for
+ the two arrays and concatenating the result. This array is then
+ sorted, and the common entries selected. The output is constructed by
+ filling the fields with the selected entries. Matching is not
+ preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
- raise ValueError("The 'jointype' argument should be in 'inner', "\
- "'outer' or 'leftouter' (got '%s' instead)" % jointype)
+ raise ValueError(
+ "The 'jointype' argument should be in 'inner', "
+ "'outer' or 'leftouter' (got '%s' instead)" % jointype
+ )
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
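
A minimal inner-join sketch matching the documented behaviour; the
field names are hypothetical.

    import numpy as np
    from numpy.lib import recfunctions as rfn

    r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('a', float)])
    r2 = np.array([(1, 100.), (3, 300.)], dtype=[('key', int), ('b', float)])
    print(rfn.join_by('key', r1, r2, usemask=False))
    # [(1, 10.0, 100.0)] -- only key 1 appears in both inputs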
@@ -963,14 +960,15 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
- # Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
+ # Find the largest number of common fields:
+ # r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
- if f not in names or (f in r2names and not r2postfix and not f in key):
+ if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index 3da86d9c8..e07caf805 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -17,16 +17,21 @@ correctly handled. See their respective docstrings for specific examples.
"""
from __future__ import division, absolute_import, print_function
-__all__ = ['sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos',
- 'arcsin', 'arctanh']
-
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
from numpy.lib.type_check import isreal
+
+__all__ = [
+ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
+ 'arctanh'
+ ]
+
+
_ln2 = nx.log(2.0)
+
def _tocomplex(arr):
"""Convert its input `arr` to a complex array.
@@ -109,9 +114,10 @@ def _fix_real_lt_zero(x):
>>> np.lib.scimath._fix_real_lt_zero([-1,2])
array([-1.+0.j, 2.+0.j])
+
"""
x = asarray(x)
- if any(isreal(x) & (x<0)):
+ if any(isreal(x) & (x < 0)):
x = _tocomplex(x)
return x
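
The effect of ``_fix_real_lt_zero`` in practice: ``scimath.sqrt``
upcasts to complex exactly when a real input is negative.

    import numpy as np

    print(np.lib.scimath.sqrt(-1))      # 1j
    print(np.lib.scimath.sqrt([1, 4]))  # [ 1.  2.]  (stays real)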
@@ -163,7 +169,7 @@ def _fix_real_abs_gt_1(x):
array([ 0.+0.j, 2.+0.j])
"""
x = asarray(x)
- if any(isreal(x) & (abs(x)>1)):
+ if any(isreal(x) & (abs(x) > 1)):
x = _tocomplex(x)
return x
diff --git a/numpy/lib/setup.py b/numpy/lib/setup.py
index 153af314c..68d99c33a 100644
--- a/numpy/lib/setup.py
+++ b/numpy/lib/setup.py
@@ -9,7 +9,6 @@ def configuration(parent_package='',top_path=None):
config.add_include_dirs(join('..', 'core', 'include'))
-
config.add_extension('_compiled_base',
sources=[join('src', '_compiled_base.c')]
)
@@ -19,6 +18,6 @@ def configuration(parent_package='',top_path=None):
return config
-if __name__=='__main__':
+if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 929646de6..70fa3ab03 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -201,7 +201,8 @@ def apply_over_axes(func, a, axes):
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
- if axis < 0: axis = N + axis
+ if axis < 0:
+ axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
@@ -373,7 +374,7 @@ def _replace_zero_by_x_arrays(sub_arys):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
-def array_split(ary,indices_or_sections,axis = 0):
+def array_split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
@@ -397,23 +398,26 @@ def array_split(ary,indices_or_sections,axis = 0):
Ntotal = ary.shape[axis]
except AttributeError:
Ntotal = len(ary)
- try: # handle scalar case.
+ try:
+ # handle scalar case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
- except TypeError: #indices_or_sections is a scalar, not an array.
+ except TypeError:
+ # indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
raise ValueError('number sections must be larger than 0.')
Neach_section, extras = divmod(Ntotal, Nsections)
- section_sizes = [0] + \
- extras * [Neach_section+1] + \
- (Nsections-extras) * [Neach_section]
+ section_sizes = ([0] +
+ extras * [Neach_section+1] +
+ (Nsections-extras) * [Neach_section])
div_points = _nx.array(section_sizes).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary, axis, 0)
for i in range(Nsections):
- st = div_points[i]; end = div_points[i+1]
+ st = div_points[i]
+ end = div_points[i + 1]
sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
# This "kludge" was introduced here to replace arrays shaped (0, 10)
@@ -493,12 +497,14 @@ def split(ary,indices_or_sections,axis=0):
array([], dtype=float64)]
"""
- try: len(indices_or_sections)
+ try:
+ len(indices_or_sections)
except TypeError:
sections = indices_or_sections
N = ary.shape[axis]
if N % sections:
- raise ValueError('array split does not result in an equal division')
+ raise ValueError(
+ 'array split does not result in an equal division')
res = array_split(ary, indices_or_sections, axis)
return res
@@ -850,7 +856,7 @@ def tile(A, reps):
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
for i, nrep in enumerate(tup):
- if nrep!=1:
+ if nrep != 1:
c = c.reshape(-1, n).repeat(nrep, 0)
dim_in = shape[i]
dim_out = dim_in*nrep
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 1ffaaee36..12f8bbf13 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -12,9 +12,10 @@ import numpy as np
__all__ = ['broadcast_arrays']
class DummyArray(object):
- """ Dummy object that just exists to hang __array_interface__ dictionaries
+ """Dummy object that just exists to hang __array_interface__ dictionaries
and possibly keep alive a reference to a base array.
"""
+
def __init__(self, interface, base=None):
self.__array_interface__ = interface
self.base = base
@@ -81,8 +82,8 @@ def broadcast_arrays(*args):
strides = [list(x.strides) for x in args]
nds = [len(s) for s in shapes]
biggest = max(nds)
- # Go through each array and prepend dimensions of length 1 to each of the
- # shapes in order to make the number of dimensions equal.
+ # Go through each array and prepend dimensions of length 1 to each of
+ # the shapes in order to make the number of dimensions equal.
for i in range(len(args)):
diff = biggest - nds[i]
if diff > 0:
@@ -99,23 +100,24 @@ def broadcast_arrays(*args):
raise ValueError("shape mismatch: two or more arrays have "
"incompatible dimensions on axis %r." % (axis,))
elif len(unique) == 2:
- # There is exactly one non-1 length. The common shape will take this
- # value.
+ # There is exactly one non-1 length. The common shape will take
+ # this value.
unique.remove(1)
new_length = unique.pop()
common_shape.append(new_length)
- # For each array, if this axis is being broadcasted from a length of
- # 1, then set its stride to 0 so that it repeats its data.
+ # For each array, if this axis is being broadcasted from a
+ # length of 1, then set its stride to 0 so that it repeats its
+ # data.
for i in range(len(args)):
if shapes[i][axis] == 1:
shapes[i][axis] = new_length
strides[i][axis] = 0
else:
- # Every array has a length of 1 on this axis. Strides can be left
- # alone as nothing is broadcasted.
+ # Every array has a length of 1 on this axis. Strides can be
+ # left alone as nothing is broadcasted.
common_shape.append(1)
# Construct the new arrays.
broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in
- zip(args, shapes, strides)]
+ zip(args, shapes, strides)]
return broadcasted
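
A quick look at the zero-stride broadcasting constructed above: a
broadcast axis repeats its data through a stride of 0 instead of
copying it.

    import numpy as np

    x = np.array([[1], [2], [3]])
    y = np.array([4, 5])
    bx, by = np.broadcast_arrays(x, y)
    print(bx.shape, by.shape)  # (3, 2) (3, 2)
    print(by.strides)          # (0, 8) on a 64-bit build: axis 0 repeats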
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 1ed5bf32a..a45d0bd86 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -58,7 +58,7 @@ def mintypecode(typechars,typeset='GDFgdf',default='d'):
'G'
"""
- typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char\
+ typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char
for t in typechars]
intersection = [t for t in typecodes if t in typeset]
if not intersection:
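
Hedged examples of ``mintypecode``'s selection rule: it returns the
smallest typecode in `typeset` that covers all inputs, or `default`
when none match.

    import numpy as np

    print(np.mintypecode(['d', 'f', 'd']))       # 'd'
    print(np.mintypecode('abceh', default='G'))  # 'G' (nothing matched)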
@@ -266,7 +266,7 @@ def iscomplexobj(x):
True
"""
- return issubclass( asarray(x).dtype.type, _nx.complexfloating)
+ return issubclass(asarray(x).dtype.type, _nx.complexfloating)
def isrealobj(x):
"""
@@ -300,7 +300,7 @@ def isrealobj(x):
False
"""
- return not issubclass( asarray(x).dtype.type, _nx.complexfloating)
+ return not issubclass(asarray(x).dtype.type, _nx.complexfloating)
#-----------------------------------------------------------------------------
@@ -464,28 +464,28 @@ def asscalar(a):
#-----------------------------------------------------------------------------
-_namefromtype = {'S1' : 'character',
- '?' : 'bool',
- 'b' : 'signed char',
- 'B' : 'unsigned char',
- 'h' : 'short',
- 'H' : 'unsigned short',
- 'i' : 'integer',
- 'I' : 'unsigned integer',
- 'l' : 'long integer',
- 'L' : 'unsigned long integer',
- 'q' : 'long long integer',
- 'Q' : 'unsigned long long integer',
- 'f' : 'single precision',
- 'd' : 'double precision',
- 'g' : 'long precision',
- 'F' : 'complex single precision',
- 'D' : 'complex double precision',
- 'G' : 'complex long double precision',
- 'S' : 'string',
- 'U' : 'unicode',
- 'V' : 'void',
- 'O' : 'object'
+_namefromtype = {'S1': 'character',
+ '?': 'bool',
+ 'b': 'signed char',
+ 'B': 'unsigned char',
+ 'h': 'short',
+ 'H': 'unsigned short',
+ 'i': 'integer',
+ 'I': 'unsigned integer',
+ 'l': 'long integer',
+ 'L': 'unsigned long integer',
+ 'q': 'long long integer',
+ 'Q': 'unsigned long long integer',
+ 'f': 'single precision',
+ 'd': 'double precision',
+ 'g': 'long precision',
+ 'F': 'complex single precision',
+ 'D': 'complex double precision',
+ 'G': 'complex long double precision',
+ 'S': 'string',
+ 'U': 'unicode',
+ 'V': 'void',
+ 'O': 'object'
}
def typename(char):
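
The table above backs ``np.typename``:

    import numpy as np

    print(np.typename('d'))   # double precision
    print(np.typename('S1'))  # character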
@@ -544,12 +544,12 @@ def typename(char):
#determine the "minimum common type" for a group of arrays.
array_type = [[_nx.single, _nx.double, _nx.longdouble],
[_nx.csingle, _nx.cdouble, _nx.clongdouble]]
-array_precision = {_nx.single : 0,
- _nx.double : 1,
- _nx.longdouble : 2,
- _nx.csingle : 0,
- _nx.cdouble : 1,
- _nx.clongdouble : 2}
+array_precision = {_nx.single: 0,
+ _nx.double: 1,
+ _nx.longdouble: 2,
+ _nx.csingle: 0,
+ _nx.cdouble: 1,
+ _nx.clongdouble: 2}
def common_type(*arrays):
"""
Return a scalar type which is common to the input arrays.
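
A usage sketch for the precision table above via ``common_type``:
integer inputs are treated as double precision, and any complex input
forces a complex result.

    import numpy as np

    print(np.common_type(np.arange(4)))                    # float64
    print(np.common_type(np.arange(4), np.array([1.0j])))  # complex128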
diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py
index 3dde538d8..bb5bec628 100644
--- a/numpy/lib/user_array.py
+++ b/numpy/lib/user_array.py
@@ -7,29 +7,33 @@ complete.
from __future__ import division, absolute_import, print_function
from numpy.core import (
- array, asarray, absolute, add, subtract, multiply, divide,
- remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
- bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
- greater_equal, shape, reshape, arange, sin, sqrt, transpose
- )
+ array, asarray, absolute, add, subtract, multiply, divide,
+ remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
+ bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
+ greater_equal, shape, reshape, arange, sin, sqrt, transpose
+)
from numpy.compat import long
+
class container(object):
+
def __init__(self, data, dtype=None, copy=True):
self.array = array(data, dtype, copy=copy)
def __repr__(self):
if len(self.shape) > 0:
- return self.__class__.__name__+repr(self.array)[len("array"):]
+ return self.__class__.__name__ + repr(self.array)[len("array"):]
else:
- return self.__class__.__name__+"("+repr(self.array)+")"
+ return self.__class__.__name__ + "(" + repr(self.array) + ")"
- def __array__(self,t=None):
- if t: return self.array.astype(t)
+ def __array__(self, t=None):
+ if t:
+ return self.array.astype(t)
return self.array
# Array as sequence
- def __len__(self): return len(self.array)
+ def __len__(self):
+ return len(self.array)
def __getitem__(self, index):
return self._rc(self.array[index])
@@ -37,19 +41,21 @@ class container(object):
def __getslice__(self, i, j):
return self._rc(self.array[i:j])
-
def __setitem__(self, index, value):
self.array[index] = asarray(value, self.dtype)
+
def __setslice__(self, i, j, value):
self.array[i:j] = asarray(value, self.dtype)
def __abs__(self):
return self._rc(absolute(self.array))
+
def __neg__(self):
return self._rc(-self.array)
def __add__(self, other):
- return self._rc(self.array+asarray(other))
+ return self._rc(self.array + asarray(other))
+
__radd__ = __add__
def __iadd__(self, other):
@@ -57,32 +63,40 @@ class container(object):
return self
def __sub__(self, other):
- return self._rc(self.array-asarray(other))
+ return self._rc(self.array - asarray(other))
+
def __rsub__(self, other):
- return self._rc(asarray(other)-self.array)
+ return self._rc(asarray(other) - self.array)
+
def __isub__(self, other):
subtract(self.array, other, self.array)
return self
def __mul__(self, other):
return self._rc(multiply(self.array, asarray(other)))
+
__rmul__ = __mul__
+
def __imul__(self, other):
multiply(self.array, other, self.array)
return self
def __div__(self, other):
return self._rc(divide(self.array, asarray(other)))
+
def __rdiv__(self, other):
return self._rc(divide(asarray(other), self.array))
+
def __idiv__(self, other):
divide(self.array, other, self.array)
return self
def __mod__(self, other):
return self._rc(remainder(self.array, other))
+
def __rmod__(self, other):
return self._rc(remainder(other, self.array))
+
def __imod__(self, other):
remainder(self.array, other, self.array)
return self
@@ -90,59 +104,74 @@ class container(object):
def __divmod__(self, other):
return (self._rc(divide(self.array, other)),
self._rc(remainder(self.array, other)))
+
def __rdivmod__(self, other):
return (self._rc(divide(other, self.array)),
self._rc(remainder(other, self.array)))
def __pow__(self, other):
return self._rc(power(self.array, asarray(other)))
+
def __rpow__(self, other):
return self._rc(power(asarray(other), self.array))
+
def __ipow__(self, other):
power(self.array, other, self.array)
return self
def __lshift__(self, other):
return self._rc(left_shift(self.array, other))
+
def __rshift__(self, other):
return self._rc(right_shift(self.array, other))
+
def __rlshift__(self, other):
return self._rc(left_shift(other, self.array))
+
def __rrshift__(self, other):
return self._rc(right_shift(other, self.array))
+
def __ilshift__(self, other):
left_shift(self.array, other, self.array)
return self
+
def __irshift__(self, other):
right_shift(self.array, other, self.array)
return self
def __and__(self, other):
return self._rc(bitwise_and(self.array, other))
+
def __rand__(self, other):
return self._rc(bitwise_and(other, self.array))
+
def __iand__(self, other):
bitwise_and(self.array, other, self.array)
return self
def __xor__(self, other):
return self._rc(bitwise_xor(self.array, other))
+
def __rxor__(self, other):
return self._rc(bitwise_xor(other, self.array))
+
def __ixor__(self, other):
bitwise_xor(self.array, other, self.array)
return self
def __or__(self, other):
return self._rc(bitwise_or(self.array, other))
+
def __ror__(self, other):
return self._rc(bitwise_or(other, self.array))
+
def __ior__(self, other):
bitwise_or(self.array, other, self.array)
return self
def __pos__(self):
return self._rc(self.array)
+
def __invert__(self):
return self._rc(invert(self.array))
@@ -150,33 +179,62 @@ class container(object):
if len(self.shape) == 0:
return func(self[0])
else:
- raise TypeError("only rank-0 arrays can be converted to Python scalars.")
+ raise TypeError(
+ "only rank-0 arrays can be converted to Python scalars.")
+
+ def __complex__(self):
+ return self._scalarfunc(complex)
- def __complex__(self): return self._scalarfunc(complex)
- def __float__(self): return self._scalarfunc(float)
- def __int__(self): return self._scalarfunc(int)
- def __long__(self): return self._scalarfunc(long)
- def __hex__(self): return self._scalarfunc(hex)
- def __oct__(self): return self._scalarfunc(oct)
+ def __float__(self):
+ return self._scalarfunc(float)
- def __lt__(self, other): return self._rc(less(self.array, other))
- def __le__(self, other): return self._rc(less_equal(self.array, other))
- def __eq__(self, other): return self._rc(equal(self.array, other))
- def __ne__(self, other): return self._rc(not_equal(self.array, other))
- def __gt__(self, other): return self._rc(greater(self.array, other))
- def __ge__(self, other): return self._rc(greater_equal(self.array, other))
+ def __int__(self):
+ return self._scalarfunc(int)
- def copy(self): return self._rc(self.array.copy())
+ def __long__(self):
+ return self._scalarfunc(long)
- def tostring(self): return self.array.tostring()
+ def __hex__(self):
+ return self._scalarfunc(hex)
- def byteswap(self): return self._rc(self.array.byteswap())
+ def __oct__(self):
+ return self._scalarfunc(oct)
- def astype(self, typecode): return self._rc(self.array.astype(typecode))
+ def __lt__(self, other):
+ return self._rc(less(self.array, other))
+
+ def __le__(self, other):
+ return self._rc(less_equal(self.array, other))
+
+ def __eq__(self, other):
+ return self._rc(equal(self.array, other))
+
+ def __ne__(self, other):
+ return self._rc(not_equal(self.array, other))
+
+ def __gt__(self, other):
+ return self._rc(greater(self.array, other))
+
+ def __ge__(self, other):
+ return self._rc(greater_equal(self.array, other))
+
+ def copy(self):
+ return self._rc(self.array.copy())
+
+ def tostring(self):
+ return self.array.tostring()
+
+ def byteswap(self):
+ return self._rc(self.array.byteswap())
+
+ def astype(self, typecode):
+ return self._rc(self.array.astype(typecode))
def _rc(self, a):
- if len(shape(a)) == 0: return a
- else: return self.__class__(a)
+ if len(shape(a)) == 0:
+ return a
+ else:
+ return self.__class__(a)
def __array_wrap__(self, *args):
return self.__class__(args[0])
@@ -200,19 +258,20 @@ class container(object):
# Test of class container
#############################################################
if __name__ == '__main__':
- temp=reshape(arange(10000), (100, 100))
+ temp = reshape(arange(10000), (100, 100))
- ua=container(temp)
+ ua = container(temp)
# new object created begin test
print(dir(ua))
- print(shape(ua), ua.shape) # I have changed Numeric.py
+ print(shape(ua), ua.shape) # I have changed Numeric.py
- ua_small=ua[:3, :5]
+ ua_small = ua[:3, :5]
print(ua_small)
- ua_small[0, 0]=10 # this did not change ua[0,0], which is not normal behavior
+ # this did not change ua[0,0], which is not normal behavior
+ ua_small[0, 0] = 10
print(ua_small[0, 0], ua[0, 0])
- print(sin(ua_small)/3.*6.+sqrt(ua_small**2))
+ print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
print(less(ua_small, 103), type(less(ua_small, 103)))
- print(type(ua_small*reshape(arange(15), shape(ua_small))))
+ print(type(ua_small * reshape(arange(15), shape(ua_small))))
print(reshape(ua_small, (5, 3)))
print(transpose(ua_small))
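
A minimal sketch of building on ``container``; the subclass here is
hypothetical.

    import numpy as np
    from numpy.lib.user_array import container

    class verbose_array(container):
        def __getitem__(self, index):
            print("getting", index)
            return container.__getitem__(self, index)

    v = verbose_array(np.arange(6).reshape(2, 3))
    print(v[1, 2])  # logs the access, then prints 5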
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 17c5b9743..df0052493 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -60,6 +60,7 @@ class _Deprecate(object):
deprecate
"""
+
def __init__(self, old_name=None, new_name=None, message=None):
self.old_name = old_name
self.new_name = new_name
@@ -122,16 +123,16 @@ def deprecate(*args, **kwargs):
func : function
The function to be deprecated.
old_name : str, optional
- The name of the function to be deprecated. Default is None, in which
- case the name of `func` is used.
+ The name of the function to be deprecated. Default is None, in
+ which case the name of `func` is used.
new_name : str, optional
- The new name for the function. Default is None, in which case
- the deprecation message is that `old_name` is deprecated. If given,
- the deprecation message is that `old_name` is deprecated and `new_name`
+ The new name for the function. Default is None, in which case the
+ deprecation message is that `old_name` is deprecated. If given, the
+ deprecation message is that `old_name` is deprecated and `new_name`
should be used instead.
message : str, optional
- Additional explanation of the deprecation. Displayed in the docstring
- after the warning.
+ Additional explanation of the deprecation. Displayed in the
+ docstring after the warning.
Returns
-------
@@ -140,7 +141,8 @@ def deprecate(*args, **kwargs):
Examples
--------
- Note that ``olduint`` returns a value after printing Deprecation Warning:
+ Note that ``olduint`` returns a value after printing a
+ DeprecationWarning:
>>> olduint = np.deprecate(np.uint)
>>> olduint(6)
@@ -183,14 +185,16 @@ def byte_bounds(a):
Parameters
----------
a : ndarray
- Input array. It must conform to the Python-side of the array interface.
+ Input array. It must conform to the Python-side of the array
+ interface.
Returns
-------
(low, high) : tuple of 2 integers
- The first integer is the first byte of the array, the second integer is
- just past the last byte of the array. If `a` is not contiguous it
- will not use every byte between the (`low`, `high`) values.
+ The first integer is the first byte of the array, the second
+ integer is just past the last byte of the array. If `a` is not
+ contiguous it will not use every byte between the (`low`, `high`)
+ values.
Examples
--------
@@ -210,11 +214,11 @@ def byte_bounds(a):
a_data = ai['data'][0]
astrides = ai['strides']
ashape = ai['shape']
-
bytes_a = asarray(a).dtype.itemsize
-
+
a_low = a_high = a_data
- if astrides is None: # contiguous case
+ if astrides is None:
+ # contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
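
A quick check of the contiguous branch above: for a contiguous array,
``high - low`` equals ``size * itemsize``, while a strided view leaves
unused bytes inside its bounds.

    import numpy as np

    a = np.arange(6, dtype=np.int32)
    low, high = np.byte_bounds(a)
    print(high - low == a.size * a.itemsize)  # True
    print(np.byte_bounds(a[::2]))             # bounds of a strided view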
@@ -251,8 +255,8 @@ def who(vardict=None):
Notes
-----
- Prints out the name, shape, bytes and type of all of the ndarrays present
- in `vardict`.
+ Prints out the name, shape, bytes and type of all of the ndarrays
+ present in `vardict`.
Examples
--------
@@ -286,11 +290,11 @@ def who(vardict=None):
idv = id(var)
if idv in cache.keys():
namestr = name + " (%s)" % cache[idv]
- original=0
+ original = 0
else:
cache[idv] = name
namestr = name
- original=1
+ original = 1
shapestr = " x ".join(map(str, var.shape))
bytestr = str(var.nbytes)
sta.append([namestr, shapestr, bytestr, var.dtype.name,
@@ -333,9 +337,9 @@ def who(vardict=None):
# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.
-# combine name and arguments and split to multiple lines of
-# width characters. End lines on a comma and begin argument list
-# indented with the rest of the arguments.
+# combine name and arguments and split to multiple lines of width
+# characters. End lines on a comma and begin argument list indented with
+# the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
@@ -413,7 +417,10 @@ def _info(obj, output=sys.stdout):
print("aligned: ", bp(obj.flags.aligned), file=output)
print("contiguous: ", bp(obj.flags.contiguous), file=output)
print("fortran: ", obj.flags.fortran, file=output)
- print("data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), file=output)
+ print(
+ "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
+ file=output
+ )
print("byteorder: ", end=' ', file=output)
if endian in ['|', '=']:
print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
@@ -428,7 +435,7 @@ def _info(obj, output=sys.stdout):
print("type: %s" % obj.dtype, file=output)
-def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
+def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
"""
Get help information for a function, class, or module.
@@ -437,13 +444,13 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
object : object or str, optional
Input object or name to get information about. If `object` is a
numpy object, its docstring is given. If it is a string, available
- modules are searched for matching objects.
- If None, information about `info` itself is returned.
+ modules are searched for matching objects. If None, information
+ about `info` itself is returned.
maxwidth : int, optional
Printing width.
output : file like object, optional
- File like object that the output is written to, default is ``stdout``.
- The object has to be opened in 'w' or 'a' mode.
+ File like object that the output is written to, default is
+ ``stdout``. The object has to be opened in 'w' or 'a' mode.
toplevel : str, optional
Start search at this level.
@@ -453,8 +460,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
Notes
-----
- When used interactively with an object, ``np.info(obj)`` is equivalent to
- ``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt.
+ When used interactively with an object, ``np.info(obj)`` is equivalent
+ to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
+ prompt.
Examples
--------
@@ -478,10 +486,11 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
"""
global _namedict, _dictlist
# Local import to speed up numpy's import time.
- import pydoc, inspect
+ import pydoc
+ import inspect
- if hasattr(object, '_ppimport_importer') or \
- hasattr(object, '_ppimport_module'):
+ if (hasattr(object, '_ppimport_importer') or
+ hasattr(object, '_ppimport_module')):
object = object._ppimport_module
elif hasattr(object, '_ppimport_attr'):
object = object._ppimport_attr
@@ -499,7 +508,10 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
try:
obj = _namedict[namestr][object]
if id(obj) in objlist:
- print("\n *** Repeat reference found in %s *** " % namestr, file=output)
+ print("\n "
+ "*** Repeat reference found in %s *** " % namestr,
+ file=output
+ )
else:
objlist.append(id(obj))
print(" *** Found in %s ***" % namestr, file=output)
@@ -511,7 +523,10 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
if numfound == 0:
print("Help for %s not found." % object, file=output)
else:
- print("\n *** Total of %d references found. ***" % numfound, file=output)
+ print("\n "
+ "*** Total of %d references found. ***" % numfound,
+ file=output
+ )
elif inspect.isfunction(object):
name = object.__name__
@@ -530,7 +545,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
arguments = "()"
try:
if hasattr(object, '__init__'):
- arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.__func__))
+ arguments = inspect.formatargspec(
+ *inspect.getargspec(object.__init__.__func__)
+ )
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
@@ -559,7 +576,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
continue
thisobj = getattr(object, meth, None)
if thisobj is not None:
- methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
+ methstr, other = pydoc.splitdoc(
+ inspect.getdoc(thisobj) or "None"
+ )
print(" %s -- %s" % (meth, methstr), file=output)
elif (sys.version_info[0] < 3
@@ -569,7 +588,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
print("Instance of class: ", object.__class__.__name__, file=output)
print(file=output)
if hasattr(object, '__call__'):
- arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.__func__))
+ arguments = inspect.formatargspec(
+ *inspect.getargspec(object.__call__.__func__)
+ )
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
@@ -597,7 +618,9 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
elif inspect.ismethod(object):
name = object.__name__
- arguments = inspect.formatargspec(*inspect.getargspec(object.__func__))
+ arguments = inspect.formatargspec(
+ *inspect.getargspec(object.__func__)
+ )
arglist = arguments.split(', ')
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
@@ -628,7 +651,8 @@ def source(object, output=sys.stdout):
Parameters
----------
object : numpy object
- Input object. This can be any object (function, class, module, ...).
+ Input object. This can be any object (function, class, module,
+ ...).
output : file object, optional
If `output` not supplied then source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
@@ -669,7 +693,8 @@ def source(object, output=sys.stdout):
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
-# regexp whose match indicates that the string may contain a function signature
+# regexp whose match indicates that the string may contain a function
+# signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
@@ -727,7 +752,8 @@ def lookfor(what, module=None, import_modules=True, regenerate=False,
# XXX: maybe using a real stemming search engine would be better?
found = []
whats = str(what).lower().split()
- if not whats: return
+ if not whats:
+ return
for name, (docstring, kind, index) in cache.items():
if kind in ('module', 'object'):
@@ -856,7 +882,8 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
- if id(item) in seen: continue
+ if id(item) in seen:
+ continue
seen[id(item)] = True
index += 1
@@ -875,7 +902,8 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
- if os.path.isfile(this_py) and mod_path.endswith('.py'):
+ if (os.path.isfile(this_py) and
+ mod_path.endswith('.py')):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
@@ -935,7 +963,8 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
try:
doc = inspect.getdoc(item)
- except NameError: # ref SWIG's NameError: Unknown C global variable
+ except NameError:
+ # ref SWIG's NameError: Unknown C global variable
doc = None
if doc is not None:
cache[name] = (doc, kind, index)
@@ -999,7 +1028,9 @@ class SafeEval(object):
return node.value
def visitDict(self, node,**kw):
- return dict([(self.visit(k), self.visit(v)) for k, v in node.items])
+ return dict(
+ [(self.visit(k), self.visit(v)) for k, v in node.items]
+ )
def visitTuple(self, node, **kw):
return tuple([self.visit(i) for i in node.nodes])
@@ -1097,8 +1128,8 @@ def safe_eval(source):
Raises
------
SyntaxError
- If the code has invalid Python syntax, or if it contains non-literal
- code.
+ If the code has invalid Python syntax, or if it contains
+ non-literal code.
Examples
--------
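
A usage sketch per the docstring above: literal expressions parse,
anything else raises SyntaxError.

    import numpy as np

    print(np.safe_eval('[1, 2, {"a": 3.0}]'))  # [1, 2, {'a': 3.0}]
    try:
        np.safe_eval('__import__("os")')
    except SyntaxError as err:
        print('rejected:', err)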
@@ -1124,7 +1155,8 @@ def safe_eval(source):
import warnings
with warnings.catch_warnings():
- # compiler package is deprecated for 3.x, which is already solved here
+ # compiler package is deprecated for 3.x, which is already solved
+ # here
warnings.simplefilter('ignore', DeprecationWarning)
try:
import compiler