summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGiampaolo Rodola <g.rodola@gmail.com>2017-05-09 19:40:57 +0200
committerGiampaolo Rodola <g.rodola@gmail.com>2017-05-09 19:40:57 +0200
commitdff3034a4e91f52410e5f23c17b6dd5a527f2aee (patch)
tree7c091f4028887684c6e5d3c76e4e2a4e1e02487b
parent29ee05cb5957f0142093c41fa97d8609d5ccd71a (diff)
parent26bf47f8066dcff9946f01a0af514bbddb1c23ff (diff)
downloadpsutil-dff3034a4e91f52410e5f23c17b6dd5a527f2aee.tar.gz
Merge branch 'master' of github.com:giampaolo/psutil
-rw-r--r--HISTORY.rst2
-rw-r--r--docs/index.rst34
-rw-r--r--psutil/__init__.py47
-rw-r--r--psutil/_common.py105
-rwxr-xr-xpsutil/tests/test_linux.py9
-rwxr-xr-xpsutil/tests/test_memory_leaks.py4
-rwxr-xr-xpsutil/tests/test_misc.py246
7 files changed, 421 insertions, 26 deletions
diff --git a/HISTORY.rst b/HISTORY.rst
index bf7f6367..bc909608 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -7,6 +7,8 @@
**Enhancements**
+- 802_: disk_io_counters() and net_io_counters() numbers no longer wrap
+ (restart from 0). Introduced a new "nowrap" argument.
- 1015_: swap_memory() now relies on /proc/meminfo instead of sysinfo() syscall
so that it can be used in conjunction with PROCFS_PATH in order to retrieve
memory info about Linux containers such as Docker and Heroku.
diff --git a/docs/index.rst b/docs/index.rst
index 4b0c2821..b31f9d73 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -366,7 +366,7 @@ Disks
.. versionchanged::
4.3.0 *percent* value takes root reserved space into account.
-.. function:: disk_io_counters(perdisk=False)
+.. function:: disk_io_counters(perdisk=False, nowrap=True)
Return system-wide disk I/O statistics as a named tuple including the
following fields:
@@ -394,6 +394,13 @@ Disks
the named tuple described above as the values.
See `iotop.py <https://github.com/giampaolo/psutil/blob/master/scripts/iotop.py>`__
for an example application.
+ On some systems such as Linux, on a very busy or long-lived system, the
+ numbers returned by the kernel may overflow and wrap (restart from zero).
+ If *nowrap* is ``True`` psutil will detect and adjust those numbers across
+ function calls and add "old value" to "new value" so that the returned
+ numbers will always be increasing or remain the same, but never decrease.
+ ``disk_io_counters.cache_clear()`` can be used to invalidate the *nowrap*
+ cache.
>>> import psutil
>>> psutil.disk_io_counters()
@@ -404,17 +411,15 @@ Disks
'sda2': sdiskio(read_count=18707, write_count=8830, read_bytes=6060, write_bytes=3443, read_time=24585, write_time=1572),
'sdb1': sdiskio(read_count=161, write_count=0, read_bytes=786432, write_bytes=0, read_time=44, write_time=0)}
- .. warning::
- on some systems such as Linux, on a very busy or long-lived system these
- numbers may wrap (restart from zero), see
- `issue #802 <https://github.com/giampaolo/psutil/issues/802>`__.
- Applications should be prepared to deal with that.
-
.. note::
on Windows ``"diskperf -y"`` command may need to be executed first
otherwise this function won't find any disk.
.. versionchanged::
+ 5.3.0 numbers no longer wrap (restart from zero) across calls thanks to new
+ *nowrap* argument.
+
+ .. versionchanged::
4.0.0 added *busy_time* (Linux, FreeBSD), *read_merged_count* and
*write_merged_count* (Linux) fields.
@@ -442,6 +447,13 @@ Network
If *pernic* is ``True`` return the same information for every network
interface installed on the system as a dictionary with network interface
names as the keys and the named tuple described above as the values.
+ On some systems such as Linux, on a very busy or long-lived system, the
+ numbers returned by the kernel may overflow and wrap (restart from zero).
+ If *nowrap* is ``True`` psutil will detect and adjust those numbers across
+ function calls and add "old value" to "new value" so that the returned
+ numbers will always be increasing or remain the same, but never decrease.
+ ``net_io_counters.cache_clear()`` can be used to invalidate the *nowrap*
+ cache.
>>> import psutil
>>> psutil.net_io_counters()
@@ -455,11 +467,9 @@ Network
and `ifconfig.py <https://github.com/giampaolo/psutil/blob/master/scripts/ifconfig.py>`__
for an example application.
- .. warning::
- on some systems such as Linux, on a very busy or long-lived system these
- numbers may wrap (restart from zero), see
- `issues #802 <https://github.com/giampaolo/psutil/issues/802>`__.
- Applications should be prepared to deal with that.
+ .. versionchanged::
+ 5.3.0 numbers no longer wrap (restart from zero) across calls thanks to new
+ *nowrap* argument.
.. function:: net_connections(kind='inet')
diff --git a/psutil/__init__.py b/psutil/__init__.py
index d39f54f1..8de1cac2 100644
--- a/psutil/__init__.py
+++ b/psutil/__init__.py
@@ -30,6 +30,7 @@ from . import _common
from ._common import deprecated_method
from ._common import memoize
from ._common import memoize_when_activated
+from ._common import wrap_numbers as _wrap_numbers
from ._compat import callable
from ._compat import long
from ._compat import PY3 as _PY3
@@ -2032,7 +2033,7 @@ def disk_partitions(all=False):
return _psplatform.disk_partitions(all)
-def disk_io_counters(perdisk=False):
+def disk_io_counters(perdisk=False, nowrap=True):
"""Return system disk I/O statistics as a namedtuple including
the following fields:
@@ -2040,18 +2041,33 @@ def disk_io_counters(perdisk=False):
- write_count: number of writes
- read_bytes: number of bytes read
- write_bytes: number of bytes written
- - read_time: time spent reading from disk (in milliseconds)
- - write_time: time spent writing to disk (in milliseconds)
+ - read_time: time spent reading from disk (in ms)
+ - write_time: time spent writing to disk (in ms)
- If perdisk is True return the same information for every
+ Platform specific:
+
+ - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
+ - read_merged_count (Linux): number of merged reads
+ - write_merged_count (Linux): number of merged writes
+
+ If *perdisk* is True return the same information for every
physical disk installed on the system as a dictionary
with partition names as the keys and the namedtuple
described above as the values.
+ If *nowrap* is True it detects and adjusts the numbers which overflow
+ and wrap (restart from 0) and adds "old value" to "new value" so that
+ the returned numbers will always be increasing or remain the same,
+ but never decrease.
+ "disk_io_counters.cache_clear()" can be used to invalidate the
+ cache.
+
On recent Windows versions 'diskperf -y' command may need to be
executed first otherwise this function won't find any disk.
"""
rawdict = _psplatform.disk_io_counters()
+ if nowrap:
+ rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
if perdisk:
for disk, fields in rawdict.items():
@@ -2061,12 +2077,17 @@ def disk_io_counters(perdisk=False):
return nt(*[sum(x) for x in zip(*rawdict.values())])
+disk_io_counters.cache_clear = functools.partial(
+ _wrap_numbers.cache_clear, 'psutil.disk_io_counters')
+disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
+
+
# =====================================================================
# --- network related functions
# =====================================================================
-def net_io_counters(pernic=False):
+def net_io_counters(pernic=False, nowrap=True):
"""Return network I/O statistics as a namedtuple including
the following fields:
@@ -2080,12 +2101,21 @@ def net_io_counters(pernic=False):
- dropout: total number of outgoing packets which were dropped
(always 0 on OSX and BSD)
- If pernic is True return the same information for every
+ If *pernic* is True return the same information for every
network interface installed on the system as a dictionary
with network interface names as the keys and the namedtuple
described above as the values.
+
+ If *nowrap* is True it detects and adjusts the numbers which overflow
+ and wrap (restart from 0) and adds "old value" to "new value" so that
+ the returned numbers will always be increasing or remain the same,
+ but never decrease.
+ "net_io_counters.cache_clear()" can be used to invalidate the
+ cache.
"""
rawdict = _psplatform.net_io_counters()
+ if nowrap:
+ rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
if pernic:
for nic, fields in rawdict.items():
rawdict[nic] = _common.snetio(*fields)
@@ -2094,6 +2124,11 @@ def net_io_counters(pernic=False):
return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
+net_io_counters.cache_clear = functools.partial(
+ _wrap_numbers.cache_clear, 'psutil.net_io_counters')
+net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
+
+
def net_connections(kind='inet'):
"""Return system-wide connections as a list of
(fd, family, type, laddr, raddr, status, pid) namedtuples.
diff --git a/psutil/_common.py b/psutil/_common.py
index 8f3b4f41..d58dac6b 100644
--- a/psutil/_common.py
+++ b/psutil/_common.py
@@ -16,7 +16,9 @@ import os
import socket
import stat
import sys
+import threading
import warnings
+from collections import defaultdict
from collections import namedtuple
from socket import AF_INET
from socket import SOCK_DGRAM
@@ -61,7 +63,7 @@ __all__ = [
# utility functions
'conn_tmap', 'deprecated_method', 'isfile_strict', 'memoize',
'parse_environ_block', 'path_exists_strict', 'usage_percent',
- 'supports_ipv6', 'sockfam_to_enum', 'socktype_to_enum',
+ 'supports_ipv6', 'sockfam_to_enum', 'socktype_to_enum', "wrap_numbers",
]
@@ -465,3 +467,104 @@ def deprecated_method(replacement):
return getattr(self, replacement)(*args, **kwargs)
return inner
return outer
+
+
+class _WrapNumbers:
+ """Watches numbers so that they don't overflow and wrap
+ (reset to zero).
+ """
+
+ def __init__(self):
+ self.lock = threading.Lock()
+ self.cache = {}
+ self.reminders = {}
+ self.reminder_keys = {}
+
+ def _add_dict(self, input_dict, name):
+ assert name not in self.cache
+ assert name not in self.reminders
+ assert name not in self.reminder_keys
+ self.cache[name] = input_dict
+ self.reminders[name] = defaultdict(int)
+ self.reminder_keys[name] = defaultdict(set)
+
+ def _remove_dead_reminders(self, input_dict, name):
+ """In case the number of keys changed between calls (e.g. a
+ disk disappears) this removes the entry from self.reminders.
+ """
+ old_dict = self.cache[name]
+ gone_keys = set(old_dict.keys()) - set(input_dict.keys())
+ for gone_key in gone_keys:
+ for remkey in self.reminder_keys[name][gone_key]:
+ del self.reminders[name][remkey]
+ del self.reminder_keys[name][gone_key]
+
+ def run(self, input_dict, name):
+ """Cache dict and sum numbers which overflow and wrap.
+ Return an updated copy of `input_dict`
+ """
+ if name not in self.cache:
+ # This was the first call.
+ self._add_dict(input_dict, name)
+ return input_dict
+
+ self._remove_dead_reminders(input_dict, name)
+
+ old_dict = self.cache[name]
+ new_dict = {}
+ for key in input_dict.keys():
+ input_tuple = input_dict[key]
+ try:
+ old_tuple = old_dict[key]
+ except KeyError:
+ # The input dict has a new key (e.g. a new disk or NIC)
+ # which didn't exist in the previous call.
+ new_dict[key] = input_tuple
+ continue
+
+ bits = []
+ for i in range(len(input_tuple)):
+ input_value = input_tuple[i]
+ old_value = old_tuple[i]
+ remkey = (key, i)
+ if input_value < old_value:
+ # it wrapped!
+ self.reminders[name][remkey] += old_value
+ self.reminder_keys[name][key].add(remkey)
+ bits.append(input_value + self.reminders[name][remkey])
+
+ new_dict[key] = tuple(bits)
+
+ self.cache[name] = input_dict
+ return new_dict
+
+ def cache_clear(self, name=None):
+ """Clear the internal cache, optionally only for function 'name'."""
+ with self.lock:
+ if name is None:
+ self.cache.clear()
+ self.reminders.clear()
+ self.reminder_keys.clear()
+ else:
+ self.cache.pop(name, None)
+ self.reminders.pop(name, None)
+ self.reminder_keys.pop(name, None)
+
+ def cache_info(self):
+ """Return internal cache dicts as a tuple of 3 elements."""
+ with self.lock:
+ return (self.cache, self.reminders, self.reminder_keys)
+
+
+def wrap_numbers(input_dict, name):
+ """Given an `input_dict` and a function `name`, adjust the numbers
+ which "wrap" (restart from zero) across different calls by adding
+ "old value" to "new value" and return an updated dict.
+ """
+ with _wn.lock:
+ return _wn.run(input_dict, name)
+
+
+_wn = _WrapNumbers()
+wrap_numbers.cache_clear = _wn.cache_clear
+wrap_numbers.cache_info = _wn.cache_info
diff --git a/psutil/tests/test_linux.py b/psutil/tests/test_linux.py
index 7906d64e..691e69f5 100755
--- a/psutil/tests/test_linux.py
+++ b/psutil/tests/test_linux.py
@@ -728,7 +728,8 @@ class TestSystemNetwork(unittest.TestCase):
ret['bytes_sent'] = int(re.findall('TX bytes:(\d+)', out)[0])
return ret
- for name, stats in psutil.net_io_counters(pernic=True).items():
+ nio = psutil.net_io_counters(pernic=True, nowrap=False)
+ for name, stats in nio.items():
try:
ifconfig_ret = ifconfig(name)
except RuntimeError:
@@ -873,7 +874,7 @@ class TestSystemDisks(unittest.TestCase):
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock) as m:
- ret = psutil.disk_io_counters()
+ ret = psutil.disk_io_counters(nowrap=False)
assert m.called
self.assertEqual(ret.read_count, 1)
self.assertEqual(ret.read_merged_count, 2)
@@ -905,7 +906,7 @@ class TestSystemDisks(unittest.TestCase):
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock) as m:
- ret = psutil.disk_io_counters()
+ ret = psutil.disk_io_counters(nowrap=False)
assert m.called
self.assertEqual(ret.read_count, 1)
self.assertEqual(ret.read_merged_count, 2)
@@ -939,7 +940,7 @@ class TestSystemDisks(unittest.TestCase):
orig_open = open
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock) as m:
- ret = psutil.disk_io_counters()
+ ret = psutil.disk_io_counters(nowrap=False)
assert m.called
self.assertEqual(ret.read_count, 1)
self.assertEqual(ret.read_bytes, 2 * SECTOR_SIZE)
diff --git a/psutil/tests/test_memory_leaks.py b/psutil/tests/test_memory_leaks.py
index 4f764dbd..5e9cfbf6 100755
--- a/psutil/tests/test_memory_leaks.py
+++ b/psutil/tests/test_memory_leaks.py
@@ -506,7 +506,7 @@ class TestModuleFunctionsLeaks(TestMemLeak):
'/proc/diskstats not available on this Linux version')
@skip_if_linux()
def test_disk_io_counters(self):
- self.execute(psutil.disk_io_counters)
+ self.execute(psutil.disk_io_counters, nowrap=False)
# --- proc
@@ -518,7 +518,7 @@ class TestModuleFunctionsLeaks(TestMemLeak):
@skip_if_linux()
def test_net_io_counters(self):
- self.execute(psutil.net_io_counters)
+ self.execute(psutil.net_io_counters, nowrap=False)
@unittest.skipIf(LINUX,
"worthless on Linux (pure python)")
diff --git a/psutil/tests/test_misc.py b/psutil/tests/test_misc.py
index 6bc2e28c..28c40ed7 100755
--- a/psutil/tests/test_misc.py
+++ b/psutil/tests/test_misc.py
@@ -27,6 +27,7 @@ from psutil import WINDOWS
from psutil._common import memoize
from psutil._common import memoize_when_activated
from psutil._common import supports_ipv6
+from psutil._common import wrap_numbers
from psutil._compat import PY3
from psutil.tests import APPVEYOR
from psutil.tests import bind_socket
@@ -66,8 +67,12 @@ import psutil
import psutil.tests
+# ===================================================================
+# --- Misc / generic tests.
+# ===================================================================
+
+
class TestMisc(unittest.TestCase):
- """Misc / generic tests."""
def test_process__repr__(self, func=repr):
p = psutil.Process()
@@ -378,6 +383,245 @@ class TestMisc(unittest.TestCase):
# ===================================================================
+# --- Tests for wrap_numbers() function.
+# ===================================================================
+
+
+nt = collections.namedtuple('foo', 'a b c')
+
+
+class TestWrapNumbers(unittest.TestCase):
+
+ def setUp(self):
+ wrap_numbers.cache_clear()
+
+ tearDown = setUp
+
+ def test_first_call(self):
+ input = {'disk1': nt(5, 5, 5)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ def test_input_hasnt_changed(self):
+ input = {'disk1': nt(5, 5, 5)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ def test_increase_but_no_wrap(self):
+ input = {'disk1': nt(5, 5, 5)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(10, 15, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(20, 25, 30)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(20, 25, 30)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ def test_wrap(self):
+ # let's say 100 is the threshold
+ input = {'disk1': nt(100, 100, 100)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ # first wrap restarts from 10
+ input = {'disk1': nt(100, 100, 10)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 110)})
+ # then it remains the same
+ input = {'disk1': nt(100, 100, 10)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 110)})
+ # then it goes up
+ input = {'disk1': nt(100, 100, 90)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 190)})
+ # then it wraps again
+ input = {'disk1': nt(100, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 210)})
+ # and remains the same
+ input = {'disk1': nt(100, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(100, 100, 210)})
+ # now wrap another num
+ input = {'disk1': nt(50, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(150, 100, 210)})
+ # and again
+ input = {'disk1': nt(40, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(190, 100, 210)})
+ # keep it the same
+ input = {'disk1': nt(40, 100, 20)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(190, 100, 210)})
+
+ def test_changing_keys(self):
+ # Emulate a case where the second call to disk_io()
+ # (or whatever) provides a new disk, then the new disk
+ # disappears on the third call.
+ input = {'disk1': nt(5, 5, 5)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(5, 5, 5),
+ 'disk2': nt(7, 7, 7)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ input = {'disk1': nt(8, 8, 8)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ def test_changing_keys_w_wrap(self):
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 100)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ # disk 2 wraps
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 10)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 110)})
+ # disk 2 disappears
+ input = {'disk1': nt(50, 50, 50)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+
+ # then it appears again; the old wrap is supposed to be
+ # gone.
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 100)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ # remains the same
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 100)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'), input)
+ # and then wraps again
+ input = {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 10)}
+ self.assertEqual(wrap_numbers(input, 'disk_io'),
+ {'disk1': nt(50, 50, 50),
+ 'disk2': nt(100, 100, 110)})
+
+ def test_real_data(self):
+ d = {'nvme0n1': (300, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
+ 'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
+ 'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
+ 'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)}
+ self.assertEqual(wrap_numbers(d, 'disk_io'), d)
+ self.assertEqual(wrap_numbers(d, 'disk_io'), d)
+ # decrease this ↓
+ d = {'nvme0n1': (100, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
+ 'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
+ 'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
+ 'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)}
+ out = wrap_numbers(d, 'disk_io')
+ self.assertEqual(out['nvme0n1'][0], 400)
+
+ # --- cache tests
+
+ def test_cache_first_call(self):
+ input = {'disk1': nt(5, 5, 5)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(cache[1], {'disk_io': {}})
+ self.assertEqual(cache[2], {'disk_io': {}})
+
+ def test_cache_call_twice(self):
+ input = {'disk1': nt(5, 5, 5)}
+ wrap_numbers(input, 'disk_io')
+ input = {'disk1': nt(10, 10, 10)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}})
+ self.assertEqual(cache[2], {'disk_io': {}})
+
+ def test_cache_wrap(self):
+ # let's say 100 is the threshold
+ input = {'disk1': nt(100, 100, 100)}
+ wrap_numbers(input, 'disk_io')
+
+ # first wrap restarts from 10
+ input = {'disk1': nt(100, 100, 10)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 100}})
+ self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})
+
+ def assert_():
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0,
+ ('disk1', 2): 100}})
+ self.assertEqual(cache[2],
+ {'disk_io': {'disk1': set([('disk1', 2)])}})
+
+ # then it remains the same
+ input = {'disk1': nt(100, 100, 10)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ assert_()
+
+ # then it goes up
+ input = {'disk1': nt(100, 100, 90)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ assert_()
+
+ # then it wraps again
+ input = {'disk1': nt(100, 100, 20)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 190}})
+ self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})
+
+ def test_cache_changing_keys(self):
+ input = {'disk1': nt(5, 5, 5)}
+ wrap_numbers(input, 'disk_io')
+ input = {'disk1': nt(5, 5, 5),
+ 'disk2': nt(7, 7, 7)}
+ wrap_numbers(input, 'disk_io')
+ cache = wrap_numbers.cache_info()
+ self.assertEqual(cache[0], {'disk_io': input})
+ self.assertEqual(
+ cache[1],
+ {'disk_io': {('disk1', 0): 0, ('disk1', 1): 0, ('disk1', 2): 0}})
+ self.assertEqual(cache[2], {'disk_io': {}})
+
+ def test_cache_clear(self):
+ input = {'disk1': nt(5, 5, 5)}
+ wrap_numbers(input, 'disk_io')
+ wrap_numbers(input, 'disk_io')
+ wrap_numbers.cache_clear('disk_io')
+ self.assertEqual(wrap_numbers.cache_info(), ({}, {}, {}))
+ wrap_numbers.cache_clear('disk_io')
+ wrap_numbers.cache_clear('?!?')
+
+ def test_cache_clear_public_apis(self):
+ psutil.disk_io_counters()
+ psutil.net_io_counters()
+ caches = wrap_numbers.cache_info()
+ for cache in caches:
+ self.assertIn('psutil.disk_io_counters', cache)
+ self.assertIn('psutil.net_io_counters', cache)
+
+ psutil.disk_io_counters.cache_clear()
+ caches = wrap_numbers.cache_info()
+ for cache in caches:
+ self.assertIn('psutil.net_io_counters', cache)
+ self.assertNotIn('psutil.disk_io_counters', cache)
+
+ psutil.net_io_counters.cache_clear()
+ caches = wrap_numbers.cache_info()
+ self.assertEqual(caches, ({}, {}, {}))
+
+
+# ===================================================================
# --- Example script tests
# ===================================================================