author     kbespalov <kbespalov@mirantis.com>          2016-08-12 15:57:23 +0300
committer  Kirill Bespalov <kbespalov@mirantis.com>    2016-08-16 14:32:19 +0000
commit     6a41a81c9eeac9e6ebd20a9d753c75350b4a83cf (patch)
tree       1ed5d3885ceb92b2f951575ca02a1f3d159d83ac /tools
parent     ee8fff03d989e1a73068262d072f483b8c779163 (diff)
download   oslo-messaging-6a41a81c9eeac9e6ebd20a9d753c75350b4a83cf.tar.gz
Fix calculation of duration in simulator.py

When we calculate metrics like msg/sec, latency, etc., we expect the start
and end times to be the times of the first and last processed messages.

RPC Server life timeline:

    [----0..5 sec----][---5..10 sec---][---10..15 sec--]
     waiting clients    10 msg recved     wait sigint

expected: duration 5 sec, 2 msg/sec
actual (incorrect): duration 15 sec, 0.6 msg/sec

There is no reason to set the boundaries if the server was idle for a few
seconds before the clients ran and after they finished.

Change-Id: I33e0a605b54ea7b89977504892528c41c3b00a68
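To make the arithmetic above concrete, here is a minimal standalone sketch of
the two duration calculations (not part of the patch; the timestamps are
hypothetical and chosen to match the timeline):

    # Sketch of the duration bug described above (hypothetical timestamps).
    server_start, server_stop = 0.0, 15.0                     # server alive for 15 sec
    msg_timestamps = [5.0 + i * 5.0 / 9 for i in range(10)]   # 10 msgs within 5..10 sec

    count = len(msg_timestamps)

    # Incorrect: the duration spans the whole server lifetime, idle time included.
    bad_duration = server_stop - server_start                  # 15 sec
    print(count / bad_duration)                                # ~0.67 msg/sec (0.6 above)

    # Correct: the duration spans only the first to the last processed message.
    good_duration = max(msg_timestamps) - min(msg_timestamps)  # 5 sec
    print(count / good_duration)                               # 2.0 msg/sec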
Diffstat (limited to 'tools')

 tools/simulator.py (rwxr-xr-x) | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/tools/simulator.py b/tools/simulator.py
index 2f3161b..8a3ff5a 100755
--- a/tools/simulator.py
+++ b/tools/simulator.py
@@ -125,6 +125,9 @@ class MessageStatsCollector(object):
     def monitor(self):
         global IS_RUNNING
         if IS_RUNNING:
+            # NOTE(kbespalov): this does not work precisely, because
+            # the monitor actually fires at 1 sec +/- 150 ms intervals
+            # due to high threading contention between the RPC clients
             threading.Timer(1.0, self.monitor).start()
         now = time.time()
@@ -187,8 +190,14 @@ class MessageStatsCollector(object):
         for point in itertools.chain(*(c.get_series() for c in collectors)):
             count += point['count']
             size += point['size']
-            start = min(start, point['timestamp'])
-            end = max(end, point['timestamp'])
+            if point['count']:
+                # NOTE(kbespalov):
+                # we expect the start and end times to be the times of
+                # the first and last processed messages; there is no
+                # reason to set the boundaries if the server was idle
+                # before the clients ran and after they finished.
+                start = min(start, point['timestamp'])
+                end = max(end, point['timestamp'])
             if 'latency' in point:
                 sum_latencies += point['latency'] * point['count']
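As an aside on the NOTE in the first hunk: one common way to keep a
self-rescheduling timer close to its nominal period is to schedule each tick
against an absolute deadline, so a late tick is followed by a shorter wait.
A minimal sketch, assuming only the standard library; the class name and
structure are illustrative, not part of this change or of oslo.messaging:

    import threading
    import time

    class DriftFreeMonitor(object):
        """Illustrative periodic monitor; not part of this change."""

        def __init__(self, interval=1.0):
            self.interval = interval
            self.next_deadline = time.time()

        def monitor(self):
            # Reschedule against an absolute deadline instead of a fixed
            # 1.0 sec delay: a tick that fires 150 ms late is followed by
            # an ~850 ms wait, so the error does not accumulate. Per-tick
            # jitter from thread contention is still possible.
            self.next_deadline += self.interval
            delay = max(0.0, self.next_deadline - time.time())
            threading.Timer(delay, self.monitor).start()
            # ... collect the per-second statistics here ...

Calling monitor() once starts the loop; a real implementation would also
check a running flag (like IS_RUNNING above) before rescheduling.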