From 28c25527c25babcf31e4b00dea316ca6f8612079 Mon Sep 17 00:00:00 2001
From: Tim Peters
Date: Fri, 2 Aug 2002 21:48:06 +0000
Subject: Hmm! I thought I checked this in before! Oh well.

Added new heapify() function, which transforms an arbitrary list into a
heap in linear time; that's a fundamental tool for using heaps in real
life <wink>.

Added heapify() test. Added a "less naive" N-best algorithm to the test
suite, and noted that this could actually go much faster (building on
heapify()) if we had max-heaps instead of min-heaps (the iterative method
is appropriate when all the data isn't known in advance, but when it is
known in advance the tradeoffs get murkier).
---
 Lib/test/test_heapq.py | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

(limited to 'Lib/test/test_heapq.py')

diff --git a/Lib/test/test_heapq.py b/Lib/test/test_heapq.py
index 879899e35c..1330f1249c 100644
--- a/Lib/test/test_heapq.py
+++ b/Lib/test/test_heapq.py
@@ -2,7 +2,7 @@
 
 from test.test_support import verify, vereq, verbose, TestFailed
 
-from heapq import heappush, heappop
+from heapq import heappush, heappop, heapify
 import random
 
 def check_invariant(heap):
@@ -40,6 +40,24 @@ def test_main():
         heappop(heap)
     heap.sort()
     vereq(heap, data_sorted[-10:])
+    # 4) Test heapify.
+    for size in range(30):
+        heap = [random.random() for dummy in range(size)]
+        heapify(heap)
+        check_invariant(heap)
+    # 5) Less-naive "N-best" algorithm, much faster (if len(data) is big
+    # enough <wink>) than sorting all of data. However, if we had a max
+    # heap instead of a min heap, it would go much faster still via
+    # heapify'ing all of data (linear time), then doing 10 heappops
+    # (10 log-time steps).
+    heap = data[:10]
+    heapify(heap)
+    for item in data[10:]:
+        if item > heap[0]:  # this gets rarer and rarer the longer we run
+            heappush(heap, item)
+            heappop(heap)
+    heap.sort()
+    vereq(heap, data_sorted[-10:])
     # Make user happy
     if verbose:
         print "All OK"
--
cgit v1.2.1
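
As a reference for what heapify() does, here is a minimal pure-Python
sketch of the linear-time, bottom-up heap construction. It is
illustrative only, not the heapq implementation; the names
naive_heapify and _sift_down are made up for this sketch:

    import random

    def _sift_down(heap, pos, n):
        # Hypothetical helper: move heap[pos] down until the min-heap
        # invariant holds for the subtree rooted at pos.
        while True:
            child = 2 * pos + 1              # left child of pos
            if child >= n:
                break
            if child + 1 < n and heap[child + 1] < heap[child]:
                child += 1                   # right child is smaller
            if heap[pos] <= heap[child]:
                break
            heap[pos], heap[child] = heap[child], heap[pos]
            pos = child

    def naive_heapify(heap):
        # Sift down every internal node, deepest first.  Most nodes root
        # tiny subtrees, so the work sums to O(n), not O(n log n).
        n = len(heap)
        for pos in reversed(range(n // 2)):
            _sift_down(heap, pos, n)

    data = [random.random() for dummy in range(30)]
    naive_heapify(data)
    assert all(data[(i - 1) // 2] <= data[i] for i in range(1, len(data)))

Only the n // 2 internal nodes are sifted, deepest first; since most of
them root tiny subtrees, the total number of swaps is bounded by O(n)
rather than O(n log n).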
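
The max-heap idea from the commit message can also be approximated with
the existing min-heap primitives by negating the data: heapify() the
negations in linear time, then recover the 10 best with 10 log-time
heappops. A sketch under that assumption (numeric data only;
nbest_via_negation is a hypothetical name, not part of heapq):

    import random
    from heapq import heapify, heappop

    def nbest_via_negation(data, n=10):
        # Hypothetical sketch: emulate a max-heap with heapq's min-heap
        # by negating every value.  heapify() is O(len(data)); each
        # heappop() is O(log len(data)), done only n times.
        neg = [-x for x in data]
        heapify(neg)
        return sorted(-heappop(neg) for dummy in range(n))

    data = [random.random() for dummy in range(1000)]
    assert nbest_via_negation(data) == sorted(data)[-10:]

The iterative method in the patch remains the right choice when the
data arrives incrementally; the negation trick only applies when all of
data is known in advance, which is exactly the tradeoff the commit
message notes.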