summaryrefslogtreecommitdiff
path: root/source4/scripting/bin/samba_kcc
blob: d483d01c163f1e592be18bc72d27da1bcead122d (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
#!/usr/bin/env python
#
# Compute our KCC topology
#
# Copyright (C) Dave Craft 2011
# Copyright (C) Andrew Bartlett 2015
#
# Andrew Bartlett's alleged work performed by his underlings Douglas
# Bagnall and Garming Sam.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os
import sys
import random
import uuid

# ensure we get messages out immediately, so they get in the samba logs,
# and don't get swallowed by a timeout
os.environ['PYTHONUNBUFFERED'] = '1'

# forcing GMT avoids a problem in some timezones with kerberos. Both MIT
# heimdal can get mutual authentication errors due to the 24 second difference
# between UTC and GMT when using some zone files (eg. the PDT zone from
# the US)
os.environ["TZ"] = "GMT"

# Find right directory when running from source tree
sys.path.insert(0, "bin/python")

import optparse
import logging
import itertools
import heapq
import time
from functools import partial

from samba import (
    getopt as options,
    ldb,
    dsdb,
    drs_utils,
    nttime2unix)
from samba.auth import system_session
from samba.samdb import SamDB
from samba.dcerpc import drsuapi
from samba.kcc_utils import *
from samba.graph_utils import *
from samba import ldif_utils


class KCC(object):
    """The Knowledge Consistency Checker class.

    A container for objects and methods allowing a run of the KCC.  Produces a
    set of connections in the samdb for which the Distributed Replication
    Service can then utilize to replicate naming contexts

    :param unix_now: The putative current time in seconds since 1970.
    :param read_only: Don't write to the database.
    :param verify: Check topological invariants for the generated graphs
    :param debug: Write verbosely to stderr.
    :param dot_files: write Graphviz files in /tmp showing topology
    """
    def __init__(self):
        """Set up the empty indices and state for a KCC run.

        The partition table can hold our local DC's partitions or all
        the partitions in the forest, depending on which loaders run.
        """
        # Topology object indices, filled in by the load_* methods.
        self.part_table = {}        # Partition objects by partition dn
        self.site_table = {}        # Site objects by site GUID string
        self.transport_table = {}   # Transport objects by GUID string
        self.ip_transport = None    # the special 'IP' transport
        self.sitelink_table = {}    # SiteLink objects by dn string

        # DSAs are indexed twice: by DN string and by GUID string.
        self.dsa_by_dnstr = {}
        self.dsa_by_guid = {}

        # Bound-method shortcuts for the common lookups.
        self.get_dsa_by_guidstr = self.dsa_by_guid.get
        self.get_dsa = self.dsa_by_dnstr.get

        # TODO: These should be backed by a 'permanent' store so that when
        # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
        # the failure information can be returned
        self.kcc_failed_links = {}
        self.kcc_failed_connections = set()

        # Used in inter-site topology computation: the NTDSConnection
        # objects that must survive the pruning of un-needed connections.
        self.kept_connections = set()

        # Identity of the local DSA and its site, discovered later.
        self.my_dsa_dnstr = None
        self.my_dsa = None
        self.my_site_dnstr = None
        self.my_site = None

        self.samdb = None

    def load_all_transports(self):
        """Loads the inter-site transport objects for Sites

        :return: None
        :raise KCCError: if no IP transport is found
        """
        try:
            res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=interSiteTransport)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find inter-site transports - (%s)" %
                           estr)

        for msg in res:
            dnstr = str(msg.dn)

            transport = Transport(dnstr)

            transport.load_transport(self.samdb)
            self.transport_table.setdefault(str(transport.guid),
                                            transport)
            if transport.name == 'IP':
                self.ip_transport = transport

        if self.ip_transport is None:
            raise KCCError("there doesn't seem to be an IP transport")

    def load_all_sitelinks(self):
        """Loads the inter-site siteLink objects

        :return: None
        :raise KCCError: if site-links aren't found
        """
        try:
            res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=siteLink)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr)

        for msg in res:
            dnstr = str(msg.dn)

            # already loaded
            if dnstr in self.sitelink_table:
                continue

            sitelink = SiteLink(dnstr)

            sitelink.load_sitelink(self.samdb)

            # Assign this siteLink to table
            # and index by dn
            self.sitelink_table[dnstr] = sitelink

    def load_site(self, dn_str):
        """Helper for load_my_site and load_all_sites.

        Loads the site and, if it is new, registers all of its DSAs in
        the KCC indices.

        :param dn_str: a site dn_str
        :return: the Site object pertaining to the dn_str
        """
        site = Site(dn_str, unix_now)
        site.load_site(self.samdb)

        guid = str(site.site_guid)
        if guid in self.site_table:
            # Keep the existing object rather than replacing it with an
            # identical copy: something else may hold a reference to the
            # old one, and swapping it out would lead to all manner of
            # confusion and chaos.
            return self.site_table[guid]

        self.site_table[guid] = site
        self.dsa_by_dnstr.update(site.dsa_table)
        self.dsa_by_guid.update((str(dsa.dsa_guid), dsa)
                                for dsa in site.dsa_table.values())
        return site

    def load_my_site(self):
        """Load the Site object for the local DSA.

        Sets self.my_site_dnstr and self.my_site.

        :return: None
        """
        site_dn = "CN=%s,CN=Sites,%s" % (self.samdb.server_site_name(),
                                         self.samdb.get_config_basedn())
        self.my_site_dnstr = site_dn
        self.my_site = self.load_site(site_dn)

    def load_all_sites(self):
        """Discover all sites and create Site objects.

        :return: None
        :raise: KCCError if sites can't be found
        """
        try:
            res = self.samdb.search("CN=Sites,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=site)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find sites - (%s)" % estr)

        for msg in res:
            sitestr = str(msg.dn)
            self.load_site(sitestr)

    def load_my_dsa(self):
        """Discover my nTDSDSA dn thru the rootDSE entry

        :return: None
        :raise: KCCError if DSA can't be found
        """
        dn = ldb.Dn(self.samdb, "<GUID=%s>" % self.samdb.get_ntds_GUID())
        try:
            res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                    attrs=["objectGUID"])
        except ldb.LdbError, (enum, estr):
            logger.warning("Search for %s failed: %s.  This typically happens"
                           " in --importldif mode due to lack of module"
                           " support.", dn, estr)
            try:
                # We work around the failure above by looking at the
                # dsServiceName that was put in the fake rootdse by
                # the --exportldif, rather than the
                # samdb.get_ntds_GUID(). The disadvantage is that this
                # mode requires we modify the @ROOTDSE dnq to support
                # --forced-local-dsa
                service_name_res = self.samdb.search(base="",
                                                     scope=ldb.SCOPE_BASE,
                                                     attrs=["dsServiceName"])
                dn = ldb.Dn(self.samdb,
                            service_name_res[0]["dsServiceName"][0])

                res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                        attrs=["objectGUID"])
            except ldb.LdbError, (enum, estr):
                raise KCCError("Unable to find my nTDSDSA - (%s)" % estr)

        if len(res) != 1:
            raise KCCError("Unable to find my nTDSDSA at %s" %
                           dn.extended_str())

        ntds_guid = misc.GUID(self.samdb.get_ntds_GUID())
        if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid:
            raise KCCError("Did not find the GUID we expected,"
                           " perhaps due to --importldif")

        self.my_dsa_dnstr = str(res[0].dn)

        self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)

        if self.my_dsa_dnstr not in self.dsa_by_dnstr:
            DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsas_by_dnstr:"
                              " it must be RODC.\n"
                              "Let's add it, because my_dsa is special!\n"
                              "(likewise for self.dsa_by_guid of course)" %
                              self.my_dsas_dnstr)

            self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa
            self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa

    def load_all_partitions(self):
        """Discover all NCs thru the Partitions dn and
        instantiate and load the NCs.

        Each NC is inserted into the part_table by partition
        dn string (not the nCName dn string)

        ::returns: Raises KCCError on error
        """
        try:
            res = self.samdb.search("CN=Partitions,%s" %
                                    self.samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=crossRef)")
        except ldb.LdbError, (enum, estr):
            raise KCCError("Unable to find partitions - (%s)" % estr)

        for msg in res:
            partstr = str(msg.dn)

            # already loaded
            if partstr in self.part_table:
                continue

            part = Partition(partstr)

            part.load_partition(self.samdb)
            self.part_table[partstr] = part

    def should_be_present_test(self):
        """Log, for every (loaded partition, local-site DSA) pair,
        whether the NC should be present as a replica on that DSA.
        """
        for part in self.part_table.values():
            for dsadn, dsa in self.my_site.dsa_table.items():
                needed, ro, partial = part.should_be_present(dsa)
                logger.info("dsadn:%s\nncdn:%s\nneeded=%s:ro=%s:partial=%s\n" %
                            (dsadn, part.nc_dnstr, needed, ro, partial))

    def refresh_failed_links_connections(self):
        """Refresh the failed-link and failed-connection tables.

        Based on MS-ADTS 6.2.2.1.  Instead of keeping a NULL link with
        failure_count = 0, such a tuple is simply removed.
        """
        # LINKS: rebuild kcc_failed_links from the repsFrom failure
        # counters of our current replicas.
        self.kcc_failed_links = {}
        current, needed = self.my_dsa.get_rep_tables()
        for replica in current.values():
            # For every possible connection to replicate
            for reps_from in replica.rep_repsFrom:
                failure_count = reps_from.consecutive_sync_failures
                if failure_count <= 0:
                    # healthy link: no tuple is kept at all
                    continue

                dsa_guid = str(reps_from.source_dsa_obj_guid)
                time_first_failure = reps_from.last_success
                last_result = reps_from.last_attempt
                dns_name = reps_from.dns_name1

                failed = self.kcc_failed_links.get(dsa_guid)
                if failed:
                    # Merge with the record made by another replica:
                    # worst failure count, earliest first failure.
                    failed.failure_count = max(failed.failure_count,
                                               failure_count)
                    failed.time_first_failure = min(failed.time_first_failure,
                                                    time_first_failure)
                    failed.last_result = last_result
                else:
                    self.kcc_failed_links[dsa_guid] = \
                        KCCFailedObject(dsa_guid, failure_count,
                                        time_first_failure, last_result,
                                        dns_name)

        # CONNECTIONS: optionally probe the failed connections to see
        # whether any have come back to life.
        restore_connections = set()
        if opts.attempt_live_connections:
            DEBUG("refresh_failed_links: checking if links are still down")
            for connection in self.kcc_failed_connections:
                try:
                    drs_utils.drsuapi_connect(connection.dns_name, lp, creds)
                    # Failed connection is no longer failing
                    restore_connections.add(connection)
                except drs_utils.drsException:
                    # Failed connection still failing
                    connection.failure_count += 1
        else:
            DEBUG("refresh_failed_links: not checking live links because we\n"
                  "weren't asked to --attempt-live-connections")

        # Drop the recovered connections from the failed set
        self.kcc_failed_connections.difference_update(restore_connections)

    def is_stale_link_connection(self, target_dsa):
        """Check whether a link to a remote DSA is stale

        Used in MS-ADTS 6.2.2.2 Intrasite Connection Creation

        Returns True if the remote seems to have been down for at
        least two hours, otherwise False.

        :param target_dsa: the remote DSA object
        :return: True if link is stale, otherwise False
        """
        failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
        if failed_link:
            # failure_count should be > 0, but check anyways
            if failed_link.failure_count > 0:
                unix_first_failure = \
                    nttime2unix(failed_link.time_first_failure)
                # TODO guard against future
                if unix_first_failure > unix_now:
                    # Bug fix: the old backslash-continued literal baked
                    # the continuation line's indentation into the logged
                    # message; use implicit concatenation instead.
                    logger.error("The last success time attribute for "
                                 "repsFrom is in the future!")

                # Perform calculation in seconds
                if (unix_now - unix_first_failure) > 60 * 60 * 2:
                    return True

        # TODO connections

        return False

    # TODO: This should be backed by some form of local database
    def remove_unneeded_failed_links_connections(self):
        """Prune stale entries from the failure-tracking tables.

        Deliberately a no-op in this implementation; the comments below
        describe what a complete version would do.
        """
        # Remove all tuples in kcc_failed_links where failure count = 0
        # In this implementation, this should never happen.

        # Remove all connections which were not used this run or connections
        # that became active during this run.
        pass

    def remove_unneeded_ntdsconn(self, all_connected):
        """Remove unneeded NTDS Connections once topology is calculated

        Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections

        :param all_connected: indicates whether all sites are connected
        :return: None
        """
        mydsa = self.my_dsa

        # New connections won't have GUIDs which are needed for
        # sorting. Add them.
        for cn_conn in mydsa.connect_table.values():
            if cn_conn.guid is None:
                if opts.readonly:
                    # Make up a GUID rather than touching the database
                    cn_conn.guid = misc.GUID(str(uuid.uuid4()))
                    cn_conn.whenCreated = nt_now
                else:
                    cn_conn.load_connection(self.samdb)

        for cn_conn in mydsa.connect_table.values():

            s_dnstr = cn_conn.get_from_dnstr()
            if s_dnstr is None:
                cn_conn.to_be_deleted = True
                continue

            #XXX should an RODC be regarded as same site
            same_site = s_dnstr in self.my_site.dsa_table

            # Given an nTDSConnection object cn, if the DC with the
            # nTDSDSA object dc that is the parent object of cn and
            # the DC with the nTDSDA object referenced by cn!fromServer
            # are in the same site, the KCC on dc deletes cn if all of
            # the following are true:
            #
            # Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            # No site settings object s exists for the local DC's site, or
            # bit NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED is clear in
            # s!options.
            #
            # Another nTDSConnection object cn2 exists such that cn and
            # cn2 have the same parent object, cn!fromServer = cn2!fromServer,
            # and either
            #
            #     cn!whenCreated < cn2!whenCreated
            #
            #     cn!whenCreated = cn2!whenCreated and
            #     cn!objectGUID < cn2!objectGUID
            #
            # Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
            if same_site:
                if not cn_conn.is_generated():
                    continue

                if self.my_site.is_cleanup_ntdsconn_disabled():
                    continue

                # Loop thru connections looking for a duplicate that
                # fulfills the previous criteria
                lesser = False
                packed_guid = ndr_pack(cn_conn.guid)
                for cn2_conn in mydsa.connect_table.values():
                    if cn2_conn is cn_conn:
                        continue

                    s2_dnstr = cn2_conn.get_from_dnstr()

                    # If the NTDS Connections has a different
                    # fromServer field then no match
                    if s2_dnstr != s_dnstr:
                        continue

                    #XXX GUID comparison
                    lesser = (cn_conn.whenCreated < cn2_conn.whenCreated or
                              (cn_conn.whenCreated == cn2_conn.whenCreated and
                               packed_guid < ndr_pack(cn2_conn.guid)))

                    if lesser:
                        break

                if lesser and not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

            # Given an nTDSConnection object cn, if the DC with the nTDSDSA
            # object dc that is the parent object of cn and the DC with
            # the nTDSDSA object referenced by cn!fromServer are in
            # different sites, a KCC acting as an ISTG in dc's site
            # deletes cn if all of the following are true:
            #
            #     Bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options.
            #
            #     cn!fromServer references an nTDSDSA object for a DC
            #     in a site other than the local DC's site.
            #
            #     The keepConnections sequence returned by
            #     CreateIntersiteConnections() does not contain
            #     cn!objectGUID, or cn is "superseded by" (see below)
            #     another nTDSConnection cn2 and keepConnections
            #     contains cn2!objectGUID.
            #
            #     The return value of CreateIntersiteConnections()
            #     was true.
            #
            #     Bit NTDSCONN_OPT_RODC_TOPOLOGY is clear in
            #     cn!options
            #
            else:  # different site

                if not mydsa.is_istg():
                    continue

                if not cn_conn.is_generated():
                    continue

                # TODO
                # We are directly using this connection in intersite or
                # we are using a connection which can supersede this one.
                #
                # MS-ADTS 6.2.2.4 - Removing Unnecessary Connections does not
                # appear to be correct.
                #
                # 1. cn!fromServer and cn!parent appear inconsistent with
                #    no cn2
                # 2. The repsFrom do not imply each other
                #
                if cn_conn in self.kept_connections:  # and not_superceded:
                    continue

                # This is the result of create_intersite_connections
                if not all_connected:
                    continue

                if not cn_conn.is_rodc_topology():
                    cn_conn.to_be_deleted = True

        if mydsa.is_ro() or opts.readonly:
            for connect in mydsa.connect_table.values():
                if connect.to_be_deleted:
                    DEBUG_FN("TO BE DELETED:\n%s" % connect)
                if connect.to_be_added:
                    DEBUG_FN("TO BE ADDED:\n%s" % connect)

            # Peform deletion from our tables but perform
            # no database modification
            mydsa.commit_connections(self.samdb, ro=True)
        else:
            # Commit any modified connections
            mydsa.commit_connections(self.samdb)

    def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
        """Update an repsFrom object if required.

        Part of MS-ADTS 6.2.2.5.

        Update t_repsFrom if necessary to satisfy requirements. Such
        updates are typically required when the IDL_DRSGetNCChanges
        server has moved from one site to another--for example, to
        enable compression when the server is moved from the
        client's site to another site.

        The repsFrom.update_flags bit field may be modified
        auto-magically if any changes are made here. See
        kcc_utils.RepsFromTo for gory details.


        :param n_rep: NC replica we need
        :param t_repsFrom: repsFrom tuple to modify
        :param s_rep: NC replica at source DSA
        :param s_dsa: source DSA
        :param cn_conn: Local DSA NTDSConnection child

        :return: None
        """
        s_dnstr = s_dsa.dsa_dnstr
        update = 0x0

        same_site = s_dnstr in self.my_site.dsa_table

        # if schedule doesn't match then update and modify
        times = convert_schedule_to_repltimes(cn_conn.schedule)
        if times != t_repsFrom.schedule:
            t_repsFrom.schedule = times
            update |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE

        # Bit DRS_PER_SYNC is set in replicaFlags if and only
        # if nTDSConnection schedule has a value v that specifies
        # scheduled replication is to be performed at least once
        # per week.
        if cn_conn.is_schedule_minimum_once_per_week():

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC

        # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
        # if the source DSA and the local DC's nTDSDSA object are
        # in the same site or source dsa is the FSMO role owner
        # of one or more FSMO roles in the NC replica.
        if same_site or n_rep.is_fsmo_role_owner(s_dnstr):

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC

        # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
        # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
        # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
        # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
        # t.replicaFlags if and only if s and the local DC's
        # nTDSDSA object are in different sites.
        if ((cn_conn.options &
             dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0):

            if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:
                # XXX WARNING
                #
                # it LOOKS as if this next test is a bit silly: it
                # checks the flag then sets it if it not set; the same
                # effect could be achieved by unconditionally setting
                # it. But in fact the repsFrom object has special
                # magic attached to it, and altering replica_flags has
                # side-effects. That is bad in my opinion, but there
                # you go.
                if ((t_repsFrom.replica_flags &
                     drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
                    t_repsFrom.replica_flags |= \
                        drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

        elif not same_site:

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY

        # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
        # and only if s and the local DC's nTDSDSA object are
        # not in the same site and the
        # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
        # clear in cn!options
        if (not same_site and
            (cn_conn.options &
             dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION

        # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
        # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
        if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC

        # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
        # set in t.replicaFlags if and only if cn!enabledConnection = false.
        if not cn_conn.is_enabled():

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0):
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0):
                t_repsFrom.replica_flags |= \
                    drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC

        # If s and the local DC's nTDSDSA object are in the same site,
        # cn!transportType has no value, or the RDN of cn!transportType
        # is CN=IP:
        #
        #     Bit DRS_MAIL_REP in t.replicaFlags is clear.
        #
        #     t.uuidTransport = NULL GUID.
        #
        #     t.uuidDsa = The GUID-based DNS name of s.
        #
        # Otherwise:
        #
        #     Bit DRS_MAIL_REP in t.replicaFlags is set.
        #
        #     If x is the object with dsname cn!transportType,
        #     t.uuidTransport = x!objectGUID.
        #
        #     Let a be the attribute identified by
        #     x!transportAddressAttribute. If a is
        #     the dNSHostName attribute, t.uuidDsa = the GUID-based
        #      DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        # It appears that the first statement i.e.
        #
        #     "If s and the local DC's nTDSDSA object are in the same
        #      site, cn!transportType has no value, or the RDN of
        #      cn!transportType is CN=IP:"
        #
        # could be a slightly tighter statement if it had an "or"
        # between each condition.  I believe this should
        # be interpreted as:
        #
        #     IF (same-site) OR (no-value) OR (type-ip)
        #
        # because IP should be the primary transport mechanism
        # (even in inter-site) and the absense of the transportType
        # attribute should always imply IP no matter if its multi-site
        #
        # NOTE MS-TECH INCORRECT:
        #
        #     All indications point to these statements above being
        #     incorrectly stated:
        #
        #         t.uuidDsa = The GUID-based DNS name of s.
        #
        #         Let a be the attribute identified by
        #         x!transportAddressAttribute. If a is
        #         the dNSHostName attribute, t.uuidDsa = the GUID-based
        #         DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
        #
        #     because the uuidDSA is a GUID and not a GUID-base DNS
        #     name.  Nor can uuidDsa hold (s!parent)!a if not
        #     dNSHostName.  What should have been said is:
        #
        #         t.naDsa = The GUID-based DNS name of s
        #
        #     That would also be correct if transportAddressAttribute
        #     were "mailAddress" because (naDsa) can also correctly
        #     hold the SMTP ISM service address.
        #
        nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())

        # We're not currently supporting SMTP replication
        # so is_smtp_replication_available() is currently
        # always returning False
        if ((same_site or
             cn_conn.transport_dnstr is None or
             cn_conn.transport_dnstr.find("CN=IP") == 0 or
             not is_smtp_replication_available())):

            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0):
                t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP

            t_repsFrom.transport_guid = misc.GUID()

            # See (NOTE MS-TECH INCORRECT) above
            if t_repsFrom.version == 0x1:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name1 != nastr:
                    t_repsFrom.dns_name1 = nastr
            else:
                if t_repsFrom.dns_name1 is None or \
                   t_repsFrom.dns_name2 is None or \
                   t_repsFrom.dns_name1 != nastr or \
                   t_repsFrom.dns_name2 != nastr:
                    t_repsFrom.dns_name1 = nastr
                    t_repsFrom.dns_name2 = nastr

        else:
            # XXX This entire branch is NEVER used! Because we don't do SMTP!
            # (see the if condition above). Just close your eyes here.
            if ((t_repsFrom.replica_flags &
                 drsuapi.DRSUAPI_DRS_MAIL_REP) == 0x0):
                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_MAIL_REP

            # We have a transport type but its not an
            # object in the database
            if cn_conn.transport_guid not in self.transport_table:
                raise KCCError("Missing inter-site transport - (%s)" %
                               cn_conn.transport_dnstr)

            x_transport = self.transport_table[str(cn_conn.transport_guid)]

            if t_repsFrom.transport_guid != x_transport.guid:
                t_repsFrom.transport_guid = x_transport.guid

            # See (NOTE MS-TECH INCORRECT) above
            if x_transport.address_attr == "dNSHostName":

                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                else:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:
                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

            else:
                # MS tech specification says we retrieve the named
                # attribute in "transportAddressAttribute" from the parent of
                # the DSA object
                try:
                    pdnstr = s_dsa.get_parent_dnstr()
                    attrs = [x_transport.address_attr]

                    res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
                                            attrs=attrs)
                except ldb.LdbError, (enum, estr):
                    raise KCCError(
                        "Unable to find attr (%s) for (%s) - (%s)" %
                        (x_transport.address_attr, pdnstr, estr))

                msg = res[0]
                nastr = str(msg[x_transport.address_attr][0])

                # See (NOTE MS-TECH INCORRECT) above
                if t_repsFrom.version == 0x1:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name1 != nastr:
                        t_repsFrom.dns_name1 = nastr
                else:
                    if t_repsFrom.dns_name1 is None or \
                       t_repsFrom.dns_name2 is None or \
                       t_repsFrom.dns_name1 != nastr or \
                       t_repsFrom.dns_name2 != nastr:

                        t_repsFrom.dns_name1 = nastr
                        t_repsFrom.dns_name2 = nastr

        if t_repsFrom.is_modified():
            logger.debug("modify_repsFrom(): %s" % t_repsFrom)

    def is_repsFrom_implied(self, n_rep, cn_conn):
        """Decide whether an NTDS Connection implies a repsFrom tuple

        Given an NC replica and an NTDS Connection, determine whether
        the connection implies that a repsFrom tuple should be present
        from the source DSA listed in the connection to the naming
        context.

        :param n_rep: NC replica
        :param cn_conn: NTDS Connection
        :return: (True, source DSA) when implied, otherwise (False, None)
        """
        #XXX different conditions for "implies" than MS-ADTS 6.2.2

        # An NTDS Connection implies a repsFrom tuple only when all of
        # the following hold:
        #
        #    cn!enabledConnection = true.
        #    cn!options does not contain NTDSCONN_OPT_RODC_TOPOLOGY.
        #    cn!fromServer references an nTDSDSA object.
        if not cn_conn.is_enabled() or cn_conn.is_rodc_topology():
            return False, None

        s_dnstr = cn_conn.get_from_dnstr()
        if s_dnstr is None:
            return False, None

        # No DSA matching this source DN string?
        s_dsa = self.get_dsa(s_dnstr)
        if s_dsa is None:
            return False, None

        # An NC replica of the NC must "be present" on the DC to which
        # the nTDSDSA object referenced by cn!fromServer corresponds,
        # while an NC replica of the NC "should be present" on the
        # local DC.
        s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
        if s_rep is None or not s_rep.is_present():
            return False, None

        # The NC replica on the DC referenced by cn!fromServer must be
        # a writable replica, unless the NC replica that "should be
        # present" on the local DC is a partial replica.
        if s_rep.is_ro() and not n_rep.is_partial():
            return False, None

        # Finally: the NC must not be a domain NC, unless the replica
        # that "should be present" on the local DC is partial, or
        # cn!transportType has no value, or cn!transportType has an
        # RDN of CN=IP.
        if (n_rep.is_domain() and
            not n_rep.is_partial() and
            cn_conn.transport_dnstr is not None and
            cn_conn.transport_dnstr.find("CN=IP") != 0):
            return False, None

        return True, s_dsa

    def translate_ntdsconn(self, current_dsa=None):
        """Adjust repsFrom to match NTDSConnections

        This function adjusts values of repsFrom abstract attributes of NC
        replicas on the local DC to match those implied by
        nTDSConnection objects.

        Based on [MS-ADTS] 6.2.2.5

        :param current_dsa: optional DSA on whose behalf we are acting.
        :return: None
        """
        if current_dsa is None:
            current_dsa = self.my_dsa

        if current_dsa.is_translate_ntdsconn_disabled():
            logger.debug("skipping translate_ntdsconn() "
                         "because disabling flag is set")
            return

        logger.debug("translate_ntdsconn(): enter")

        current_rep_table, needed_rep_table = current_dsa.get_rep_tables()

        # Filled in with replicas we currently have that need deleting
        delete_reps = set()

        # We're using the MS notation names here to allow
        # correlation back to the published algorithm.
        #
        # n_rep      - NC replica (n)
        # t_repsFrom - tuple (t) in n!repsFrom
        # s_dsa      - Source DSA of the replica. Defined as nTDSDSA
        #              object (s) such that (s!objectGUID = t.uuidDsa)
        #              In our IDL representation of repsFrom the (uuidDsa)
        #              attribute is called (source_dsa_obj_guid)
        # cn_conn    - (cn) is nTDSConnection object and child of the local
        #               DC's nTDSDSA object and (cn!fromServer = s)
        # s_rep      - source DSA replica of n
        #
        # If we have the replica and its not needed
        # then we add it to the "to be deleted" list.
        for dnstr in current_rep_table:
            if dnstr not in needed_rep_table:
                delete_reps.add(dnstr)

        DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table),
                 len(needed_rep_table), len(delete_reps)))

        if delete_reps:
            DEBUG('deleting these reps: %s' % delete_reps)
            for dnstr in delete_reps:
                del current_rep_table[dnstr]

        # Now perform the scan of replicas we'll need
        # and compare any current repsFrom against the
        # connections
        for n_rep in needed_rep_table.values():

            # load any repsFrom and fsmo roles as we'll
            # need them during connection translation
            n_rep.load_repsFrom(self.samdb)
            n_rep.load_fsmo_roles(self.samdb)

            # Loop thru the existing repsFrom tupples (if any)
            # XXX This is a list and could contain duplicates
            #     (multiple load_repsFrom calls)
            for t_repsFrom in n_rep.rep_repsFrom:

                # for each tuple t in n!repsFrom, let s be the nTDSDSA
                # object such that s!objectGUID = t.uuidDsa
                guidstr = str(t_repsFrom.source_dsa_obj_guid)
                s_dsa = self.get_dsa_by_guidstr(guidstr)

                # Source dsa is gone from config (strange)
                # so cleanup stale repsFrom for unlisted DSA
                if s_dsa is None:
                    logger.warning("repsFrom source DSA guid (%s) not found" %
                                   guidstr)
                    t_repsFrom.to_be_deleted = True
                    continue

                s_dnstr = s_dsa.dsa_dnstr

                # Retrieve my DSAs connection object (if it exists)
                # that specifies the fromServer equivalent to
                # the DSA that is specified in the repsFrom source
                connections = current_dsa.get_connection_by_from_dnstr(s_dnstr)

                # Keep the last non-RODC-topology connection (if any)
                cn_conn = None

                for con in connections:
                    if con.is_rodc_topology():
                        continue
                    cn_conn = con

                # Let (cn) be the nTDSConnection object such that (cn)
                # is a child of the local DC's nTDSDSA object and
                # (cn!fromServer = s) and (cn!options) does not contain
                # NTDSCONN_OPT_RODC_TOPOLOGY or NULL if no such (cn) exists.

                # KCC removes this repsFrom tuple if any of the following
                # is true:
                #     cn = NULL.
                #     [...]

                #XXX varying possible interpretations of rodc_topology
                if cn_conn is None:
                    t_repsFrom.to_be_deleted = True
                    continue

                #     [...] KCC removes this repsFrom tuple if:
                #
                #     No NC replica of the NC "is present" on DSA that
                #     would be source of replica
                #
                #     A writable replica of the NC "should be present" on
                #     the local DC, but a partial replica "is present" on
                #     the source DSA
                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                if s_rep is None or not s_rep.is_present() or \
                   (not n_rep.is_ro() and s_rep.is_partial()):

                    t_repsFrom.to_be_deleted = True
                    continue

                # If the KCC did not remove t from n!repsFrom, it updates t
                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

            # Loop thru connections and add implied repsFrom tuples
            # for each NTDSConnection under our local DSA if the
            # repsFrom is not already present
            for cn_conn in current_dsa.connect_table.values():

                implied, s_dsa = self.is_repsFrom_implied(n_rep, cn_conn)
                if not implied:
                    continue

                # Loop thru the existing repsFrom tupples (if any) and
                # if we already have a tuple for this connection then
                # no need to proceed to add.  It will have been changed
                # to have the correct attributes above
                for t_repsFrom in n_rep.rep_repsFrom:
                    guidstr = str(t_repsFrom.source_dsa_obj_guid)
                    #XXX what?
                    if s_dsa is self.get_dsa_by_guidstr(guidstr):
                        s_dsa = None
                        break

                if s_dsa is None:
                    continue

                # Create a new RepsFromTo and proceed to modify
                # it according to specification
                t_repsFrom = RepsFromTo(n_rep.nc_dnstr)

                t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid

                s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)

                self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)

                # Add to our NC repsFrom as this is newly computed
                if t_repsFrom.is_modified():
                    n_rep.rep_repsFrom.append(t_repsFrom)

            if opts.readonly:
                # Display any to be deleted or modified repsFrom
                text = n_rep.dumpstr_to_be_deleted()
                if text:
                    logger.info("TO BE DELETED:\n%s" % text)
                text = n_rep.dumpstr_to_be_modified()
                if text:
                    logger.info("TO BE MODIFIED:\n%s" % text)

                # Peform deletion from our tables but perform
                # no database modification
                n_rep.commit_repsFrom(self.samdb, ro=True)
            else:
                # Commit any modified repsFrom to the NC replica
                n_rep.commit_repsFrom(self.samdb)

    def merge_failed_links(self):
        """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.

        The KCC on a writable DC attempts to merge the link and connection
        failure information from bridgehead DCs in its own site to help it
        identify failed bridgehead DCs.

        Based on MS-ADTS 6.2.2.3.2 "Merge of kCCFailedLinks and kCCFailedLinks
        from Bridgeheads"

        :return: None
        """
        # 1. Queries every bridgehead server in your site (other than yourself)
        # 2. For every ntDSConnection that references a server in a different
        #    site merge all the failure info
        #
        # XXX - not implemented yet
        if not opts.attempt_live_connections:
            DEBUG_FN("skipping merge_failed_links() because it requires "
                     "real network connections\n"
                     "and we weren't asked to --attempt-live-connections")
            return

        DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")

    def setup_graph(self, part):
        """Set up an intersite graph

        An intersite graph has a Vertex for each site object, a
        MultiEdge for each SiteLink object, and a MutliEdgeSet for
        each siteLinkBridge object (or implied siteLinkBridge). It
        reflects the intersite topology in a slightly more abstract
        graph form.

        Roughly corresponds to MS-ADTS 6.2.2.3.4.3

        :param part: a Partition object
        :returns: an InterSiteGraph object
        """
        guid_to_vertex = {}
        # Create graph
        g = IntersiteGraph()
        # Add a vertex per site
        for site_guid, site in self.site_table.items():
            vertex = Vertex(site, part)
            vertex.guid = site_guid
            vertex.ndrpacked_guid = ndr_pack(site.site_guid)
            g.vertices.add(vertex)
            guid_to_vertex.setdefault(site_guid, []).append(vertex)

        connected_vertices = set()
        for transport_guid, transport in self.transport_table.items():
            # Currently only ever "IP"
            if transport.name != 'IP':
                DEBUG_FN("setup_graph is ignoring transport %s" %
                         transport.name)
                continue
            # Only the siteLink objects matter here; the table keys
            # (their DNs) are not used.
            for site_link in self.sitelink_table.values():
                new_edge = create_edge(transport_guid, site_link,
                                       guid_to_vertex)
                connected_vertices.update(new_edge.vertices)
                g.edges.add(new_edge)

            # If 'Bridge all site links' is enabled and Win2k3 bridges required
            # is not set
            # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
            # No documentation for this however, ntdsapi.h appears to have:
            # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
            if (((self.my_site.site_options & 0x00000002) == 0
                 and (self.my_site.site_options & 0x00001000) == 0)):
                g.edge_set.add(create_auto_edge_set(g, transport_guid))
            else:
                # TODO get all site link bridges
                for site_link_bridge in []:
                    g.edge_set.add(create_edge_set(g, transport_guid,
                                                   site_link_bridge))

        g.connected_vertices = connected_vertices

        #be less verbose in dot file output unless --debug
        do_dot_files = opts.dot_files and opts.debug
        dot_edges = []
        for edge in g.edges:
            for a, b in itertools.combinations(edge.vertices, 2):
                dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
        verify_properties = ()
        verify_and_dot('site_edges', dot_edges, directed=False,
                       label=self.my_dsa_dnstr,
                       properties=verify_properties, debug=DEBUG,
                       verify=opts.verify,
                       dot_files=do_dot_files)

        return g

    def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
        """Get a bridghead DC.

        Returns the first of the available bridgehead DCs for the
        site, or None when the site has no usable bridgehead.

        :param site: site object representing for which a bridgehead
            DC is desired.
        :param part: crossRef for NC to replicate.
        :param transport: interSiteTransport object for replication
            traffic.
        :param partial_ok: True if a DC containing a partial
            replica or a full replica will suffice, False if only
            a full replica will suffice.
        :param detect_failed: True to detect failed DCs and route
            replication traffic around them, False to assume no DC
            has failed.
        ::returns: dsa object for the bridgehead DC or None
        """
        candidates = self.get_all_bridgeheads(site, part, transport,
                                              partial_ok, detect_failed)
        if not candidates:
            DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
                          site.site_dnstr)
            return None

        DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
                    (site.site_dnstr, candidates[0].dsa_dnstr))
        return candidates[0]

    def get_all_bridgeheads(self, site, part, transport,
                            partial_ok, detect_failed):
        """Get all bridgehead DCs in a site satisfying the given criteria.

        Walks every writable DSA in the site and filters out those
        that fail the transport's bridgehead list, the partition's
        replica requirements, RODC behaviour-version constraints,
        the transport address requirement, or (optionally) known
        failure state.

        :param site: site object representing the site for which
            bridgehead DCs are desired.
        :param part: partition for NC to replicate.
        :param transport: interSiteTransport object for
            replication traffic.
        :param partial_ok: True if a DC containing a partial
            replica or a full replica will suffice, False if
            only a full replica will suffice.
        :param detect_failed: True to detect failed DCs and route
            replication traffic around them, False to assume
            no DC has failed.
        :returns: list of dsa objects for available bridgehead
            DCs (possibly empty)
        """

        bhs = []

        logger.debug("get_all_bridgeheads: %s" % transport.name)
        # NOTE(review): hard-coded site name -- this looks like leftover
        # debugging aid for one particular test topology; confirm it is
        # intentional before relying on this output.
        if 'Site-5' in site.site_dnstr:
            DEBUG_RED("get_all_bridgeheads with %s, part%s, partial_ok %s"
                      " detect_failed %s" % (site.site_dnstr, part.partstr,
                                             partial_ok, detect_failed))
        logger.debug(site.rw_dsa_table)
        # Only writable DSAs are bridgehead candidates.
        for dsa in site.rw_dsa_table.values():

            pdnstr = dsa.get_parent_dnstr()

            # IF t!bridgeheadServerListBL has one or more values and
            # t!bridgeheadServerListBL does not contain a reference
            # to the parent object of dc then skip dc
            if ((len(transport.bridgehead_list) != 0 and
                 pdnstr not in transport.bridgehead_list)):
                continue

            # IF dc is in the same site as the local DC
            #    IF a replica of cr!nCName is not in the set of NC replicas
            #    that "should be present" on dc or a partial replica of the
            #    NC "should be present" but partialReplicasOkay = FALSE
            #        Skip dc
            if self.my_site.same_site(dsa):
                needed, ro, partial = part.should_be_present(dsa)
                if not needed or (partial and not partial_ok):
                    continue
                rep = dsa.get_current_replica(part.nc_dnstr)

            # ELSE
            #     IF an NC replica of cr!nCName is not in the set of NC
            #     replicas that "are present" on dc or a partial replica of
            #     the NC "is present" but partialReplicasOkay = FALSE
            #          Skip dc
            else:
                rep = dsa.get_current_replica(part.nc_dnstr)
                if rep is None or (rep.is_partial() and not partial_ok):
                    continue

            # IF AmIRODC() and cr!nCName corresponds to default NC then
            #     Let dsaobj be the nTDSDSA object of the dc
            #     IF  dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
            #         Skip dc
            if self.my_dsa.is_ro() and rep is not None and rep.is_default():
                if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
                    continue

            # IF t!name != "IP" and the parent object of dc has no value for
            # the attribute specified by t!transportAddressAttribute
            #     Skip dc
            if transport.name != "IP":
                # MS tech specification says we retrieve the named
                # attribute in "transportAddressAttribute" from the parent
                # of the DSA object
                try:
                    attrs = [transport.address_attr]

                    res = self.samdb.search(base=pdnstr, scope=ldb.SCOPE_BASE,
                                            attrs=attrs)
                # If the parent object can't be read, treat the DC as
                # having no transport address and skip it.
                except ldb.LdbError, (enum, estr):
                    continue

                msg = res[0]
                if transport.address_attr not in msg:
                    continue
                #XXX nastr is NEVER USED. It will be removed.
                nastr = str(msg[transport.address_attr][0])

            # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
            #     Skip dc
            if self.is_bridgehead_failed(dsa, detect_failed):
                DEBUG("bridgehead is failed")
                continue

            logger.debug("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
            bhs.append(dsa)

        # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
        # s!options
        #    SORT bhs such that all GC servers precede DCs that are not GC
        #    servers, and otherwise by ascending objectGUID
        # ELSE
        #    SORT bhs in a random order
        if site.is_random_bridgehead_disabled():
            bhs.sort(sort_dsa_by_gc_and_guid)
        else:
            random.shuffle(bhs)
        DEBUG_YELLOW(bhs)
        return bhs

    def is_bridgehead_failed(self, dsa, detect_failed):
        """Determine whether a given DC is known to be in a failed state.

        :param dsa: the dsa object for the DC in question
        :param detect_failed: if False, failure detection is switched
            off entirely and the DC is never considered failed
        :returns: True if and only if the DC should be considered failed

        Note that we deliberately DEPART from the BridgeheadDCFailed
        pseudo code in the spec, which reads (in full):

    /***** BridgeheadDCFailed *****/
    /* Determine whether a given DC is known to be in a failed state.
     * IN: objectGUID - objectGUID of the DC's nTDSDSA object.
     * IN: detectFailedDCs - TRUE if and only failed DC detection is
     *     enabled.
     * RETURNS: TRUE if and only if the DC should be considered to be in a
     *          failed state.
     */
    BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
    {
        IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
        the options attribute of the site settings object for the local
        DC's site
            RETURN FALSE
        ELSEIF a tuple z exists in the kCCFailedLinks or
        kCCFailedConnections variables such that z.UUIDDsa =
        objectGUID, z.FailureCount > 1, and the current time -
        z.TimeFirstFailure > 2 hours
            RETURN TRUE
        ELSE
            RETURN detectFailedDCs
        ENDIF
    }

        There, detectFailedDCs is not behaving as advertised -- it acts
        as a default return code when no failure is detected, rather
        than as a switch that turns detection on or off. Elsewhere the
        documentation agrees with the comment (and with our behaviour)
        rather than with that code.
        """
        if not detect_failed:
            # Detection switched off: never report a failure.
            return False

        # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
        stale_detect_disabled = bool(self.my_site.site_options & 0x00000008)
        if stale_detect_disabled:
            # With stale detection disabled we can never know whether
            # the DC is in a failed state.
            return False

        return self.is_stale_link_connection(dsa)

    def create_connection(self, part, rbh, rsite, transport,
                          lbh, lsite, link_opt, link_sched,
                          partial_ok, detect_failed):
        """Create an nTDSConnection object with the given parameters
        if one does not already exist.

        Existing generated connections between the two bridgehead sets
        are brought into line with the siteLink options/schedule; if no
        valid connection survives, a new one is created under lbh.

        :param part: crossRef object for the NC to replicate.
        :param rbh: nTDSDSA object for DC to act as the
            IDL_DRSGetNCChanges server (which is in a site other
            than the local DC's site).
        :param rsite: site of the rbh
        :param transport: interSiteTransport object for the transport
            to use for replication traffic.
        :param lbh: nTDSDSA object for DC to act as the
            IDL_DRSGetNCChanges client (which is in the local DC's site).
        :param lsite: site of the lbh
        :param link_opt: Replication parameters (aggregated siteLink options,
                                                 etc.)
        :param link_sched: Schedule specifying the times at which
            to begin replicating.
        :param partial_ok: True if bridgehead DCs containing partial
            replicas of the NC are acceptable.
        :param detect_failed: True to detect failed DCs and route
            replication traffic around them, False to assume no DC
            has failed.
        """
        rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
                                            partial_ok, False)
        # Map from DN to dsa for O(1) lookup of cn!fromServer below.
        rbh_table = {x.dsa_dnstr: x for x in rbhs_all}

        DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all),
                                        [x.dsa_dnstr for x in rbhs_all]))

        # MS-TECH says to compute rbhs_avail but then doesn't use it
        # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
        #                                        partial_ok, detect_failed)

        lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
                                            partial_ok, False)
        # An RODC is never returned by get_all_bridgeheads (it is not
        # writable) but acts as a bridgehead for itself.
        if lbh.is_ro():
            lbhs_all.append(lbh)

        DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all),
                                        [x.dsa_dnstr for x in lbhs_all]))

        # MS-TECH says to compute lbhs_avail but then doesn't use it
        # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
        #                                       partial_ok, detect_failed)

        # FOR each nTDSConnection object cn such that the parent of cn is
        # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
        for ldsa in lbhs_all:
            for cn in ldsa.connect_table.values():

                rdsa = rbh_table.get(cn.from_dnstr)
                if rdsa is None:
                    continue

                DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
                # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
                # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
                # cn!transportType references t
                if ((cn.is_generated() and
                     not cn.is_rodc_topology() and
                     cn.transport_guid == transport.guid)):

                    # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
                    # cn!options and cn!schedule != sch
                    #     Perform an originating update to set cn!schedule to
                    #     sched
                    if ((not cn.is_user_owned_schedule() and
                         not cn.is_equivalent_schedule(link_sched))):
                        cn.schedule = link_sched
                        cn.set_modified(True)

                    # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                    # NTDSCONN_OPT_USE_NOTIFY are set in cn
                    if cn.is_override_notify_default() and \
                       cn.is_use_notify():

                        # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
                        # ri.Options
                        #    Perform an originating update to clear bits
                        #    NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                        #    NTDSCONN_OPT_USE_NOTIFY in cn!options
                        if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
                            cn.options &= \
                                ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                                  dsdb.NTDSCONN_OPT_USE_NOTIFY)
                            cn.set_modified(True)

                    # ELSE
                    else:

                        # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
                        # ri.Options
                        #     Perform an originating update to set bits
                        #     NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                        #     NTDSCONN_OPT_USE_NOTIFY in cn!options
                        if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
                            cn.options |= \
                                (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                                 dsdb.NTDSCONN_OPT_USE_NOTIFY)
                            cn.set_modified(True)

                    # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
                    if cn.is_twoway_sync():

                        # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
                        # ri.Options
                        #     Perform an originating update to clear bit
                        #     NTDSCONN_OPT_TWOWAY_SYNC in cn!options
                        if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
                            cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
                            cn.set_modified(True)

                    # ELSE
                    else:

                        # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
                        # ri.Options
                        #     Perform an originating update to set bit
                        #     NTDSCONN_OPT_TWOWAY_SYNC in cn!options
                        if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
                            cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
                            cn.set_modified(True)

                    # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
                    # in cn!options
                    if cn.is_intersite_compression_disabled():

                        # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
                        # in ri.Options
                        #     Perform an originating update to clear bit
                        #     NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
                        #     cn!options
                        if ((link_opt &
                             dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0):
                            cn.options &= \
                                ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
                            cn.set_modified(True)

                    # ELSE
                    else:
                        # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
                        # ri.Options
                        #     Perform an originating update to set bit
                        #     NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
                        #     cn!options
                        if ((link_opt &
                             dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
                            cn.options |= \
                                dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
                            cn.set_modified(True)

                    # Display any modified connection
                    if opts.readonly:
                        if cn.to_be_modified:
                            logger.info("TO BE MODIFIED:\n%s" % cn)

                        ldsa.commit_connections(self.samdb, ro=True)
                    else:
                        ldsa.commit_connections(self.samdb)
        # ENDFOR

        valid_connections = 0

        # FOR each nTDSConnection object cn such that cn!parent is
        # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
        for ldsa in lbhs_all:
            for cn in ldsa.connect_table.values():

                rdsa = rbh_table.get(cn.from_dnstr)
                if rdsa is None:
                    continue

                DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)

                # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
                # cn!transportType references t) and
                # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
                if (((not cn.is_generated() or
                      cn.transport_guid == transport.guid) and
                     not cn.is_rodc_topology())):

                    # LET rguid be the objectGUID of the nTDSDSA object
                    # referenced by cn!fromServer
                    # LET lguid be (cn!parent)!objectGUID

                    # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
                    # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
                    #     Increment cValidConnections by 1
                    if ((not self.is_bridgehead_failed(rdsa, detect_failed) and
                         not self.is_bridgehead_failed(ldsa, detect_failed))):
                        valid_connections += 1

                    # IF keepConnections does not contain cn!objectGUID
                    #     APPEND cn!objectGUID to keepConnections
                    self.kept_connections.add(cn)

        # ENDFOR
        DEBUG_RED("valid connections %d" % valid_connections)
        DEBUG("kept_connections:\n%s" % (self.kept_connections,))
        # IF cValidConnections = 0
        if valid_connections == 0:

            # LET opt be NTDSCONN_OPT_IS_GENERATED
            opt = dsdb.NTDSCONN_OPT_IS_GENERATED

            # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
            #     SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
            #     NTDSCONN_OPT_USE_NOTIFY in opt
            if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
                opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                        dsdb.NTDSCONN_OPT_USE_NOTIFY)

            # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
            #     SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
            if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
                opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC

            # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
            # ri.Options
            #     SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
            if ((link_opt &
                 dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
                opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION

            # Perform an originating update to create a new nTDSConnection
            # object cn that is a child of lbh, cn!enabledConnection = TRUE,
            # cn!options = opt, cn!transportType is a reference to t,
            # cn!fromServer is a reference to rbh, and cn!schedule = sch
            DEBUG_FN("new connection, KCC dsa: %s" % self.my_dsa.dsa_dnstr)
            cn = lbh.new_connection(opt, 0, transport,
                                    rbh.dsa_dnstr, link_sched)

            # Display any added connection
            # NOTE(review): unlike the "TO BE MODIFIED" block above, the
            # ro=True commit here is nested inside "if cn.to_be_added" --
            # possibly an indentation slip; harmless only because the
            # read-only commit is a dry run. Confirm intent.
            if opts.readonly:
                if cn.to_be_added:
                    logger.info("TO BE ADDED:\n%s" % cn)

                    lbh.commit_connections(self.samdb, ro=True)
            else:
                lbh.commit_connections(self.samdb)

            # APPEND cn!objectGUID to keepConnections
            self.kept_connections.add(cn)

    def add_transports(self, vertex, local_vertex, graph, detect_failed):
        """Build a Vertex's transport lists

        Each vertex has accept_red_red and accept_black lists that
        list what transports they accept under various conditions. The
        only transport that is ever accepted is IP, and a dummy extra
        transport called "EDGE_TYPE_ALL".

        Part of MS-ADTS 6.2.2.3.4.3 -- ColorVertices

        :param vertex: the remote vertex we are thinking about
        :param local_vertex: the vertex relating to the local site.
        :param graph: the intersite graph
        :param detect_failed: whether to detect failed links
        :return: True if some bridgeheads were not found
        """
        # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
        # here, but using vertex seems to make more sense. That is,
        # the docs want this:
        #
        #bh = self.get_bridgehead(vertex.site, vertex.part, transport,
        #                         local_vertex.is_black(), detect_failed)
        #
        # TODO WHY?????

        vertex.accept_red_red = []
        vertex.accept_black = []
        found_failed = False
        for t_guid, transport in self.transport_table.items():
            if transport.name != 'IP':
                #XXX well this is cheating a bit
                logging.warning("WARNING: we are ignoring a transport named %r"
                                % transport.name)
                continue

            # The MS-ADTS check skipping red vertices of domain NCs
            # (FLAG_CR_NTDS_DOMAIN 0x00000002) on non-IP transports was
            # unreachable here: every non-IP transport has already been
            # skipped above, so the check has been dropped.

            if vertex not in graph.connected_vertices:
                continue

            partial_replica_okay = vertex.is_black()
            bh = self.get_bridgehead(vertex.site, vertex.part, transport,
                                     partial_replica_okay, detect_failed)
            if bh is None:
                # No usable bridgehead for this transport; report it
                # but keep trying other transports.
                found_failed = True
                continue

            vertex.accept_red_red.append(t_guid)
            vertex.accept_black.append(t_guid)

            # Add additional transport to allow another run of Dijkstra
            vertex.accept_red_red.append("EDGE_TYPE_ALL")
            vertex.accept_black.append("EDGE_TYPE_ALL")

        return found_failed

    def create_connections(self, graph, part, detect_failed):
        """Construct an NC replica graph for the NC identified by
        the given crossRef, then create any additional nTDSConnection
        objects required.

        :param graph: site graph.
        :param part: crossRef object for NC.
        :param detect_failed:  True to detect failed DCs and route
            replication traffic around them, False to assume no DC
            has failed.

        Modifies self.kept_connections by adding any connections
        deemed to be "in use".

        ::returns: (all_connected, found_failed_dc)
        (all_connected) True if the resulting NC replica graph
            connects all sites that need to be connected.
        (found_failed_dc) True if one or more failed DCs were
            detected.
        """
        all_connected = True
        found_failed = False

        logger.debug("create_connections(): enter\n"
                     "\tpartdn=%s\n\tdetect_failed=%s" %
                     (part.nc_dnstr, detect_failed))

        # XXX - This is a highly abbreviated function from the MS-TECH
        #       ref.  It creates connections between bridgeheads to all
        #       sites that have appropriate replicas.  Thus we are not
        #       creating a minimum cost spanning tree but instead
        #       producing a fully connected tree.  This should produce
        #       a full (albeit not optimal cost) replication topology.

        my_vertex = Vertex(self.my_site, part)
        my_vertex.color_vertex()

        # Colour every site vertex and record its accepted transports;
        # note transports are always added with detect_failed=False here.
        for v in graph.vertices:
            v.color_vertex()
            if self.add_transports(v, my_vertex, graph, False):
                found_failed = True

        # No NC replicas for this NC in the site of the local DC,
        # so no nTDSConnection objects need be created
        if my_vertex.is_white():
            return all_connected, found_failed

        edge_list, n_components = get_spanning_tree_edges(graph,
                                                          self.my_site,
                                                          label=part.partstr)

        logger.debug("%s Number of components: %d" %
                     (part.nc_dnstr, n_components))
        # More than one component means some sites cannot be reached.
        if n_components > 1:
            all_connected = False

        # LET partialReplicaOkay be TRUE if and only if
        # localSiteVertex.Color = COLOR.BLACK
        partial_ok = my_vertex.is_black()

        # Utilize the IP transport only for now
        transport = self.ip_transport

        DEBUG("edge_list %s" % edge_list)
        for e in edge_list:
            # XXX more accurate comparison?
            # Skip directed edges that originate in our own site.
            if e.directed and e.vertices[0].site is self.my_site:
                continue

            # The remote site is whichever end of the edge is not us.
            if e.vertices[0].site is self.my_site:
                rsite = e.vertices[1].site
            else:
                rsite = e.vertices[0].site

            # We don't make connections to our own site as that
            # is intrasite topology generator's job
            if rsite is self.my_site:
                DEBUG("rsite is my_site")
                continue

            # Determine bridgehead server in remote site
            rbh = self.get_bridgehead(rsite, part, transport,
                                      partial_ok, detect_failed)
            if rbh is None:
                continue

            # RODC acts as an BH for itself
            # IF AmIRODC() then
            #     LET lbh be the nTDSDSA object of the local DC
            # ELSE
            #     LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
            #     cr, t, partialReplicaOkay, detectFailedDCs)
            if self.my_dsa.is_ro():
                lsite = self.my_site
                lbh = self.my_dsa
            else:
                lsite = self.my_site
                lbh = self.get_bridgehead(lsite, part, transport,
                                          partial_ok, detect_failed)
            # TODO
            if lbh is None:
                DEBUG_RED("DISASTER! lbh is None")
                return False, True

            DEBUG_CYAN("SITES")
            print lsite, rsite
            DEBUG_BLUE("vertices")
            print e.vertices
            DEBUG_BLUE("bridgeheads")
            print lbh, rbh
            DEBUG_BLUE("-" * 70)

            # Replication options/schedule come from the siteLink when
            # the edge has one; otherwise use neutral defaults.
            sitelink = e.site_link
            if sitelink is None:
                link_opt = 0x0
                link_sched = None
            else:
                link_opt = sitelink.options
                link_sched = sitelink.schedule

            self.create_connection(part, rbh, rsite, transport,
                                   lbh, lsite, link_opt, link_sched,
                                   partial_ok, detect_failed)

        return all_connected, found_failed

    def create_intersite_connections(self):
        """Create inter-site nTDSConnection objects for each NC replica graph.

        Computes an NC replica graph for each NC replica that "should be
        present" on the local DC or "is present" on any DC in the same site
        as the local DC. For each edge directed to an NC replica on such a
        DC from an NC replica on a DC in another site, the KCC creates an
        nTDSConnection object to imply that edge if one does not already
        exist.

        Modifies self.kept_connections - a set of nTDSConnection
        objects for edges that are directed
        to the local DC's site in one or more NC replica graphs.

        :returns: True if spanning trees were created for all NC replica
            graphs, otherwise False.
        """
        all_connected = True
        self.kept_connections = set()

        # LET crossRefList be the set containing each object o of class
        # crossRef such that o is a child of the CN=Partitions child of the
        # config NC

        # FOR each crossRef object cr in crossRefList
        #    IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
        #        is clear in cr!systemFlags, skip cr.
        #    LET g be the GRAPH return of SetupGraph()

        for part in self.part_table.values():

            if not part.is_enabled():
                continue

            if part.is_foreign():
                continue

            graph = self.setup_graph(part)

            # Create nTDSConnection objects, routing replication traffic
            # around "failed" DCs.  (The dead "found_failed = False"
            # initialisation that used to sit here was removed: the value
            # was immediately overwritten by the call below.)
            connected, found_failed = self.create_connections(graph,
                                                              part, True)

            DEBUG("with detect_failed: connected %s Found failed %s" %
                  (connected, found_failed))
            if not connected:
                all_connected = False

                if found_failed:
                    # One or more failed DCs preclude use of the ideal NC
                    # replica graph. Add connections for the ideal graph.
                    self.create_connections(graph, part, False)

        return all_connected

    def intersite(self):
        """Generate the inter-site KCC replica connection graph.

        Head method for inter-site topology generation: builds the
        NC replica connection graph and the attendant nTDSConnection
        objects in the samdb.

        Produces self.kept_connections, the set of NTDS Connections
        that should be kept during the subsequent pruning process.

        ::return (True or False):  (True) if the produced NC replica
            graph connects all sites that need to be connected
        """
        dsa = self.my_dsa
        site = self.my_site
        all_connected = True

        logger.debug("intersite(): enter")

        # Work out which DSA is the inter-site topology generator.
        site.select_istg(self.samdb, dsa, ro=opts.readonly)

        # Bail out early if this site has inter-site topology
        # generation disabled, or if we are not the ISTG ourselves.
        if site.is_intersite_topology_disabled():
            logger.debug("intersite(): exit disabled all_connected=%d" %
                         all_connected)
            return all_connected

        if not dsa.is_istg():
            logger.debug("intersite(): exit not istg all_connected=%d" %
                         all_connected)
            return all_connected

        self.merge_failed_links()

        # For each NC with an NC replica that "should be present" on the
        # local DC or "is present" on any DC in the same site as the
        # local DC, construct a site graph -- the precursor to an NC
        # replica graph. Site connectivity for a site graph is defined
        # by the interSiteTransport, siteLink, and siteLinkBridge
        # objects in the config NC.
        all_connected = self.create_intersite_connections()

        logger.debug("intersite(): exit all_connected=%d" % all_connected)
        return all_connected

    def update_rodc_connection(self):
        """Runs when the local DC is an RODC and updates the RODC NTFRS
        connection object.

        Given an nTDSConnection object cn1 whose options contain
        NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object
        cn2 without NTDSCONN_OPT_RODC_TOPOLOGY, cn1 is modified so
        that:

            cn1.fromServer = cn2.fromServer
            cn1.schedule = cn2.schedule

        If no such cn2 exists, cn1 is left alone. If no such cn1
        exists, this task modifies nothing.
        """
        # Only applies to read-only DCs.
        if not self.my_dsa.is_ro():
            return

        connections = self.my_dsa.connect_table.values()
        ro_conns = [c for c in connections if c.is_rodc_topology()]
        rw_conns = [c for c in connections if c not in ro_conns]

        # Nothing to copy from, or nothing to copy to.
        if not (rw_conns and ro_conns):
            return

        # XXX here we are dealing with multiple RODC_TOPO connections,
        # if they exist. It is not clear whether the spec means that
        # or if it ever arises.
        template = rw_conns[0]
        for conn in ro_conns:
            conn.from_dnstr = template.from_dnstr
            conn.schedule = template.schedule
            conn.to_be_modified = True

        self.my_dsa.commit_connections(self.samdb, ro=opts.readonly)

    def intrasite_max_node_edges(self, node_count):
        """Returns the maximum number of edges directed to a node in
        the intrasite replica graph.

        The KCC never creates more than 50 edges directed to a single
        DC. Below that cap, each node gets n+2 edges directed to it,
        where n is the smallest non-negative integer satisfying
        (node_count <= 2*(n*n) + 6*n + 7).

        (If the number of edges is m (i.e. n + 2), that is the same as
        2 * m*m - 2 * m + 3).

        edges  n   nodecount
          2    0    7
          3    1   15
          4    2   27
          5    3   43
                  ...
         50   48 4903

        :param node_count: total number of nodes in the replica graph

        The intention is that there should be no more than 3 hops
        between any two DSAs at a site. With up to 7 nodes the 2 edges
        of the ring are enough; any configuration of extra edges with
        8 nodes will be enough. It is less clear that the 3 hop
        guarantee holds at e.g. 15 nodes in degenerate cases, but
        those are quite unlikely given the extra edges are randomly
        arranged.
        """
        # Search for the smallest n satisfying the quadratic bound,
        # then clamp n + 2 at the hard limit of 50 edges.
        n = 0
        while node_count > 2 * n * n + 6 * n + 7:
            n += 1
        return min(n + 2, 50)

    def construct_intrasite_graph(self, site_local, dc_local,
                                  nc_x, gc_only, detect_stale):
        """Build the intrasite NC replica graph for one naming context
        and create nTDSConnection objects for edges directed at the
        local DC.

        Implements the NC replica graph construction of [MS-ADTS]
        6.2.2.2: gather the eligible replicas, connect them in a
        double ring, merge in existing connection edges, top up with
        random extra edges, then ensure the local DC has connections
        for the edges directed at it.

        :param site_local: site of the local DC (used for graph labels)
        :param dc_local: the local DC's DSA object
        :param nc_x: the naming context (NC) under consideration
        :param gc_only: only accept replicas found on GC servers
        :param detect_stale: if True, skip DCs with stale link connections
        """
        # We're using the MS notation names here to allow
        # correlation back to the published algorithm.
        #
        # nc_x     - naming context (x) that we are testing if it
        #            "should be present" on the local DC
        # f_of_x   - replica (f) found on a DC (s) for NC (x)
        # dc_s     - DC where f_of_x replica was found
        # dc_local - local DC that potentially needs a replica
        #            (f_of_x)
        # r_list   - replica list R
        # p_of_x   - replica (p) is partial and found on a DC (s)
        #            for NC (x)
        # l_of_x   - replica (l) is the local replica for NC (x)
        #            that should appear on the local DC
        # r_len = is length of replica list |R|
        #
        # If the DSA doesn't need a replica for this
        # partition (NC x) then continue
        needed, ro, partial = nc_x.should_be_present(dc_local)

        DEBUG_YELLOW("construct_intrasite_graph(): enter" +
                     "\n\tgc_only=%d" % gc_only +
                     "\n\tdetect_stale=%d" % detect_stale +
                     "\n\tneeded=%s" % needed +
                     "\n\tro=%s" % ro +
                     "\n\tpartial=%s" % partial +
                     "\n%s" % nc_x)

        if not needed:
            # (message previously named the wrong function,
            # "construct_intersite_graph")
            DEBUG_RED("%s lacks 'should be present' status, "
                      "aborting construct_intrasite_graph!" %
                      nc_x.nc_dnstr)
            return

        # Create a NCReplica that matches what the local replica
        # should say.  We'll use this below in our r_list
        l_of_x = NCReplica(dc_local.dsa_dnstr, dc_local.dsa_guid,
                           nc_x.nc_dnstr)

        l_of_x.identify_by_basedn(self.samdb)

        l_of_x.rep_partial = partial
        l_of_x.rep_ro = ro

        # Add this replica that "should be present" to the
        # needed replica table for this DSA
        dc_local.add_needed_replica(l_of_x)

        # Replica list
        #
        # Let R be a sequence containing each writable replica f of x
        # such that f "is present" on a DC s satisfying the following
        # criteria:
        #
        #  * s is a writable DC other than the local DC.
        #
        #  * s is in the same site as the local DC.
        #
        #  * If x is a read-only full replica and x is a domain NC,
        #    then the DC's functional level is at least
        #    DS_BEHAVIOR_WIN2008.
        #
        #  * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
        #    in the options attribute of the site settings object for
        #    the local DC's site, or no tuple z exists in the
        #    kCCFailedLinks or kCCFailedConnections variables such
        #    that z.UUIDDsa is the objectGUID of the nTDSDSA object
        #    for s, z.FailureCount > 0, and the current time -
        #    z.TimeFirstFailure > 2 hours.

        r_list = []

        # We'll loop thru all the DSAs looking for
        # writeable NC replicas that match the naming
        # context dn for (nc_x)
        #
        for dc_s in self.my_site.dsa_table.values():
            # If this partition (nc_x) doesn't appear as a
            # replica (f_of_x) on (dc_s) then continue
            if nc_x.nc_dnstr not in dc_s.current_rep_table:
                continue

            # Pull out the NCReplica (f) of (x) with the dn
            # that matches NC (x) we are examining.
            f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]

            # Replica (f) of NC (x) must be writable
            if f_of_x.is_ro():
                continue

            # Replica (f) of NC (x) must satisfy the
            # "is present" criteria for DC (s) that
            # it was found on
            if not f_of_x.is_present():
                continue

            # DC (s) must be a writable DSA other than
            # my local DC.  In other words we'd only replicate
            # from other writable DC
            if dc_s.is_ro() or dc_s is dc_local:
                continue

            # Certain replica graphs are produced only
            # for global catalogs, so test against
            # method input parameter
            if gc_only and not dc_s.is_gc():
                continue

            # DC (s) must be in the same site as the local DC
            # as this is the intra-site algorithm. This is
            # handled by virtue of placing DSAs in per
            # site objects (see enclosing for() loop)

            # If NC (x) is intended to be read-only full replica
            # for a domain NC on the target DC then the source
            # DC should have functional level at minimum WIN2008
            #
            # Effectively we're saying that in order to replicate
            # to a targeted RODC (which was introduced in Windows 2008)
            # then we have to replicate from a DC that is also minimally
            # at that level.
            #
            # You can also see this requirement in the MS special
            # considerations for RODC which state that to deploy
            # an RODC, at least one writable domain controller in
            # the domain must be running Windows Server 2008
            if ro and not partial and nc_x.nc_type == NCType.domain:
                if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
                    continue

            # If we haven't been told to turn off stale connection
            # detection and this dsa has a stale connection then
            # continue
            if detect_stale and self.is_stale_link_connection(dc_s):
                continue

            # Replica meets criteria.  Add it to table indexed
            # by the GUID of the DC that it appears on
            r_list.append(f_of_x)

        # If a partial (not full) replica of NC (x) "should be present"
        # on the local DC, append to R each partial replica (p of x)
        # such that p "is present" on a DC satisfying the same
        # criteria defined above for full replica DCs.
        #
        # XXX This loop and the previous one differ only in whether
        # the replica is partial or not. here we only accept partial
        # (because we're partial); before we only accepted full. Order
        # doesn't matter (the list is sorted a few lines down) so these
        # loops could easily be merged. Or this could be a helper
        # function.

        if partial:
            # Now we loop thru all the DSAs looking for
            # partial NC replicas that match the naming
            # context dn for (NC x)
            for dc_s in self.my_site.dsa_table.values():

                # If this partition NC (x) doesn't appear as a
                # replica (p) of NC (x) on the dsa DC (s) then
                # continue
                if nc_x.nc_dnstr not in dc_s.current_rep_table:
                    continue

                # Pull out the NCReplica with the dn that
                # matches NC (x) we are examining.
                p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]

                # Replica (p) of NC (x) must be partial
                if not p_of_x.is_partial():
                    continue

                # Replica (p) of NC (x) must satisfy the
                # "is present" criteria for DC (s) that
                # it was found on
                if not p_of_x.is_present():
                    continue

                # DC (s) must be a writable DSA other than
                # my DSA.  In other words we'd only replicate
                # from other writable DSA
                if dc_s.is_ro() or dc_s is dc_local:
                    continue

                # Certain replica graphs are produced only
                # for global catalogs, so test against
                # method input parameter
                if gc_only and not dc_s.is_gc():
                    continue

                # If we haven't been told to turn off stale connection
                # detection and this dsa has a stale connection then
                # continue
                if detect_stale and self.is_stale_link_connection(dc_s):
                    continue

                # Replica meets criteria.  Add it to table indexed
                # by the GUID of the DSA that it appears on
                r_list.append(p_of_x)

        # Append to R the NC replica that "should be present"
        # on the local DC
        r_list.append(l_of_x)

        r_list.sort(sort_replica_by_dsa_guid)
        r_len = len(r_list)

        max_node_edges = self.intrasite_max_node_edges(r_len)

        # Add a node for each r_list element to the replica graph
        graph_list = []
        for rep in r_list:
            node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
            graph_list.append(node)

        # Connect the nodes in a double ring: for each r(i) from
        # (0 <= i < |R|-1)
        for i in range(r_len - 1):
            # Add an edge from r(i) to r(i+1) if r(i) is a full
            # replica or r(i+1) is a partial replica
            if not r_list[i].is_partial() or r_list[i+1].is_partial():
                graph_list[i+1].add_edge_from(r_list[i].rep_dsa_dnstr)

            # Add an edge from r(i+1) to r(i) if r(i+1) is a full
            # replica or ri is a partial replica.
            if not r_list[i+1].is_partial() or r_list[i].is_partial():
                graph_list[i].add_edge_from(r_list[i+1].rep_dsa_dnstr)

        # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
        # or r0 is a partial replica.
        if not r_list[r_len-1].is_partial() or r_list[0].is_partial():
            graph_list[0].add_edge_from(r_list[r_len-1].rep_dsa_dnstr)

        # Add an edge from r0 to r|R|-1 if r0 is a full replica or
        # r|R|-1 is a partial replica.
        if not r_list[0].is_partial() or r_list[r_len-1].is_partial():
            graph_list[r_len-1].add_edge_from(r_list[0].rep_dsa_dnstr)

        DEBUG("r_list is length %s" % len(r_list))
        DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr))
                        for x in r_list))

        do_dot_files = opts.dot_files and opts.debug
        if opts.verify or do_dot_files:
            dot_edges = []
            dot_vertices = set()
            for v1 in graph_list:
                dot_vertices.add(v1.dsa_dnstr)
                for v2 in v1.edge_from:
                    dot_edges.append((v2, v1.dsa_dnstr))
                    dot_vertices.add(v2)

            verify_properties = ('connected', 'directed_double_ring_or_small')
            verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
                           label='%s__%s__%s' % (site_local.site_dnstr,
                                                 nctype_lut[nc_x.nc_type],
                                                 nc_x.nc_dnstr),
                           properties=verify_properties, debug=DEBUG,
                           verify=opts.verify,
                           dot_files=do_dot_files, directed=True)

        # For each existing nTDSConnection object implying an edge
        # from rj of R to ri such that j != i, an edge from rj to ri
        # is not already in the graph, and the total edges directed
        # to ri is less than n+2, the KCC adds that edge to the graph.
        for vertex in graph_list:
            dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
            for connect in dsa.connect_table.values():
                remote = connect.from_dnstr
                if remote in self.my_site.dsa_table:
                    vertex.add_edge_from(remote)

        DEBUG('reps are:  %s' % '   '.join(x.rep_dsa_dnstr for x in r_list))
        DEBUG('dsas are:  %s' % '   '.join(x.dsa_dnstr for x in graph_list))

        for tnode in graph_list:
            # To optimize replication latency in sites with many NC
            # replicas, the KCC adds new edges directed to ri to bring
            # the total edges to n+2, where the NC replica rk of R
            # from which the edge is directed is chosen at random such
            # that k != i and an edge from rk to ri is not already in
            # the graph.
            #
            # Note that the KCC tech ref does not give a number for
            # the definition of "sites with many NC replicas". At a
            # bare minimum to satisfy n+2 edges directed at a node we
            # have to have at least three replicas in |R| (i.e. if n
            # is zero then at least replicas from two other graph
            # nodes may direct edges to us).
            if r_len >= 3 and not tnode.has_sufficient_edges():
                candidates = [x for x in graph_list if
                              (x is not tnode and
                               x.dsa_dnstr not in tnode.edge_from)]

                DEBUG_BLUE("looking for random link for %s. r_len %d, "
                           "graph len %d candidates %d"
                           % (tnode.dsa_dnstr, r_len, len(graph_list),
                              len(candidates)))

                DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])

                while candidates and not tnode.has_sufficient_edges():
                    other = random.choice(candidates)
                    # was other.dsa_dstr (AttributeError); edge_from
                    # holds dnstr strings, so pass the dnstr here too
                    DEBUG("trying to add candidate %s" % other.dsa_dnstr)
                    if not tnode.add_edge_from(other.dsa_dnstr):
                        DEBUG_RED("could not add %s" % other.dsa_dnstr)
                    candidates.remove(other)
            else:
                DEBUG_FN("not adding links to %s: nodes %s, links is %s/%s" %
                         (tnode.dsa_dnstr, r_len, len(tnode.edge_from),
                          tnode.max_edges))

            # Print the graph node in debug mode
            logger.debug("%s" % tnode)

            # For each edge directed to the local DC, ensure a nTDSConnection
            # points to us that satisfies the KCC criteria

            if tnode.dsa_dnstr == dc_local.dsa_dnstr:
                tnode.add_connections_from_edges(dc_local)

        if opts.verify or do_dot_files:
            dot_edges = []
            dot_vertices = set()
            for v1 in graph_list:
                dot_vertices.add(v1.dsa_dnstr)
                for v2 in v1.edge_from:
                    dot_edges.append((v2, v1.dsa_dnstr))
                    dot_vertices.add(v2)

            verify_properties = ('connected', 'directed_double_ring_or_small')
            verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
                           label='%s__%s__%s' % (site_local.site_dnstr,
                                                 nctype_lut[nc_x.nc_type],
                                                 nc_x.nc_dnstr),
                           properties=verify_properties, debug=DEBUG,
                           verify=opts.verify,
                           dot_files=do_dot_files, directed=True)

    def intrasite(self):
        """The head method for generating the intra-site KCC replica
        connection graph and attendant nTDSConnection objects
        in the samdb
        """
        mydsa = self.my_dsa

        logger.debug("intrasite(): enter")

        # Nothing to do if this site has intra-site topology
        # generation disabled.
        mysite = self.my_site
        if mysite.is_intrasite_topology_disabled():
            return

        detect_stale = not mysite.is_detect_stale_disabled()

        def show_pending(paint):
            # Dump connections queued for addition; the colour function
            # identifies which stage of the computation we're at.
            for connect in mydsa.connect_table.values():
                if connect.to_be_added:
                    paint("TO BE ADDED:\n%s" % connect)

        show_pending(DEBUG_CYAN)

        # First pass over all the partitions, with gc_only False and
        # honouring the site's stale-link detection setting.
        for part in self.part_table.values():
            self.construct_intrasite_graph(mysite, mydsa, part, False,
                                           detect_stale)
            show_pending(DEBUG_BLUE)

        # If the DC is a GC server, the KCC constructs an additional NC
        # replica graph (and creates nTDSConnection objects) for the
        # config NC as above, except that only NC replicas that "are
        # present" on GC servers are added to R.
        show_pending(DEBUG_YELLOW)

        # Do it again, with gc_only True
        for part in self.part_table.values():
            if part.is_config():
                self.construct_intrasite_graph(mysite, mydsa, part, True,
                                               detect_stale)

        # The DC repeats the NC replica graph computation and
        # nTDSConnection creation for each of the NC replica graphs,
        # this time assuming that no DC has failed. It does so by
        # re-executing the steps as if the bit
        # NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were set in
        # the options attribute of the site settings object for the
        # local DC's site (i.e. the detect_stale flag is False).
        show_pending(DEBUG_BLUE)

        for part in self.part_table.values():
            self.construct_intrasite_graph(mysite, mydsa, part, False,
                                           False)  # don't detect stale

        # As above: the GC-only config-NC pass, also without stale
        # detection.
        show_pending(DEBUG_RED)

        for part in self.part_table.values():
            if part.is_config():
                self.construct_intrasite_graph(mysite, mydsa, part, True,
                                               False)  # don't detect stale

        if opts.readonly:
            # Display any to be added or modified repsFrom
            for connect in mydsa.connect_table.values():
                if connect.to_be_deleted:
                    logger.info("TO BE DELETED:\n%s" % connect)
                if connect.to_be_modified:
                    logger.info("TO BE MODIFIED:\n%s" % connect)
                if connect.to_be_added:
                    DEBUG_GREEN("TO BE ADDED:\n%s" % connect)

            mydsa.commit_connections(self.samdb, ro=True)
        else:
            # Commit any newly created connections to the samdb
            mydsa.commit_connections(self.samdb)

    def list_dsas(self):
        """Return the DN of every DSA in every site, with the
        'CN=NTDS Settings,' RDN prefix stripped."""
        self.load_my_site()
        self.load_my_dsa()

        self.load_all_sites()
        self.load_all_partitions()
        self.load_all_transports()
        self.load_all_sitelinks()

        dsas = []
        for site in self.site_table.values():
            for dsa in site.dsa_table.values():
                dsas.append(dsa.dsa_dnstr.replace('CN=NTDS Settings,',
                                                  '', 1))
        return dsas

    def load_samdb(self, dburl, lp, creds):
        """Open the sam database at dburl and attach it as self.samdb."""
        self.samdb = SamDB(url=dburl, lp=lp,
                           credentials=creds,
                           session_info=system_session())

    def plot_all_connections(self, basename, verify_properties=()):
        """Verify and/or plot a graph of every nTDSConnection in every
        known DSA, coloured by read-only status and RODC topology."""
        verify = verify_properties and opts.verify
        plot = opts.dot_files
        # Neither verifying nor plotting: nothing to do.
        if not verify and not plot:
            return

        dot_edges = []
        dot_vertices = []
        edge_colours = []
        vertex_colours = []

        for dsa in self.dsa_by_dnstr.values():
            dot_vertices.append(dsa.dsa_dnstr)
            # red vertices are read-only DSAs, blue are writable
            vertex_colours.append('#cc0000' if dsa.is_ro() else '#0000cc')
            for con in dsa.connect_table.values():
                # red edges carry the RODC topology flag
                edge_colours.append('red' if con.is_rodc_topology()
                                    else 'blue')
                dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))

        verify_and_dot(basename, dot_edges, vertices=dot_vertices,
                       label=self.my_dsa_dnstr, properties=verify_properties,
                       debug=DEBUG, verify=verify, dot_files=plot,
                       directed=True, edge_colors=edge_colours,
                       vertex_colors=vertex_colours)

    def run(self, dburl, lp, creds, forced_local_dsa=None,
            forget_local_links=False, forget_intersite_links=False):
        """Method to perform a complete run of the KCC and
        produce an updated topology for subsequent NC replica
        syncronization between domain controllers
        """
        # We may already have a samdb setup if we are
        # currently importing an ldif for a test run
        if self.samdb is None:
            try:
                self.load_samdb(dburl, lp, creds)
            except ldb.LdbError, (num, msg):
                logger.error("Unable to open sam database %s : %s" %
                             (dburl, msg))
                return 1

        if forced_local_dsa:
            self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" %
                                            forced_local_dsa)

        try:
            # Setup
            self.load_my_site()
            self.load_my_dsa()

            self.load_all_sites()
            self.load_all_partitions()
            self.load_all_transports()
            self.load_all_sitelinks()

            if opts.verify or opts.dot_files:
                guid_to_dnstr = {}
                for site in self.site_table.values():
                    guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
                                         for dnstr, dsa
                                         in site.dsa_table.items())

                self.plot_all_connections('dsa_initial')

                dot_edges = []
                current_reps, needed_reps = self.my_dsa.get_rep_tables()
                for dnstr, c_rep in current_reps.items():
                    DEBUG("c_rep %s" % c_rep)
                    dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))

                verify_and_dot('dsa_repsFrom_initial', dot_edges,
                               directed=True, label=self.my_dsa_dnstr,
                               properties=(), debug=DEBUG, verify=opts.verify,
                               dot_files=opts.dot_files)

                dot_edges = []
                for site in self.site_table.values():
                    for dsa in site.dsa_table.values():
                        current_reps, needed_reps = dsa.get_rep_tables()
                        for dn_str, rep in current_reps.items():
                            for reps_from in rep.rep_repsFrom:
                                DEBUG("rep %s" % rep)
                                dsa_guid = str(reps_from.source_dsa_obj_guid)
                                dsa_dn = guid_to_dnstr[dsa_guid]
                                dot_edges.append((dsa.dsa_dnstr, dsa_dn))

                verify_and_dot('dsa_repsFrom_initial_all', dot_edges,
                               directed=True, label=self.my_dsa_dnstr,
                               properties=(), debug=DEBUG, verify=opts.verify,
                               dot_files=opts.dot_files)

                dot_edges = []
                for link in self.sitelink_table.values():
                    for a, b in itertools.combinations(link.site_list, 2):
                        dot_edges.append((str(a), str(b)))
                properties = ('connected',)
                verify_and_dot('dsa_sitelink_initial', dot_edges,
                               directed=False,
                               label=self.my_dsa_dnstr, properties=properties,
                               debug=DEBUG, verify=opts.verify,
                               dot_files=opts.dot_files)

            if forget_local_links:
                for dsa in self.my_site.dsa_table.values():
                    dsa.connect_table = {k: v for k, v in
                                         dsa.connect_table.items()
                                         if v.is_rodc_topology()}
                self.plot_all_connections('dsa_forgotten_local')

            if forget_intersite_links:
                for site in self.site_table.values():
                    for dsa in site.dsa_table.values():
                        dsa.connect_table = {k: v for k, v in
                                             dsa.connect_table.items()
                                             if site is self.my_site and
                                             v.is_rodc_topology()}

                self.plot_all_connections('dsa_forgotten_all')
            # These are the published steps (in order) for the
            # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)

            # Step 1
            self.refresh_failed_links_connections()

            # Step 2
            self.intrasite()

            # Step 3
            all_connected = self.intersite()

            # Step 4
            self.remove_unneeded_ntdsconn(all_connected)

            # Step 5
            self.translate_ntdsconn()

            # Step 6
            self.remove_unneeded_failed_links_connections()

            # Step 7
            self.update_rodc_connection()

            if opts.verify or opts.dot_files:
                self.plot_all_connections('dsa_final',
                                          ('connected', 'forest_of_rings'))

                DEBUG_MAGENTA("there are %d dsa guids" % len(guid_to_dnstr))

                dot_edges = []
                edge_colors = []
                my_dnstr = self.my_dsa.dsa_dnstr
                current_reps, needed_reps = self.my_dsa.get_rep_tables()
                for dnstr, n_rep in needed_reps.items():
                    for reps_from in n_rep.rep_repsFrom:
                        guid_str = str(reps_from.source_dsa_obj_guid)
                        dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
                        edge_colors.append('#' + str(n_rep.nc_guid)[:6])

                verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True,
                               label=self.my_dsa_dnstr,
                               properties=(), debug=DEBUG, verify=opts.verify,
                               dot_files=opts.dot_files,
                               edge_colors=edge_colors)

                dot_edges = []

                for site in self.site_table.values():
                    for dsa in site.dsa_table.values():
                        current_reps, needed_reps = dsa.get_rep_tables()
                        for n_rep in needed_reps.values():
                            for reps_from in n_rep.rep_repsFrom:
                                dsa_guid = str(reps_from.source_dsa_obj_guid)
                                dsa_dn = guid_to_dnstr[dsa_guid]
                                dot_edges.append((dsa.dsa_dnstr, dsa_dn))

                verify_and_dot('dsa_repsFrom_final_all', dot_edges,
                               directed=True, label=self.my_dsa_dnstr,
                               properties=(), debug=DEBUG, verify=opts.verify,
                               dot_files=opts.dot_files)

        except:
            raise

        return 0

    def import_ldif(self, dburl, lp, creds, ldif_file):
        """Import all objects and attributes that are relevent
        to the KCC algorithms from a previously exported LDIF file.

        The point of this function is to allow a programmer/debugger to
        import an LDIF file with non-security relevent information that
        was previously extracted from a DC database.  The LDIF file is used
        to create a temporary abbreviated database.  The KCC algorithm can
        then run against this abbreviated database for debug or test
        verification that the topology generated is computationally the
        same between different OSes and algorithms.

        :param dburl: path to the temporary abbreviated db to create
        :param ldif_file: path to the ldif file to import
        """
        try:
            self.samdb = ldif_utils.ldif_to_samdb(dburl, lp, ldif_file,
                                                  opts.forced_local_dsa)
        except ldif_utils.LdifError, e:
            print e
            return 1
        return 0

    def export_ldif(self, dburl, lp, creds, ldif_file):
        """Routine to extract all objects and attributes that are relevent
        to the KCC algorithms from a DC database.

        The point of this function is to allow a programmer/debugger to
        extract an LDIF file with non-security relevent information from
        a DC database.  The LDIF file can then be used to "import" via
        the import_ldif() function this file into a temporary abbreviated
        database.  The KCC algorithm can then run against this abbreviated
        database for debug or test verification that the topology generated
        is computationally the same between different OSes and algorithms.

        :param dburl: LDAP database URL to extract info from
        :param ldif_file: output LDIF file name to create
        """
        try:
            ldif_utils.samdb_to_ldif_file(self.samdb, dburl, lp, creds,
                                          ldif_file)
        except ldif_utils.LdifError, e:
            print e
            return 1
        return 0

##################################################
# Global Functions
##################################################


def get_spanning_tree_edges(graph, my_site, label=None):
    """Compute the spanning-tree edges connecting the colored vertices.

    Three phases: per-edge-set Dijkstra runs collect candidate internal
    edges; Kruskal's algorithm selects the spanning tree from them; a
    final pass orients partial-replica edges and keeps only edges that
    touch the local site.

    :param graph: the intersite graph (vertices, edges and edge sets
        already populated by the caller)
    :param my_site: the local site; only output edges with an endpoint
        in this site are returned
    :param label: optional label for dot-file/verification output
    :return: tuple of (edge list, component count from Kruskal's run)
    """
    # Phase 1: Run Dijkstra's to get a list of internal edges, which are
    # just the shortest-paths connecting colored vertices

    internal_edges = set()

    for e_set in graph.edge_set:
        edgeType = None
        for v in graph.vertices:
            v.edges = []

        # All con_type in an edge set is the same
        for e in e_set.edges:
            edgeType = e.con_type
            for v in e.vertices:
                v.edges.append(e)

        if opts.verify or opts.dot_files:
            graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
                           for a, b in
                           itertools.chain(
                               *(itertools.combinations(edge.vertices, 2)
                                 for edge in e_set.edges))]
            graph_nodes = [v.site.site_dnstr for v in graph.vertices]

            if opts.dot_files and opts.debug:
                write_dot_file('edgeset_%s' % (edgeType,), graph_edges,
                               vertices=graph_nodes, label=label)

            if opts.verify:
                verify_graph('spanning tree edge set %s' % edgeType,
                             graph_edges, vertices=graph_nodes,
                             properties=('complete', 'connected'),
                             debug=DEBUG)

        # Run dijkstra's algorithm with just the red vertices as seeds
        # Seed from the full replicas
        dijkstra(graph, edgeType, False)

        # Process edge set
        process_edge_set(graph, e_set, internal_edges)

        # Run dijkstra's algorithm with red and black vertices as the seeds
        # Seed from both full and partial replicas
        dijkstra(graph, edgeType, True)

        # Process edge set
        process_edge_set(graph, e_set, internal_edges)

    # All vertices have root/component as itself
    setup_vertices(graph)
    process_edge_set(graph, None, internal_edges)

    if opts.verify or opts.dot_files:
        graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
                       for e in internal_edges]
        graph_nodes = [v.site.site_dnstr for v in graph.vertices]
        verify_properties = ('multi_edge_forest',)
        verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
                       properties=verify_properties, debug=DEBUG,
                       verify=opts.verify,
                       dot_files=opts.dot_files)

    # Phase 2: Run Kruskal's on the internal edges
    output_edges, components = kruskal(graph, internal_edges)

    # This recalculates the cost for the path connecting the
    # closest red vertex. Ignoring types is fine because NO
    # suboptimal edge should exist in the graph
    dijkstra(graph, "EDGE_TYPE_ALL", False)  # TODO rename
    # Phase 3: Process the output
    for v in graph.vertices:
        if v.is_red():
            v.dist_to_red = 0
        else:
            v.dist_to_red = v.repl_info.cost

    if opts.verify or opts.dot_files:
        graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
                       for e in internal_edges]
        graph_nodes = [v.site.site_dnstr for v in graph.vertices]
        verify_properties = ('multi_edge_forest',)
        verify_and_dot('postkruskal', graph_edges, graph_nodes,
                       label=label, properties=verify_properties,
                       debug=DEBUG, verify=opts.verify,
                       dot_files=opts.dot_files)

    # Ensure only one-way connections for partial-replicas,
    # and make sure they point the right way.
    edge_list = []
    for edge in output_edges:
        # We know these edges only have two endpoints because we made
        # them.
        v, w = edge.vertices
        if v.site is my_site or w.site is my_site:
            if (((v.is_black() or w.is_black()) and
                 v.dist_to_red != MAX_DWORD)):
                edge.directed = True

                # Orient towards the endpoint closer to a red vertex
                if w.dist_to_red < v.dist_to_red:
                    edge.vertices[:] = w, v
            edge_list.append(edge)

    if opts.verify or opts.dot_files:
        graph_edges = [[x.site.site_dnstr for x in e.vertices]
                       for e in edge_list]
        #add the reverse edge if not directed.
        graph_edges.extend([x.site.site_dnstr
                            for x in reversed(e.vertices)]
                           for e in edge_list if not e.directed)
        graph_nodes = [x.site.site_dnstr for x in graph.vertices]
        verify_properties = ()
        verify_and_dot('post-one-way-partial', graph_edges, graph_nodes,
                       label=label, properties=verify_properties,
                       debug=DEBUG, verify=opts.verify,
                       directed=True,
                       dot_files=opts.dot_files)

    # count the components
    return edge_list, components


def sort_replica_by_dsa_guid(rep1, rep2):
    """Comparison helper ordering NC replicas by their DSA guids.

    The guids are compared in their packed NDR form.

    :param rep1: An NC replica
    :param rep2: Another replica
    :return: -1, 0, or 1, indicating sort order.
    """
    packed1 = ndr_pack(rep1.rep_dsa_guid)
    packed2 = ndr_pack(rep2.rep_dsa_guid)
    return cmp(packed1, packed2)


def sort_dsa_by_gc_and_guid(dsa1, dsa2):
    """Comparison helper ordering DSAs by global catalog status, then guid.

    A GC DSA sorts before a non-GC DSA; within each group the guids are
    compared in their packed NDR form.

    :param dsa1: A DSA object
    :param dsa2: Another DSA
    :return: -1, 0, or 1, indicating sort order.
    """
    gc1 = dsa1.is_gc()
    gc2 = dsa2.is_gc()
    if gc1 != gc2:
        # Exactly one is a global catalog; it comes first
        return -1 if gc1 else +1
    return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))


def is_smtp_replication_available():
    """Report whether SMTP may be used as a replication transport.

    Samba does not implement SMTP transfer of NC changes between DCs,
    so this always answers no for now.

    :return: Boolean (always False)
    """
    return False


def create_edge(con_type, site_link, guid_to_vertex):
    """Build an undirected MultiEdge from a site link object.

    The edge's vertices are all vertices mapped from the guids in the
    site link's site list; its replication info mirrors the site
    link's cost, options, interval and schedule.

    :param con_type: connection (transport) type for the edge
    :param site_link: source site link object
    :param guid_to_vertex: mapping from guid string to vertex list
    :return: the populated MultiEdge
    """
    edge = MultiEdge()
    edge.site_link = site_link
    edge.vertices = []
    for site_guid in site_link.site_list:
        key = str(site_guid)
        if key in guid_to_vertex:
            edge.vertices.extend(guid_to_vertex[key])
    edge.repl_info.cost = site_link.cost
    edge.repl_info.options = site_link.options
    edge.repl_info.interval = site_link.interval
    edge.repl_info.schedule = convert_schedule_to_repltimes(site_link.schedule)
    edge.con_type = con_type
    edge.directed = False
    return edge


def create_auto_edge_set(graph, transport):
    """Collect all graph edges of the given transport into one edge set.

    The set is given a NULL guid because it is not associated with a
    SiteLinkBridge object.

    :param graph: graph whose edges are scanned
    :param transport: connection type to select
    :return: a MultiEdgeSet holding the matching edges
    """
    e_set = MultiEdgeSet()
    # use a NULL guid, not associated with a SiteLinkBridge object
    e_set.guid = misc.GUID()
    matching = [link for link in graph.edges if link.con_type == transport]
    e_set.edges.extend(matching)
    return e_set


def create_edge_set(graph, transport, site_link_bridge):
    """Return an (empty) edge set for a site link bridge.

    TODO not implemented - need to store all site link bridges
    """
    # e_set.guid = site_link_bridge
    return MultiEdgeSet()


def setup_vertices(graph):
    """Reset per-vertex state ahead of a Dijkstra pass.

    White vertices get infinite cost and no root or component;
    colored vertices become their own root and component with zero
    cost. Replication info fields are reset for every vertex.
    """
    for vertex in graph.vertices:
        white = vertex.is_white()
        vertex.repl_info.cost = MAX_DWORD if white else 0
        vertex.root = None if white else vertex
        vertex.component_id = None if white else vertex
        vertex.repl_info.interval = 0
        vertex.repl_info.options = 0xFFFFFFFF
        vertex.repl_info.schedule = None  # TODO highly suspicious
        vertex.demoted = False


def dijkstra(graph, edge_type, include_black):
    """Run a Dijkstra-style relaxation over the site graph.

    The priority queue is seeded from the colored vertices (optionally
    including black ones); edges incident to each popped vertex are
    relaxed via try_new_path().

    :param graph: graph with vertices and per-vertex edge lists
    :param edge_type: connection type driving seed eligibility
    :param include_black: whether black vertices may act as seeds
    """
    queue = []
    setup_dijkstra(graph, edge_type, include_black, queue)
    while queue:
        cost, guid, vertex = heapq.heappop(queue)
        for edge in vertex.edges:
            for other in edge.vertices:
                if other is vertex:
                    continue
                # Consider the path that reaches `other` via this edge
                try_new_path(graph, queue, vertex, edge, other)


def setup_dijkstra(graph, edge_type, include_black, queue):
    """Initialise vertex state and seed the Dijkstra priority queue.

    Colored vertices eligible for this edge type are pushed onto the
    queue; ineligible ones are demoted (infinite cost, no root).
    White vertices are skipped entirely.
    """
    setup_vertices(graph)
    for vertex in graph.vertices:
        if vertex.is_white():
            continue

        excluded_black = vertex.is_black() and not include_black
        rejects_type = (edge_type not in vertex.accept_black
                        or edge_type not in vertex.accept_red_red)
        if excluded_black or rejects_type:
            vertex.repl_info.cost = MAX_DWORD
            vertex.root = None  # NULL GUID
            vertex.demoted = True  # Demoted appears not to be used
        else:
            heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))


def try_new_path(graph, queue, vfrom, edge, vto):
    """Relax the path that reaches vto through vfrom and edge.

    If the combined path is strictly cheaper, or equally cheap with a
    longer usable schedule, vto adopts vfrom's root/component and the
    new replication info, and is re-queued.

    :param graph: the site graph (unused here, kept for uniformity)
    :param queue: the Dijkstra priority heap
    :param vfrom: vertex the path arrives from
    :param edge: the edge being relaxed
    :param vto: candidate vertex to update
    """
    newRI = ReplInfo()
    # What this function checks is that there is a valid time frame for
    # which replication can actually occur, despite being adequately
    # connected
    intersect = combine_repl_info(vfrom.repl_info, edge.repl_info, newRI)

    # If the new path costs more than the current, then ignore the edge
    if newRI.cost > vto.repl_info.cost:
        return

    # NOTE(review): an equal-cost path whose schedules failed to
    # intersect is not rejected here and may still win on duration
    # below -- confirm this is intended
    if newRI.cost < vto.repl_info.cost and not intersect:
        return

    new_duration = total_schedule(newRI.schedule)
    old_duration = total_schedule(vto.repl_info.schedule)

    # Cheaper or longer schedule
    if newRI.cost < vto.repl_info.cost or new_duration > old_duration:
        vto.root = vfrom.root
        vto.component_id = vfrom.component_id
        vto.repl_info = newRI
        heapq.heappush(queue, (vto.repl_info.cost, vto.guid, vto))


def check_demote_vertex(vertex, edge_type):
    """Demote a colored vertex that cannot accept this edge type.

    A vertex accepting neither red-red nor black edges of the given
    type gets infinite cost and loses its root. White vertices are
    left untouched.
    """
    if vertex.is_white():
        return

    accepts = (edge_type in vertex.accept_black or
               edge_type in vertex.accept_red_red)
    if not accepts:
        # Accepts neither red-red nor black edges, demote
        vertex.repl_info.cost = MAX_DWORD
        vertex.root = None
        vertex.demoted = True  # Demoted appears not to be used


def undemote_vertex(vertex):
    """Restore a colored vertex to its undemoted state.

    The vertex becomes its own root again with zero cost. White
    vertices are left untouched.
    """
    if not vertex.is_white():
        vertex.repl_info.cost = 0
        vertex.root = vertex
        vertex.demoted = False


def process_edge_set(graph, e_set, internal_edges):
    """Process the edges of one edge set (or, with None, every graph edge).

    When e_set is None each graph edge is processed with its endpoint
    vertices temporarily demoted if they cannot accept the edge's
    connection type, and undemoted again afterwards.
    """
    if e_set is not None:
        for edge in e_set.edges:
            process_edge(graph, edge, internal_edges)
        return

    for edge in graph.edges:
        for vertex in edge.vertices:
            check_demote_vertex(vertex, edge.con_type)
        process_edge(graph, edge, internal_edges)
        for vertex in edge.vertices:
            undemote_vertex(vertex)


def process_edge(graph, examine, internal_edges):
    """Add internal edges from the best endpoint of `examine` to the rest.

    The endpoint with the lowest (color, cost, packed guid) tuple is
    chosen as the best vertex; a candidate internal edge is added for
    every other rooted endpoint lying in a different component.
    """
    # One tuple of color, repl cost, packed guid and vertex per
    # endpoint of the edge under examination
    vertices = [(v.color, v.repl_info.cost, v.ndrpacked_guid, v)
                for v in examine.vertices]
    DEBUG("vertices is %s" % vertices)
    # Sorting puts the lowest color first, ties broken by cost, guid
    vertices.sort()
    bestv = vertices[0][3]

    # Add to internal edges an edge from every colored vertex to bestv
    for v in examine.vertices:
        if v.component_id is None or v.root is None:
            continue

        # Only a valid inter-tree edge if bestv is rooted and the two
        # endpoints sit in different components
        if (bestv.component_id is not None and
                bestv.root is not None and
                bestv.component_id != v.component_id):
            add_int_edge(graph, internal_edges, examine, bestv, v)


# Add internal edge, endpoints are roots of the vertices to pass in
# and are always colored
def add_int_edge(graph, internal_edges, examine, v1, v2):
    root1 = v1.root
    root2 = v2.root

    red_red = False
    if root1.is_red() and root2.is_red():
        red_red = True

    if red_red:
        if ((examine.con_type not in root1.accept_red_red
             or examine.con_type not in root2.accept_red_red)):
            return
    elif (examine.con_type not in root1.accept_black
          or examine.con_type not in root2.accept_black):
        return

    ri = ReplInfo()
    ri2 = ReplInfo()

    # Create the transitive replInfo for the two trees and this edge
    if not combine_repl_info(v1.repl_info, v2.repl_info, ri):
        return
    # ri is now initialized
    if not combine_repl_info(ri, examine.repl_info, ri2):
        return

    newIntEdge = InternalEdge(root1, root2, red_red, ri2, examine.con_type,
                              examine.site_link)
    # Order by vertex guid
    #XXX guid comparison using ndr_pack
    if newIntEdge.v1.ndrpacked_guid > newIntEdge.v2.ndrpacked_guid:
        newIntEdge.v1 = root2
        newIntEdge.v2 = root1

    internal_edges.add(newIntEdge)


def kruskal(graph, edges):
    """Run Kruskal's minimum-spanning-tree selection on internal edges.

    Every vertex's edge list is cleared first; add_out_edge()
    repopulates the lists of the chosen endpoints as output edges are
    accepted. Components are tracked union-find style through the
    vertices' component_id links.

    :param graph: the site graph supplying the vertices
    :param edges: iterable of InternalEdge candidates
    :return: tuple of (list of accepted output edges, number of
        components remaining after the merge)
    """
    for v in graph.vertices:
        v.edges = []

    # Each colored vertex starts out as its own component
    components = set([x for x in graph.vertices if not x.is_white()])

    # Sorted based on internal comparison function of internal edge
    # (cheapest candidates are considered first)
    edges = sorted(edges)

    output_edges = []
    for e in edges:  # TODO stop early once only one component remains
        parent1 = find_component(e.v1)
        parent2 = find_component(e.v2)
        if parent1 is not parent2:
            # The edge joins two distinct components: keep it and merge
            add_out_edge(graph, output_edges, e)
            parent1.component_id = parent2
            components.discard(parent1)

    return output_edges, len(components)


def find_component(vertex):
    """Return the representative root of the vertex's component.

    Follows the component_id chain to its fixed point (the vertex
    that references itself), then compresses the path so every node
    on the chain points directly at the root.
    """
    # Walk up to the representative
    root = vertex
    while root.component_id is not root:
        root = root.component_id

    # Path compression: repoint the whole chain at the root
    node = vertex
    while node.component_id is not root:
        parent = node.component_id
        node.component_id = root
        node = parent

    return root


def add_out_edge(graph, output_edges, e):
    """Record internal edge e as an accepted spanning-tree output edge.

    A fresh undirected MultiEdge carrying e's site link, connection
    type and replication info is appended to output_edges and
    registered with both endpoint vertices.
    """
    v1 = e.v1
    v2 = e.v2

    # This multi-edge is a 'real' edge with no GUID
    out = MultiEdge()
    out.directed = False
    out.site_link = e.site_link
    out.vertices.extend((v1, v2))
    out.con_type = e.e_type
    out.repl_info = e.repl_info

    output_edges.append(out)
    v1.edges.append(out)
    v2.edges.append(out)


def test_all_reps_from(lp, creds, rng_seed=None):
    """Run the KCC from every DSA's point of view and graph the results.

    For each DSA in the database a fresh KCC run is performed with
    that DSA forced as the local one. The resulting current and needed
    repsFrom tables plus every NTDSConnection are accumulated and
    emitted through verify_and_dot() for verification and/or dot-file
    output.

    :param lp: a loadparm context
    :param creds: credentials for the database connection
    :param rng_seed: optional seed re-applied before each per-DSA run
        so every run sees the same random sequence
    """
    kcc = KCC()
    kcc.load_samdb(opts.dburl, lp, creds)
    dsas = kcc.list_dsas()
    needed_parts = {}
    current_parts = {}

    # Map DSA guid strings back to their DN strings for edge labels
    guid_to_dnstr = {}
    for site in kcc.site_table.values():
        guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
                             for dnstr, dsa in site.dsa_table.items())

    dot_edges = []
    dot_vertices = []
    colours = []
    vertex_colours = []

    for dsa_dn in dsas:
        # Reset the RNG so each per-DSA run is reproducible
        if rng_seed:
            random.seed(rng_seed)
        kcc = KCC()
        kcc.run(opts.dburl, lp, creds, forced_local_dsa=dsa_dn,
                forget_local_links=opts.forget_local_links,
                forget_intersite_links=opts.forget_intersite_links)

        current, needed = kcc.my_dsa.get_rep_tables()

        # Merge in the rep tables of the other DSAs in the local site
        for dsa in kcc.my_site.dsa_table.values():
            if dsa is kcc.my_dsa:
                continue
            kcc.translate_ntdsconn(dsa)
            c, n = dsa.get_rep_tables()
            current.update(c)
            needed.update(n)

        # Accumulate (source, dest) repsFrom edges per partition
        for name, rep_table, rep_parts in (
                ('needed', needed, needed_parts),
                ('current', current, current_parts)):
            for part, nc_rep in rep_table.items():
                edges = rep_parts.setdefault(part, [])
                for reps_from in nc_rep.rep_repsFrom:
                    source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
                    dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)]
                    edges.append((source, dest))

        # Collect NTDSConnection edges; RODCs/RODC topology are red
        for site in kcc.site_table.values():
            for dsa in site.dsa_table.values():
                if dsa.is_ro():
                    vertex_colours.append('#cc0000')
                else:
                    vertex_colours.append('#0000cc')
                dot_vertices.append(dsa.dsa_dnstr)
                if dsa.connect_table:
                    DEBUG_FN("DSA %s %s connections:\n%s" %
                             (dsa.dsa_dnstr, len(dsa.connect_table),
                              [x.from_dnstr for x in
                               dsa.connect_table.values()]))
                for con in dsa.connect_table.values():
                    if con.is_rodc_topology():
                        colours.append('red')
                    else:
                        colours.append('blue')
                    dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))

    verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices,
                   label="all dsa NTDSConnections", properties=(),
                   debug=DEBUG, verify=opts.verify, dot_files=opts.dot_files,
                   directed=True, edge_colors=colours,
                   vertex_colors=vertex_colours)

    # One graph per partition for both the needed and current tables
    for name, rep_parts in (('needed', needed_parts),
                            ('current', current_parts)):
        for part, edges in rep_parts.items():
            verify_and_dot('all-repsFrom_%s__%s' % (name, part), edges,
                           directed=True, label=part,
                           properties=(), debug=DEBUG, verify=opts.verify,
                           dot_files=opts.dot_files)


# Module-level logger; DEBUG is the shorthand used throughout the file
logger = logging.getLogger("samba_kcc")
logger.addHandler(logging.StreamHandler(sys.stdout))
DEBUG = logger.debug


def _color_debug(*args, **kwargs):
    """Emit a debug message wrapped in the colour escape from kwargs."""
    colour = kwargs['color']
    DEBUG('%s%s%s' % (colour, args[0], C_NORMAL), *args[1:])

# Generate DEBUG_<COLOUR> helpers (e.g. DEBUG_RED) that wrap DEBUG
# with the matching terminal colour escape from the module globals.
_globals = globals()
for _color in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
               'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
               'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
    _globals['DEBUG_' + _color] = partial(_color_debug, color=_globals[_color])


def DEBUG_FN(msg=''):
    """Debug-log msg prefixed with the caller's file, line and function."""
    import traceback
    # extract_stack(None, 2) returns the two innermost frames; [0] is
    # the caller of DEBUG_FN
    filename, lineno, function, text = traceback.extract_stack(None, 2)[0]
    DEBUG("%s%s:%s%s %s%s()%s '%s'" % (CYAN, filename, BLUE, lineno,
                                       CYAN, function, C_NORMAL, msg))


##################################################
# samba_kcc entry point
##################################################

# Command-line definition: standard Samba option groups plus the
# KCC-specific switches below.
parser = optparse.OptionParser("samba_kcc [options]")
sambaopts = options.SambaOptions(parser)
credopts = options.CredentialsOptions(parser)

parser.add_option_group(sambaopts)
parser.add_option_group(credopts)
parser.add_option_group(options.VersionOptions(parser))

parser.add_option("--readonly", default=False,
                  help="compute topology but do not update database",
                  action="store_true")

parser.add_option("--debug",
                  help="debug output",
                  action="store_true")

parser.add_option("--verify",
                  help="verify that assorted invariants are kept",
                  action="store_true")

parser.add_option("--list-verify-tests",
                  help=("list what verification actions are available "
                        "and do nothing else"),
                  action="store_true")

parser.add_option("--no-dot-files", dest='dot_files',
                  help="Don't write dot graph files in /tmp",
                  default=True, action="store_false")

parser.add_option("--seed",
                  help="random number seed",
                  type=int)

parser.add_option("--importldif",
                  help="import topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("--exportldif",
                  help="export topology ldif file",
                  type=str, metavar="<file>")

parser.add_option("-H", "--URL",
                  help="LDB URL for database or target server",
                  type=str, metavar="<URL>", dest="dburl")

parser.add_option("--tmpdb",
                  help="schemaless database file to create for ldif import",
                  type=str, metavar="<file>")

parser.add_option("--now",
                  help=("assume current time is this ('YYYYmmddHHMMSS[tz]',"
                        " default: system time)"),
                  type=str, metavar="<date>")

parser.add_option("--forced-local-dsa",
                  help="run calculations assuming the DSA is this DN",
                  type=str, metavar="<DSA>")

parser.add_option("--attempt-live-connections", default=False,
                  help="Attempt to connect to other DSAs to test links",
                  action="store_true")

parser.add_option("--list-valid-dsas", default=False,
                  help=("Print a list of DSA dnstrs that could be"
                        " used in --forced-local-dsa"),
                  action="store_true")

parser.add_option("--test-all-reps-from", default=False,
                  help="Create and verify a graph of reps-from for every DSA",
                  action="store_true")

parser.add_option("--forget-local-links", default=False,
                  help="pretend not to know the existing local topology",
                  action="store_true")

parser.add_option("--forget-intersite-links", default=False,
                  help="pretend not to know the existing intersite topology",
                  action="store_true")


opts, args = parser.parse_args()


if opts.list_verify_tests:
    list_verify_tests()
    sys.exit(0)

# Verbosity: --debug wins; read-only runs default to INFO
if opts.debug:
    logger.setLevel(logging.DEBUG)
elif opts.readonly:
    logger.setLevel(logging.INFO)
else:
    logger.setLevel(logging.WARNING)

# initialize seed from optional input parameter
if opts.seed:
    random.seed(opts.seed)
else:
    random.seed(0xACE5CA11)

# Parse the --now override, with or without a timezone suffix;
# otherwise use the system clock.
if opts.now:
    for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
        try:
            now_tuple = time.strptime(opts.now, timeformat)
            break
        except ValueError:
            pass
    else:
        # else happens if break doesn't --> no match
        print >> sys.stderr, "could not parse time '%s'" % opts.now
        sys.exit(1)

    unix_now = int(time.mktime(now_tuple))
else:
    unix_now = int(time.time())

nt_now = unix2nttime(unix_now)

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)

# Default to the local sam database when no URL was given
if opts.dburl is None:
    opts.dburl = lp.samdb_url()

if opts.test_all_reps_from:
    opts.readonly = True
    rng_seed = opts.seed or 0xACE5CA11
    test_all_reps_from(lp, creds, rng_seed=rng_seed)
    sys.exit()

# Instantiate Knowledge Consistency Checker and perform run
kcc = KCC()

if opts.exportldif:
    rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
    sys.exit(rc)

if opts.importldif:
    # The import needs a plain file target, not an LDAP URL
    if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
        logger.error("Specify a target temp database file with --tmpdb option")
        sys.exit(1)

    rc = kcc.import_ldif(opts.tmpdb, lp, creds, opts.importldif)
    if rc != 0:
        sys.exit(rc)

if opts.list_valid_dsas:
    kcc.load_samdb(opts.dburl, lp, creds)
    print '\n'.join(kcc.list_dsas())
    sys.exit()

try:
    rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa,
                 opts.forget_local_links, opts.forget_intersite_links)
    sys.exit(rc)

except GraphError, e:
    print e
    sys.exit(1)