summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDan Schult <dschult@colgate.edu>2019-09-29 08:26:08 -0400
committerGitHub <noreply@github.com>2019-09-29 08:26:08 -0400
commit4c56af7670eedfb5c4a4cdf3ac5071374587401e (patch)
tree4ee40271b8b6393c33b065620dea255027f3ee63
parent4a0f42b477e5b20377ba7502de555b67b429adc3 (diff)
downloadnetworkx-4c56af7670eedfb5c4a4cdf3ac5071374587401e.tar.gz
Fix many documentation based Issues (#3609)
* Move test.py from tests to testing. Fixes #3106 * Fix formula in docs and pycodestyle. Fixes #3189 * Fixes #3211 allowing generators to represent partitions Update docs. Add tests of is_partition * Improve docs for bipartite generator functions Fixes #3324 including adding which nodes are created in each partition add pycodestyle changes to bipartite generators * Add documentation about normalization of betweenness_centrality_subset Fixes #3481
-rw-r--r--networkx/__init__.py2
-rw-r--r--networkx/algorithms/assortativity/correlation.py40
-rw-r--r--networkx/algorithms/bipartite/generators.py79
-rw-r--r--networkx/algorithms/centrality/betweenness.py16
-rw-r--r--networkx/algorithms/centrality/betweenness_subset.py22
-rw-r--r--networkx/algorithms/community/community_utils.py24
-rw-r--r--networkx/algorithms/community/quality.py47
-rw-r--r--networkx/algorithms/community/tests/test_utils.py8
-rw-r--r--networkx/testing/__init__.py1
-rw-r--r--networkx/testing/test.py (renamed from networkx/tests/test.py)0
-rw-r--r--networkx/tests/README5
11 files changed, 154 insertions, 90 deletions
diff --git a/networkx/__init__.py b/networkx/__init__.py
index 49dedd4c..61f88774 100644
--- a/networkx/__init__.py
+++ b/networkx/__init__.py
@@ -121,7 +121,7 @@ from networkx.algorithms import *
import networkx.linalg
from networkx.linalg import *
-from networkx.tests.test import run as test
+from networkx.testing.test import run as test
import networkx.drawing
from networkx.drawing import *
diff --git a/networkx/algorithms/assortativity/correlation.py b/networkx/algorithms/assortativity/correlation.py
index feec0a31..a25545d7 100644
--- a/networkx/algorithms/assortativity/correlation.py
+++ b/networkx/algorithms/assortativity/correlation.py
@@ -1,4 +1,4 @@
-#-*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
"""Node assortativity coefficients and correlation measures.
"""
import networkx as nx
@@ -32,12 +32,12 @@ def degree_assortativity_coefficient(G, x='out', y='in', weight=None,
The degree type for target node (directed graphs only).
weight: string or None, optional (default=None)
- The edge attribute that holds the numerical value used
+ The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
nodes: list or iterable (optional)
- Compute degree assortativity only for nodes in container.
+ Compute degree assortativity only for nodes in container.
The default is all nodes.
Returns
@@ -64,14 +64,14 @@ def degree_assortativity_coefficient(G, x='out', y='in', weight=None,
-----
This computes Eq. (21) in Ref. [1]_ , where e is the joint
probability distribution (mixing matrix) of the degrees. If G is
- directed than the matrix e is the joint probability of the
+ directed then the matrix e is the joint probability of the
user-specified degree type for the source and target.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks,
Physical Review E, 67 026126, 2003
- .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
+ .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
"""
M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight)
@@ -80,12 +80,12 @@ def degree_assortativity_coefficient(G, x='out', y='in', weight=None,
def degree_pearson_correlation_coefficient(G, x='out', y='in',
weight=None, nodes=None):
- """Compute degree assortativity of graph.
+ """Compute degree assortativity of graph.
Assortativity measures the similarity of connections
in the graph with respect to the node degree.
- This is the same as degree_assortativity_coefficient but uses the
+ This is the same as degree_assortativity_coefficient but uses the
potentially faster scipy.stats.pearsonr function.
Parameters
@@ -99,7 +99,7 @@ def degree_pearson_correlation_coefficient(G, x='out', y='in',
The degree type for target node (directed graphs only).
weight: string or None, optional (default=None)
- The edge attribute that holds the numerical value used
+ The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
@@ -115,7 +115,7 @@ def degree_pearson_correlation_coefficient(G, x='out', y='in',
Examples
--------
>>> G=nx.path_graph(4)
- >>> r=nx.degree_pearson_correlation_coefficient(G)
+ >>> r=nx.degree_pearson_correlation_coefficient(G)
>>> print("%3.1f"%r)
-0.5
@@ -127,7 +127,7 @@ def degree_pearson_correlation_coefficient(G, x='out', y='in',
----------
.. [1] M. E. J. Newman, Mixing patterns in networks
Physical Review E, 67 026126, 2003
- .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
+ .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
"""
try:
@@ -150,12 +150,12 @@ def attribute_assortativity_coefficient(G, attribute, nodes=None):
----------
G : NetworkX graph
- attribute : string
+ attribute : string
Node attribute key
nodes: list or iterable (optional)
- Compute attribute assortativity for nodes in container.
- The default is all nodes.
+ Compute attribute assortativity for nodes in container.
+ The default is all nodes.
Returns
-------
@@ -173,7 +173,7 @@ def attribute_assortativity_coefficient(G, attribute, nodes=None):
Notes
-----
- This computes Eq. (2) in Ref. [1]_ , trace(M)-sum(M))/(1-sum(M),
+ This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)),
where M is the joint probability distribution (mixing matrix)
of the specified attribute.
@@ -197,12 +197,12 @@ def numeric_assortativity_coefficient(G, attribute, nodes=None):
----------
G : NetworkX graph
- attribute : string
+ attribute : string
Node attribute key. The corresponding attribute value must be an
integer.
nodes: list or iterable (optional)
- Compute numeric assortativity only for attributes of nodes in
+ Compute numeric assortativity only for attributes of nodes in
container. The default is all nodes.
Returns
@@ -221,7 +221,7 @@ def numeric_assortativity_coefficient(G, attribute, nodes=None):
Notes
-----
- This computes Eq. (21) in Ref. [1]_ , for the mixing matrix of
+ This computes Eq. (21) in Ref. [1]_ , for the mixing matrix
of the specified attribute.
References
@@ -243,7 +243,7 @@ def attribute_ac(M):
Notes
-----
- This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e))/(1-sum(e)),
+ This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)),
where e is the joint probability distribution (mixing matrix)
of the specified attribute.
@@ -293,9 +293,9 @@ def setup_module(module):
from nose import SkipTest
try:
import numpy
- except:
+ except ImportError:
raise SkipTest("NumPy not available")
try:
import scipy
- except:
+ except ImportError:
raise SkipTest("SciPy not available")
diff --git a/networkx/algorithms/bipartite/generators.py b/networkx/algorithms/bipartite/generators.py
index d4e37232..00635ff8 100644
--- a/networkx/algorithms/bipartite/generators.py
+++ b/networkx/algorithms/bipartite/generators.py
@@ -34,9 +34,9 @@ __all__ = ['configuration_model',
def complete_bipartite_graph(n1, n2, create_using=None):
"""Returns the complete bipartite graph `K_{n_1,n_2}`.
- Composed of two partitions with `n_1` nodes in the first
- and `n_2` nodes in the second. Each node in the first is
- connected to each node in the second.
+ The graph is composed of two partitions with nodes 0 to (n1 - 1)
+ in the first and nodes n1 to (n1 + n2 - 1) in the second.
+ Each node in the first is connected to each node in the second.
Parameters
----------
@@ -53,6 +53,9 @@ def complete_bipartite_graph(n1, n2, create_using=None):
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
+
+ This function is not imported in the main namespace.
+ To use it use nx.bipartite.complete_bipartite_graph
"""
G = nx.empty_graph(0, create_using)
if G.is_directed():
@@ -85,9 +88,10 @@ def configuration_model(aseq, bseq, create_using=None, seed=None):
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
- Nodes from the set A are connected to nodes in the set B by
- choosing randomly from the possible free stubs, one in A and
- one in B.
+ The graph is composed of two partitions. Set A has nodes 0 to
+ (len(aseq) - 1) and set B has nodes len(aseq) to (len(aseq) + len(bseq) - 1).
+ Nodes from set A are connected to nodes in set B by choosing
+ randomly from the possible free stubs, one in A and one in B.
Notes
-----
@@ -100,7 +104,7 @@ def configuration_model(aseq, bseq, create_using=None, seed=None):
to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
- To use it you have to explicitly import the bipartite package.
+ To use it use nx.bipartite.configuration_model
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
@@ -147,6 +151,8 @@ def havel_hakimi_graph(aseq, bseq, create_using=None):
"""Returns a bipartite graph from two given degree sequences using a
Havel-Hakimi style construction.
+ The graph is composed of two partitions. Set A has nodes 0 to
+ (len(aseq) - 1) and set B has nodes len(aseq) to (len(aseq) + len(bseq) - 1).
Nodes from the set A are connected to nodes in the set B by
connecting the highest degree nodes in set A to the highest degree
nodes in set B until all stubs are connected.
@@ -162,9 +168,6 @@ def havel_hakimi_graph(aseq, bseq, create_using=None):
Notes
-----
- This function is not imported in the main namespace.
- To use it you have to explicitly import the bipartite package.
-
The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
If no graph type is specified use MultiGraph with parallel edges.
If you want a graph with no parallel edges use create_using=Graph()
@@ -172,6 +175,9 @@ def havel_hakimi_graph(aseq, bseq, create_using=None):
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
+
+ This function is not imported in the main namespace.
+ To use it use nx.bipartite.havel_hakimi_graph
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
@@ -219,6 +225,8 @@ def reverse_havel_hakimi_graph(aseq, bseq, create_using=None):
"""Returns a bipartite graph from two given degree sequences using a
Havel-Hakimi style construction.
+ The graph is composed of two partitions. Set A has nodes 0 to
+ (len(aseq) - 1) and set B has nodes len(aseq) to (len(aseq) + len(bseq) - 1).
Nodes from set A are connected to nodes in the set B by connecting
the highest degree nodes in set A to the lowest degree nodes in
set B until all stubs are connected.
@@ -234,9 +242,6 @@ def reverse_havel_hakimi_graph(aseq, bseq, create_using=None):
Notes
-----
- This function is not imported in the main namespace.
- To use it you have to explicitly import the bipartite package.
-
The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
If no graph type is specified use MultiGraph with parallel edges.
If you want a graph with no parallel edges use create_using=Graph()
@@ -244,6 +249,9 @@ def reverse_havel_hakimi_graph(aseq, bseq, create_using=None):
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
+
+ This function is not imported in the main namespace.
+ To use it use nx.bipartite.reverse_havel_hakimi_graph
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
@@ -290,6 +298,8 @@ def alternating_havel_hakimi_graph(aseq, bseq, create_using=None):
"""Returns a bipartite graph from two given degree sequences using
an alternating Havel-Hakimi style construction.
+ The graph is composed of two partitions. Set A has nodes 0 to
+ (len(aseq) - 1) and set B has nodes len(aseq) to (len(aseq) + len(bseq) - 1).
Nodes from the set A are connected to nodes in the set B by
connecting the highest degree nodes in set A to alternatively the
highest and the lowest degree nodes in set B until all stubs are
@@ -306,9 +316,6 @@ def alternating_havel_hakimi_graph(aseq, bseq, create_using=None):
Notes
-----
- This function is not imported in the main namespace.
- To use it you have to explicitly import the bipartite package.
-
The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
If no graph type is specified use MultiGraph with parallel edges.
If you want a graph with no parallel edges use create_using=Graph()
@@ -316,6 +323,9 @@ def alternating_havel_hakimi_graph(aseq, bseq, create_using=None):
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
+
+ This function is not imported in the main namespace.
+ To use it use nx.bipartite.alternating_havel_hakimi_graph
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
@@ -346,7 +356,7 @@ def alternating_havel_hakimi_graph(aseq, bseq, create_using=None):
break # done, all are zero
bstubs.sort()
small = bstubs[0:degree // 2] # add these low degree targets
- large = bstubs[(-degree + degree // 2):] # and these high degree targets
+ large = bstubs[(-degree + degree // 2):] # now high degree targets
stubs = [x for z in zip(large, small) for x in z] # combine, sorry
if len(stubs) < len(small) + len(large): # check for zip truncation
stubs.append(large.pop())
@@ -366,6 +376,10 @@ def preferential_attachment_graph(aseq, p, create_using=None, seed=None):
"""Create a bipartite graph with a preferential attachment model from
a given single degree sequence.
+ The graph is composed of two partitions. Set A has nodes 0 to
+ (len(aseq) - 1) and set B has nodes starting with node len(aseq).
+ The number of nodes in set B is random.
+
Parameters
----------
aseq : list
@@ -380,7 +394,7 @@ def preferential_attachment_graph(aseq, p, create_using=None, seed=None):
References
----------
- .. [1] Guillaume, J.L. and Latapy, M.,
+ .. [1] Guillaume, J.L. and Latapy, M.,
Bipartite graphs as models of complex networks.
Physica A: Statistical Mechanics and its Applications,
2006, 371(2), pp.795-813.
@@ -391,9 +405,11 @@ def preferential_attachment_graph(aseq, p, create_using=None, seed=None):
Notes
-----
+ The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+ to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
- To use it you have to explicitly import the bipartite package.
+ To use it use nx.bipartite.preferential_attachment_graph
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
@@ -409,12 +425,12 @@ def preferential_attachment_graph(aseq, p, create_using=None, seed=None):
while vv[0]:
source = vv[0][0]
vv[0].remove(source)
- if seed.random() < p or G.number_of_nodes() == naseq:
- target = G.number_of_nodes()
+ if seed.random() < p or len(G) == naseq:
+ target = len(G)
G.add_node(target, bipartite=1)
G.add_edge(source, target)
else:
- bb = [[b] * G.degree(b) for b in range(naseq, G.number_of_nodes())]
+ bb = [[b] * G.degree(b) for b in range(naseq, len(G))]
# flatten the list of lists into a list.
bbstubs = reduce(lambda x, y: x + y, bb)
# choose preferentially a bottom node.
@@ -431,6 +447,8 @@ def random_graph(n, m, p, seed=None, directed=False):
"""Returns a bipartite random graph.
This is a bipartite version of the binomial (Erdős-Rényi) graph.
+ The graph is composed of two partitions. Set A has nodes 0 to
+ (n - 1) and set B has nodes n to (n + m - 1).
Parameters
----------
@@ -448,9 +466,6 @@ def random_graph(n, m, p, seed=None, directed=False):
Notes
-----
- This function is not imported in the main namespace.
- To use it you have to explicitly import the bipartite package.
-
The bipartite random graph algorithm chooses each of the n*m (undirected)
or 2*nm (directed) possible edges with probability p.
@@ -459,6 +474,9 @@ def random_graph(n, m, p, seed=None, directed=False):
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
+ This function is not imported in the main namespace.
+ To use it use nx.bipartite.random_graph
+
See Also
--------
gnp_random_graph, configuration_model
@@ -516,6 +534,8 @@ def gnmk_random_graph(n, m, k, seed=None, directed=False):
Produces a bipartite graph chosen randomly out of the set of all graphs
with n top nodes, m bottom nodes, and k edges.
+ The graph is composed of two sets of nodes.
+ Set A has nodes 0 to (n - 1) and set B has nodes n to (n + m - 1).
Parameters
----------
@@ -542,12 +562,15 @@ def gnmk_random_graph(n, m, k, seed=None, directed=False):
Notes
-----
- This function is not imported in the main namespace.
- To use it you have to explicitly import the bipartite package.
-
If k > m * n then a complete bipartite graph is returned.
This graph is a bipartite version of the `G_{nm}` random graph model.
+
+ The nodes are assigned the attribute 'bipartite' with the value 0 or 1
+ to indicate which bipartite set the node belongs to.
+
+ This function is not imported in the main namespace.
+ To use it use nx.bipartite.gnmk_random_graph
"""
G = nx.Graph()
G = _add_nodes_with_bipartite_label(G, n, m)
diff --git a/networkx/algorithms/centrality/betweenness.py b/networkx/algorithms/centrality/betweenness.py
index 37e46d46..b848b635 100644
--- a/networkx/algorithms/centrality/betweenness.py
+++ b/networkx/algorithms/centrality/betweenness.py
@@ -89,6 +89,22 @@ def betweenness_centrality(G, k=None, normalized=True, weight=None,
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
+ The total number of paths between source and target is counted
+ differently for directed and undirected graphs. Directed paths
+ are easy to count. Undirected paths are tricky: should a path
+ from "u" to "v" count as 1 undirected path or as 2 directed paths?
+
+ For betweenness_centrality we report the number of undirected
+ paths when G is undirected.
+
+ For betweenness_centrality_subset the reporting is different.
+ If the source and target subsets are the same, then we want
+ to count undirected paths. But if the source and target subsets
+ differ -- for example, if sources is {0} and targets is {1},
+ then we are only counting the paths in one direction. They are
+ undirected paths but we are counting them in a directed way.
+ To count them as undirected paths, each should count as half a path.
+
References
----------
.. [1] Ulrik Brandes:
diff --git a/networkx/algorithms/centrality/betweenness_subset.py b/networkx/algorithms/centrality/betweenness_subset.py
index a68806da..3019240a 100644
--- a/networkx/algorithms/centrality/betweenness_subset.py
+++ b/networkx/algorithms/centrality/betweenness_subset.py
@@ -72,11 +72,25 @@ def betweenness_centrality_subset(G, sources, targets, normalized=False,
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
- The normalization might seem a little strange but it is the same
- as in betweenness_centrality() and is designed to make
- betweenness_centrality(G) be the same as
+ The normalization might seem a little strange but it is
+ designed to make betweenness_centrality(G) be the same as
betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()).
+ The total number of paths between source and target is counted
+ differently for directed and undirected graphs. Directed paths
+ are easy to count. Undirected paths are tricky: should a path
+ from "u" to "v" count as 1 undirected path or as 2 directed paths?
+
+ For betweenness_centrality we report the number of undirected
+ paths when G is undirected.
+
+ For betweenness_centrality_subset the reporting is different.
+ If the source and target subsets are the same, then we want
+ to count undirected paths. But if the source and target subsets
+ differ -- for example, if sources is {0} and targets is {1},
+ then we are only counting the paths in one direction. They are
+ undirected paths but we are counting them in a directed way.
+ To count them as undirected paths, each should count as half a path.
References
----------
@@ -197,7 +211,7 @@ def _accumulate_subset(betweenness, S, P, sigma, s, targets):
while S:
w = S.pop()
if w in target_set:
- coeff = (delta[w] + 1.0)/ sigma[w]
+ coeff = (delta[w] + 1.0) / sigma[w]
else:
coeff = delta[w] / sigma[w]
for v in P[w]:
diff --git a/networkx/algorithms/community/community_utils.py b/networkx/algorithms/community/community_utils.py
index a5eca122..fa9d39cd 100644
--- a/networkx/algorithms/community/community_utils.py
+++ b/networkx/algorithms/community/community_utils.py
@@ -16,22 +16,24 @@ __all__ = ['is_partition']
def is_partition(G, communities):
- """Returns True if and only if `communities` is a partition of
- the nodes of `G`.
+ """Returns *True* if `communities` is a partition of the nodes of `G`.
A partition of a universe set is a family of pairwise disjoint sets
whose union is the entire universe set.
- `G` is a NetworkX graph.
+ Parameters
+ ----------
+ G : NetworkX graph.
- `communities` is an iterable of sets of nodes of `G`. This
- iterable will be consumed multiple times during the execution of
- this function.
+ communities : list or iterable of sets of nodes
+ If not a list, the iterable is converted internally to a list.
+ If it is an iterator it is exhausted.
"""
# Alternate implementation:
- #
- # return (len(G) == sum(len(c) for c in community) and
- # set(G) == set.union(*community))
- #
- return all(sum(1 if v in c else 0 for c in communities) == 1 for v in G)
+ # return all(sum(1 if v in c else 0 for c in communities) == 1 for v in G)
+ if not isinstance(communities, list):
+ communities = list(communities)
+ nodes = set(n for c in communities for n in c if n in G)
+
+ return len(G) == len(nodes) == sum(len(c) for c in communities)
diff --git a/networkx/algorithms/community/quality.py b/networkx/algorithms/community/quality.py
index 36485bcc..fdd0c542 100644
--- a/networkx/algorithms/community/quality.py
+++ b/networkx/algorithms/community/quality.py
@@ -26,7 +26,6 @@ class NotAPartition(NetworkXError):
"""Raised if a given collection is not a partition.
"""
-
def __init__(self, G, collection):
msg = '{} is not a valid partition of the graph {}'
msg = msg.format(G, collection)
@@ -34,8 +33,7 @@ class NotAPartition(NetworkXError):
def require_partition(func):
- """Decorator that raises an exception if a partition is not a valid
- partition of the nodes of a graph.
+ """Decorator to check that a valid partition is input to a function
Raises :exc:`networkx.NetworkXError` if the partition is not valid.
@@ -63,7 +61,6 @@ def require_partition(func):
NetworkXError: `partition` is not a valid partition of the nodes of G
"""
-
@wraps(func)
def new_func(*args, **kw):
# Here we assume that the first two arguments are (G, partition).
@@ -75,12 +72,14 @@ def require_partition(func):
def intra_community_edges(G, partition):
- """Returns the number of intra-community edges according to the given
- partition of the nodes of `G`.
+ """Returns the number of intra-community edges for a partition of `G`.
- `G` must be a NetworkX graph.
+ Parameters
+ ----------
+ G : NetworkX graph.
- `partition` must be a partition of the nodes of `G`.
+ partition : iterable of sets of nodes
+ This must be a partition of the nodes of `G`.
The "intra-community edges" are those edges joining a pair of nodes
in the same block of the partition.
@@ -90,19 +89,22 @@ def intra_community_edges(G, partition):
def inter_community_edges(G, partition):
- """Returns the number of inter-community edges according to the given
+ """Returns the number of inter-community edges for a partition of `G`,
+ according to the given
partition of the nodes of `G`.
- `G` must be a NetworkX graph.
+ Parameters
+ ----------
+ G : NetworkX graph.
- `partition` must be a partition of the nodes of `G`.
+ partition : iterable of sets of nodes
+ This must be a partition of the nodes of `G`.
The *inter-community edges* are those edges joining a pair of nodes
in different blocks of the partition.
Implementation note: this function creates an intermediate graph
- that may require the same amount of memory as required to store
- `G`.
+ that may require the same amount of memory as that of `G`.
"""
# Alternate implementation that does not require constructing a new
@@ -113,10 +115,8 @@ def inter_community_edges(G, partition):
# for block in partition))
# return sum(1 for u, v in G.edges() if aff[u] != aff[v])
#
- if G.is_directed():
- return nx.quotient_graph(G, partition, create_using=nx.MultiDiGraph()).size()
- else:
- return nx.quotient_graph(G, partition, create_using=nx.MultiGraph()).size()
+ MG = nx.MultiDiGraph if G.is_directed() else nx.MultiGraph
+ return nx.quotient_graph(G, partition, create_using=MG).size()
def inter_community_non_edges(G, partition):
@@ -163,7 +163,6 @@ def performance(G, partition):
A simple graph (directed or undirected).
partition : sequence
-
Partition of the nodes of `G`, represented as a sequence of
sets of nodes. Each block of the partition represents a
community.
@@ -264,9 +263,8 @@ def modularity(G, communities, weight='weight'):
----------
G : NetworkX Graph
- communities : list
- List of sets of nodes of `G` representing a partition of the
- nodes.
+ communities : list or iterable of set of nodes
+ These node sets must represent a partition of G's nodes.
Returns
-------
@@ -280,8 +278,11 @@ def modularity(G, communities, weight='weight'):
Examples
--------
+ >>> import networkx.algorithms.community as nx_comm
>>> G = nx.barbell_graph(3, 0)
- >>> nx.algorithms.community.modularity(G, [{0, 1, 2}, {3, 4, 5}])
+ >>> nx_comm.modularity(G, [{0, 1, 2}, {3, 4, 5}])
+ 0.35714285714285704
+ >>> nx_comm.modularity(G, nx_comm.label_propagation_communities(G))
0.35714285714285704
References
@@ -290,6 +291,8 @@ def modularity(G, communities, weight='weight'):
Oxford University Press, 2011.
"""
+ if not isinstance(communities, list):
+ communities = list(communities)
if not is_partition(G, communities):
raise NotAPartition(G, communities)
diff --git a/networkx/algorithms/community/tests/test_utils.py b/networkx/algorithms/community/tests/test_utils.py
index adc7496d..bfa01588 100644
--- a/networkx/algorithms/community/tests/test_utils.py
+++ b/networkx/algorithms/community/tests/test_utils.py
@@ -19,6 +19,9 @@ from networkx.algorithms.community import is_partition
def test_is_partition():
G = nx.empty_graph(3)
assert_true(is_partition(G, [{0, 1}, {2}]))
+ assert_true(is_partition(G, ({0, 1}, {2})))
+ assert_true(is_partition(G, ([0, 1], [2])))
+ assert_true(is_partition(G, [[0, 1], [2]]))
def test_not_covering():
@@ -29,3 +32,8 @@ def test_not_covering():
def test_not_disjoint():
G = nx.empty_graph(3)
assert_false(is_partition(G, [{0, 1}, {1, 2}]))
+
+
+def test_not_node():
+ G = nx.empty_graph(3)
+ assert_false(is_partition(G, [{0, 1}, {3}]))
diff --git a/networkx/testing/__init__.py b/networkx/testing/__init__.py
index db570765..884ac83d 100644
--- a/networkx/testing/__init__.py
+++ b/networkx/testing/__init__.py
@@ -1 +1,2 @@
from networkx.testing.utils import *
+from networkx.testing.test import run
diff --git a/networkx/tests/test.py b/networkx/testing/test.py
index 7941051a..7941051a 100644
--- a/networkx/tests/test.py
+++ b/networkx/testing/test.py
diff --git a/networkx/tests/README b/networkx/tests/README
index cc299aa4..87afc9e6 100644
--- a/networkx/tests/README
+++ b/networkx/tests/README
@@ -13,12 +13,9 @@ The simplest way is to import networkx and run the test() function.
or::
- python -c "import networkx; networkx.test()
+ python -c "import networkx; networkx.test()"
If you have the source package and the nose testing package you
can test the complete package from the unpacked source directory with::
python setup_egg.py nosetests
-
-The python module benchmark.py can be used to compare relative speed of small
-code bits using the timeit module for different graph classes.