Patch set: update networkx for newer dependency releases — replace the
deprecated scipy numpy-alias functions (scipy.array, scipy.sqrt, scipy.sign,
scipy.where, scipy.repeat, scipy.absolute, scipy.diag, scipy.ones,
scipy.identity; removed in SciPy 1.4) with their numpy equivalents, and fix
calls to APIs removed elsewhere (matplotlib.use(warn=...),
ElementTree.getchildren(), pandas' pd.np alias).
--- networkx/algorithms/centrality/eigenvector.py.orig	2019-10-16 20:03:56.000000000 -0600
+++ networkx/algorithms/centrality/eigenvector.py	2020-03-09 11:41:34.375972965 -0600
@@ -224,6 +224,7 @@ def eigenvector_centrality_numpy(G, weig
        Networks: An Introduction.
        Oxford University Press, USA, 2010, pp. 169.
     """
+    import numpy as np
     import scipy as sp
     from scipy.sparse import linalg
     if len(G) == 0:
@@ -234,7 +235,7 @@ def eigenvector_centrality_numpy(G, weig
     eigenvalue, eigenvector = linalg.eigs(M.T, k=1, which='LR',
                                           maxiter=max_iter, tol=tol)
     largest = eigenvector.flatten().real
-    norm = sp.sign(largest.sum()) * sp.linalg.norm(largest)
+    norm = np.sign(largest.sum()) * sp.linalg.norm(largest)
     return dict(zip(G, largest / norm))
 
 
--- networkx/algorithms/centrality/subgraph_alg.py.orig	2019-10-16 20:03:56.000000000 -0600
+++ networkx/algorithms/centrality/subgraph_alg.py	2020-03-09 11:42:44.854815882 -0600
@@ -226,6 +226,7 @@ def communicability_betweenness_centrali
     >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
     >>> cbc = nx.communicability_betweenness_centrality(G)
     """
+    import numpy
     import scipy
     import scipy.linalg
     nodelist = list(G)  # ordering of nodes in matrix
@@ -247,7 +248,7 @@ def communicability_betweenness_centrali
         # sum with row/col of node v and diag set to zero
         B[i, :] = 0
         B[:, i] = 0
-        B -= scipy.diag(scipy.diag(B))
+        B -= numpy.diag(numpy.diag(B))
         cbc[v] = float(B.sum())
         # put row and col back
         A[i, :] = row
--- networkx/algorithms/link_analysis/hits_alg.py.orig	2019-10-16 20:03:56.000000000 -0600
+++ networkx/algorithms/link_analysis/hits_alg.py	2020-03-09 11:43:48.909764257 -0600
@@ -286,7 +286,7 @@ def hits_scipy(G, max_iter=100, tol=1.0e
     M = nx.to_scipy_sparse_matrix(G, nodelist=list(G))
     (n, m) = M.shape  # should be square
     A = M.T * M  # authority matrix
-    x = scipy.ones((n, 1)) / n  # initial guess
+    x = np.ones((n, 1)) / n  # initial guess
     # power iteration on authority matrix
     i = 0
     while True:
@@ -294,7 +294,7 @@ def hits_scipy(G, max_iter=100, tol=1.0e
         x = A * x
         x = x / x.max()
         # check convergence, l1 norm
-        err = scipy.absolute(x - xlast).sum()
+        err = np.absolute(x - xlast).sum()
         if err < tol:
             break
         if i > max_iter:
--- networkx/algorithms/link_analysis/pagerank_alg.py.orig	2019-10-16 20:03:56.000000000 -0600
+++ networkx/algorithms/link_analysis/pagerank_alg.py	2020-03-09 11:46:41.977922907 -0600
@@ -420,6 +420,7 @@ def pagerank_scipy(G, alpha=0.85, person
        The PageRank citation ranking: Bringing order to the Web. 1999
        http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
     """
+    import numpy
     import scipy.sparse
 
     N = len(G)
@@ -429,23 +430,23 @@ def pagerank_scipy(G, alpha=0.85, person
     nodelist = list(G)
     M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
                                   dtype=float)
-    S = scipy.array(M.sum(axis=1)).flatten()
+    S = numpy.array(M.sum(axis=1)).flatten()
     S[S != 0] = 1.0 / S[S != 0]
     Q = scipy.sparse.spdiags(S.T, 0, *M.shape, format='csr')
     M = Q * M
 
     # initial vector
     if nstart is None:
-        x = scipy.repeat(1.0 / N, N)
+        x = numpy.repeat(1.0 / N, N)
     else:
-        x = scipy.array([nstart.get(n, 0) for n in nodelist], dtype=float)
+        x = numpy.array([nstart.get(n, 0) for n in nodelist], dtype=float)
         x = x / x.sum()
 
     # Personalization vector
     if personalization is None:
-        p = scipy.repeat(1.0 / N, N)
+        p = numpy.repeat(1.0 / N, N)
     else:
-        p = scipy.array([personalization.get(n, 0) for n in nodelist], dtype=float)
+        p = numpy.array([personalization.get(n, 0) for n in nodelist], dtype=float)
         p = p / p.sum()
 
     # Dangling nodes
@@ -453,10 +454,10 @@ def pagerank_scipy(G, alpha=0.85, person
         dangling_weights = p
     else:
         # Convert the dangling dictionary into an array in nodelist order
-        dangling_weights = scipy.array([dangling.get(n, 0) for n in nodelist],
+        dangling_weights = numpy.array([dangling.get(n, 0) for n in nodelist],
                                        dtype=float)
         dangling_weights /= dangling_weights.sum()
-    is_dangling = scipy.where(S == 0)[0]
+    is_dangling = numpy.where(S == 0)[0]
 
     # power iteration: make up to max_iter iterations
     for _ in range(max_iter):
@@ -464,7 +465,7 @@ def pagerank_scipy(G, alpha=0.85, person
         x = alpha * (x * M + sum(x[is_dangling]) * dangling_weights) + \
             (1 - alpha) * p
         # check convergence, l1 norm
-        err = scipy.absolute(x - xlast).sum()
+        err = numpy.absolute(x - xlast).sum()
         if err < N * tol:
             return dict(zip(nodelist, map(float, x)))
     raise nx.PowerIterationFailedConvergence(max_iter)
--- networkx/drawing/layout.py.orig	2019-10-16 20:03:56.000000000 -0600
+++ networkx/drawing/layout.py	2020-03-09 10:37:14.554300047 -0600
@@ -947,7 +947,7 @@ def planar_layout(G, scale=1, center=Non
             raise nx.NetworkXException("G is not planar.")
     pos = nx.combinatorial_embedding_to_pos(embedding)
     node_list = list(embedding)
-    pos = np.row_stack((pos[x] for x in node_list))
+    pos = np.row_stack(list(pos[x] for x in node_list))
     pos = pos.astype(np.float64)
     pos = rescale_layout(pos, scale=scale) + center
     return dict(zip(node_list, pos))
--- networkx/drawing/tests/test_pylab.py.orig	2019-10-16 20:03:56.000000000 -0600
+++ networkx/drawing/tests/test_pylab.py	2020-03-09 10:44:23.573357737 -0600
@@ -4,7 +4,7 @@ import itertools
 import pytest
 
 mpl = pytest.importorskip('matplotlib')
-mpl.use('PS', warn=False)
+mpl.use('PS')
 plt = pytest.importorskip('matplotlib.pyplot')
 plt.rcParams['text.usetex'] = False
 
@@ -172,7 +172,7 @@ class TestPylab(object):
         # See Issue #3295
         G = nx.path_graph(3, create_using=nx.MultiDiGraph)
         nx.draw_networkx(G, edgelist=[(0, 1, 0)])
-        nx.draw_networkx(G, edgelist=[(0, 1, 0)], node_size=[10, 20])
+        nx.draw_networkx(G, edgelist=[(0, 1, 0)], node_size=[10, 20, 0])
 
     def test_alpha_iter(self):
         pos = nx.random_layout(self.G)
--- networkx/linalg/laplacianmatrix.py.orig	2019-10-16 20:03:56.000000000 -0600
+++ networkx/linalg/laplacianmatrix.py	2020-03-09 11:50:44.872221831 -0600
@@ -116,6 +116,8 @@ def normalized_laplacian_matrix(G, nodel
        Laplacian, Electronic Journal of Linear Algebra, Volume 16, pp. 90-98,
        March 2007.
     """
+    import numpy
+    import numpy.lib.scimath
     import scipy
     import scipy.sparse
     if nodelist is None:
@@ -127,8 +129,8 @@ def normalized_laplacian_matrix(G, nodel
     D = scipy.sparse.spdiags(diags, [0], m, n, format='csr')
     L = D - A
     with scipy.errstate(divide='ignore'):
-        diags_sqrt = 1.0 / scipy.sqrt(diags)
-        diags_sqrt[scipy.isinf(diags_sqrt)] = 0
+        diags_sqrt = 1.0 / numpy.lib.scimath.sqrt(diags)
+        diags_sqrt[numpy.isinf(diags_sqrt)] = 0
     DH = scipy.sparse.spdiags(diags_sqrt, [0], m, n, format='csr')
     return DH.dot(L.dot(DH))
 
@@ -196,6 +198,8 @@ def directed_laplacian_matrix(G, nodelis
        Laplacians and the Cheeger inequality for directed graphs.
        Annals of Combinatorics, 9(1), 2005
     """
+    import numpy as np
+    import numpy.lib.scimath
     import scipy as sp
     from scipy.sparse import spdiags, linalg
 
@@ -207,9 +211,9 @@ def directed_laplacian_matrix(G, nodelis
     evals, evecs = linalg.eigs(P.T, k=1)
     v = evecs.flatten().real
     p = v / v.sum()
-    sqrtp = sp.sqrt(p)
+    sqrtp = numpy.lib.scimath.sqrt(p)
     Q = spdiags(sqrtp, [0], n, n) * P * spdiags(1.0 / sqrtp, [0], n, n)
-    I = sp.identity(len(G))
+    I = np.identity(len(G))
 
     return I - (Q + Q.T) / 2.0
 
@@ -329,6 +333,7 @@ def _transition_matrix(G, nodelist=None,
        If walk_type not specified or alpha not in valid range
     """
 
+    import numpy as np
     import scipy as sp
     from scipy.sparse import identity, spdiags
     if walk_type is None:
@@ -344,7 +349,7 @@ def _transition_matrix(G, nodelist=None,
                                   dtype=float)
     n, m = M.shape
     if walk_type in ["random", "lazy"]:
-        DI = spdiags(1.0 / sp.array(M.sum(axis=1).flat), [0], n, n)
+        DI = spdiags(1.0 / np.array(M.sum(axis=1).flat), [0], n, n)
         if walk_type == "random":
             P = DI * M
         else:
@@ -357,7 +362,7 @@ def _transition_matrix(G, nodelist=None,
         # this is using a dense representation
         M = M.todense()
         # add constant to dangling nodes' row
-        dangling = sp.where(M.sum(axis=1) == 0)
+        dangling = np.where(M.sum(axis=1) == 0)
         for d in dangling[0]:
             M[d] = 1.0 / n
         # normalize
--- networkx/readwrite/tests/test_graphml.py.orig	2019-10-16 20:03:56.000000000 -0600
+++ networkx/readwrite/tests/test_graphml.py	2020-03-09 11:37:53.114605527 -0600
@@ -878,7 +878,7 @@ class TestWriteGraphML(BaseGraphML):
 
         xml = parse(fh)
         # Children are the key elements, and the graph element
-        children = xml.getroot().getchildren()
+        children = list(xml.getroot())
         assert len(children) == 3
 
         keys = [child.items() for child in children[:2]]
--- networkx/tests/test_convert_pandas.py.orig	2019-10-16 20:03:56.000000000 -0600
+++ networkx/tests/test_convert_pandas.py	2020-03-09 11:51:57.623748599 -0600
@@ -8,7 +8,8 @@ from networkx.testing import assert_node
 
 class TestConvertPandas(object):
     def setup_method(self):
-        self.rng = pd.np.random.RandomState(seed=5)
+        import numpy
+        self.rng = numpy.random.RandomState(seed=5)
         ints = self.rng.randint(1, 11, size=(3, 2))
         a = ['A', 'B', 'C']
         b = ['D', 'A', 'E']