# M3C 2017 Homework 1 solution

The complete Python script, hw1soln.py:

"""M3C 2017 Homework 1 solution
"""
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from time import time

def rw2d(Nt,M,a=0,b=0,random=1):
    """Simulate M realizations of a biased 2D random walk over Nt steps.

    Input variables
    Nt: number of time steps
    M: Number of simulations
    a,b: bias parameters
    random: 1 selects np.random.randint sampling, otherwise np.random.choice

    Returns the ensemble averages <X>, <Y>, <X^2>, <Y^2>, <XY> at every
    time step (five arrays of length Nt+1).
    """
    assert type(a) is int and type(b) is int, "a and b must be integers"
    assert a>=-1, "a out of acceptable range"
    assert b<=1, "b out of acceptable range"

    # Draw every step of every walk at once; bias enters through the step
    # sizes: x-steps take values {-1, 1+a}, y-steps take values {-1, 1-b}.
    if random == 1:
        steps_x = (2+a)*np.random.randint(0,2,(Nt+1,M))-1
        steps_y = (2-b)*np.random.randint(0,2,(Nt+1,M))-1
    else:
        steps_x = np.random.choice((-1,1+a),(Nt+1,M))
        steps_y = np.random.choice((-1,1-b),(Nt+1,M))

    # All walks start from the origin.
    steps_x[0,:] = 0
    steps_y[0,:] = 0

    # Trajectories are the cumulative sums of the steps.
    X = steps_x.cumsum(axis=0)
    Y = steps_y.cumsum(axis=0)

    # Ensemble statistics taken across the M simulations.
    return (X.mean(axis=1),
            Y.mean(axis=1),
            np.mean(X*X,axis=1),
            np.mean(Y*Y,axis=1),
            np.mean(X*Y,axis=1))

def rwnet1(H,Hf,a=0,display=False,debug=False,timer=False):
    """Grow a random network by releasing biased random walkers from height H.

    Input variables
    H: Height at which nodes are initially introduced
    Hf: Final network height
    a: horizontal bias parameter, should be -1, 0, or 1
    display: figure displaying the network is created when true
    debug: print node index and coordinates as each node sticks
    timer: print the elapsed wall-clock time when true
    Output variables
    X,Y: Final node coordinates
    output: a tuple containing any other information you would
    like the function to return, may be left empty
    """
    assert a in [-1,0,1], "a must be -1,0,or 1"

    output = ()
    if timer:
        t1 = time()

    # A walker sticks once it is within sqrt(dstar2) of an existing node.
    dstar2 = 1+(1+a)**2
    X,Y = np.array(0),np.array(0)  # seed node at the origin

    # Release walkers one at a time until the network reaches height Hf.
    while np.max(Y) < Hf:
        x,y = 0,H  # each new node starts at (0,H)
        d2 = np.min((x-X)**2 + (y-Y)**2)

        # Walk until the node reaches the ground or sticks to the network.
        while y > 0 and d2 > dstar2:
            x += (2+a)*np.random.randint(0,2)-1  # horizontal step: -1 or 1+a
            y += np.random.randint(-1,1)         # vertical step: -1 or 0
            d2 = np.min((x-X)**2 + (y-Y)**2)

        X = np.append(X,x)
        Y = np.append(Y,y)
        if debug:
            print("i,xx,yy=",X.size,x,y)

    # Display network
    if display:
        plt.figure()
        plt.plot(X,Y,'.',markersize=4)
        plt.xlabel('X')
        plt.ylabel('Y')
        plt.title('Simulated network, H=%5.1f,Hf=%5.1f,a=%d'%(H,Hf,a))

    if timer:
        t2 = time()
        print("Time elapse:",t2-t1)
    return X,Y,output

def rwnet2(L,H,Hf,a=0,display=False,debug=False):
    """Grow a random network between confining walls at X = +/- L.

    Input variables
    H: Height at which nodes are initially introduced
    L: Walls are placed at X = +/- L
    Hf: Final network height
    a: horizontal bias parameter, should be -1, 0, or 1
    display: figure displaying the network is created when true
    debug: print node index and coordinates as each node sticks
    Output variables
    X,Y: Final node coordinates
    output: a tuple containing any other information you would
    like the function to return, may be left empty
    """
    assert a in [-1,0,1], "a must be -1,0,or 1"
    output = ()

    # A walker sticks once it is within sqrt(dstar2) of an existing node.
    dstar2 = 1+(1+a)**2
    X,Y = np.array(0),np.array(0)  # seed node at the origin

    # Release walkers one at a time until the network reaches height Hf.
    while np.max(Y) < Hf:
        x,y = 0,H  # each new node starts at (0,H)
        d2 = np.min((x-X)**2 + (y-Y)**2)

        # Walk until the node reaches the ground or sticks to the network.
        while y > 0 and d2 > dstar2:
            x += (2+a)*np.random.randint(0,2)-1  # horizontal step: -1 or 1+a
            y += np.random.randint(-1,1)         # vertical step: -1 or 0

            # Clamp the walker to the walls at x = +/- L.
            if x < -L:
                x = -L
            elif x > L:
                x = L

            d2 = np.min((x-X)**2 + (y-Y)**2)

        X = np.append(X,x)
        Y = np.append(Y,y)
        if debug:
            print("i,xx,yy=",X.size,x,y)

    # Display network
    if display:
        plt.figure()
        plt.plot(X,Y,'.',markersize=4)
        plt.xlabel('X')
        plt.ylabel('Y')
        plt.title('Simulated network, H=%5.1f,Hf=%5.1f,a=%d'%(H,Hf,a))
    return X,Y,output

def analyze(display=False):
    """Compare growth rates of confined and unconfined networks.

    Runs M simulations for six parameter combinations (no walls / narrow
    walls / wide walls, each with and without rightward bias) and
    ensemble-averages the network height as a function of iteration.

    Input variables
    display: create a summary figure of the averaged heights when true
    Output: list of the six ensemble-averaged height curves
    """
    M = 20          # simulations per parameter set
    H = 200         # release height for new nodes
    Hf = 150        # stop each simulation at this network height
    L1,L2 = 30,150  # narrow and wide wall positions

    #-----------
    def simulateM(M,H,Hf,a=0,wall=False,L=H):
        """Run M simulations of random network
        and return height data
        """
        Ymax = []
        lmin = 1e6  # length of the shortest simulation seen so far
        for i in range(M):
            print(i,M)
            if wall:
                _,Ym,_ = rwnet2(L,H,Hf,a)
            else:
                _,Ym,_ = rwnet1(H,Hf,a)
            # Network height vs. time is the running maximum of the node
            # heights (Ym[0]=0 is the seed, so accumulate matches the
            # original zero-initialized recurrence).
            height = np.maximum.accumulate(Ym)
            Ymax.append(height)
            lmin = min(lmin,len(height))

        # Truncate every run to the shortest length so they can be averaged.
        Ymaxf = np.zeros((M,lmin))
        for i in range(M):
            Ymaxf[i,:] = Ymax[i][:lmin]

        Ymax_ave = np.mean(Ymaxf,axis=0)
        return Ymax,Ymaxf,Ymax_ave
    #-------

    # Only the ensemble averages are used below.
    _,_,Ymax_ave1 = simulateM(M,H,Hf)                    #no walls, a=0
    _,_,Ymax_ave2 = simulateM(M,H,Hf,a=1)                #no walls, rightward bias
    _,_,Ymax_ave3 = simulateM(M,H,Hf,wall=True,L=L1)     #narrow walls, a=0
    _,_,Ymax_ave4 = simulateM(M,H,Hf,wall=True,L=L2)     #wide walls, a=0
    _,_,Ymax_ave5 = simulateM(M,H,Hf,a=1,wall=True,L=L1) #narrow walls, rightward bias
    _,_,Ymax_ave6 = simulateM(M,H,Hf,a=1,wall=True,L=L2) #wide walls, rightward bias

    if display:
        plt.figure()
        plt.plot(Ymax_ave1,label=r'no walls, $\alpha=0$')
        plt.plot(Ymax_ave2,label=r'no walls, $\alpha=1$')
        plt.plot(Ymax_ave3,label=r'L=30, $\alpha=0$')
        plt.plot(Ymax_ave4,label=r'L=150, $\alpha=0$')
        plt.plot(Ymax_ave5,label=r'L=30, $\alpha=1$')
        plt.plot(Ymax_ave6,label=r'L=150, $\alpha=1$')
        plt.xlabel('t')
        plt.ylabel('Network height')
        # Bug fix: the title previously hard-coded M=10 while M=20 was used.
        plt.title('Variation of network heights with time (ensemble averaged with M=%d)'%M)
        plt.legend()

    return [Ymax_ave1,Ymax_ave2,Ymax_ave3,Ymax_ave4,Ymax_ave5,Ymax_ave6]

def network(X,Y,dstar,display=False,degree=False):
    """Build the NetworkX graph linking nearby network nodes.

    Input variables
    X,Y: Numpy arrays containing network node coordinates
    dstar: Links are placed between nodes within a distance, d<=dstar of each other
    display: Draw graph when true
    degree: Compute, display and return degree distribution for graph when true
    Output variables:
    G: NetworkX graph corresponding to X,Y,dstar
    D: degree distribution, only returned when degree is true
    """
    dstar2 = dstar**2
    N = X.size

    # Construct edge list: for node i, compare only against nodes i+1..N-1
    # so each pair is tested once.
    edges = []
    for i in range(N):
        dist2 = (X[i]-X[i+1:])**2 + (Y[i]-Y[i+1:])**2
        ind = np.argwhere(dist2 <= dstar2)+i+1
        for j in ind.flatten():
            edges.append([i,j])

    # Create graph from edge list.
    # Bug fix: the edges (and nodes) were previously never added to G,
    # so the returned graph was always empty.
    G = nx.Graph()
    G.add_nodes_from(range(N))  # keep isolated nodes in the graph
    G.add_edges_from(edges)

    #Draw graph
    if display:
        plt.figure()
        nx.draw(G,node_shape='.',node_size=4)
        plt.title('Soln, Network corresponding to input coordinates and $d^*$=%1.2f'%(dstar))
        # Second figure: place nodes at their actual coordinates (not required).
        d={}
        Z = np.array([X,Y]).T
        for k,v in enumerate(Z):
            d[k]=v
        plt.figure()
        nx.draw(G,pos=d,node_shape='.',node_size=4) #not required
        plt.title('Soln, Network corresponding to input coordinates and $d^*$=%1.2f'%(dstar))

    if degree:
        # Degree distribution: D[k] = number of nodes with degree k.
        D = nx.degree_histogram(G)
        plt.figure()
        plt.semilogy(D,'x')
        plt.xlabel('degree')
        plt.ylabel('Number of nodes')
        plt.title('Soln, Degree distribution for random network')

        return G,D
    else:
        return G

if __name__ == '__main__':
    # Running the module as a script performs the analysis and shows the
    # figures submitted with the assignment.
    results = analyze(display=True)
    plt.show()

1. (5 pts) Generate a log file named hw1.txt using:

$ git log > hw1.txt

2. (15 pts) Importing the module in a Python terminal, and running rw2d:

import hw1soln as hw1

Nt,M,a,b = 100,1000,1,2

Xm,Ym,X2m,Y2m,XYm = hw1.rw2d(Nt,M,a,b)


The code should be fully vectorized and loop-free.

(Not required for assignment) The analytical values for the averages at time, t, are:

\begin{align}\begin{aligned}\langle X\rangle = \frac{\alpha}{2} t , ~ \langle Y\rangle = -\frac{\beta}{2} t\\\langle X^2\rangle = \frac{\alpha^2 t(t-1)}{4} + \left(1 + \alpha + \frac{\alpha^2}{2}\right) t , ~ \langle Y^2\rangle = \frac{\beta^2 t(t-1)}{4} + \left(1 - \beta + \frac{\beta^2}{2}\right) t\\\langle XY\rangle = \langle X\rangle \langle Y\rangle\end{aligned}\end{align}

The computed second-order statistics along with the analytical results are shown below:

2. i) (25 pts) With sufficiently large domain size, this model generates fractal-like structures. Running rwnet1 with $$H=500$$ and $$Hf=400$$ and no bias produces:

While adding bias with a=1 ‘skews’ the network:

ii) (5 pts) Walls, with L sufficiently small, produce a confining effect, and the network then fills more of the domain:

Particular combinations of bias and walls can produce more complicated and interesting structures:

3. (25 pts) The network height at a given time, Ymax(t), can be extracted from the network coordinates returned by rwnet1 or rwnet2 – see the code above. It is important to take an ensemble average over several simulations with a given set of parameters to obtain meaningful results as shown in the figure for M=20:

The unconfined+unbiased and weakly-confined+unbiased cases show very similar growth rates; in both cases a single dominant branch emerges near $$X=0$$ and then proceeds to attract nodes. Interestingly, the strongly-confined unbiased case is only modestly faster. There are two competing effects here. First, confinement accelerates the development of the ‘base’ of the network. However, the confined network base is more evenly distributed across the domain which then promotes lateral growth of branches (as seen in the figure above). So nodes can attach more quickly, however the growth has a larger lateral component. The network height initially increases linearly, however from the figure we clearly see that the rate of growth increases for larger network heights (when a dominant branch develops).

Adding bias in the unconfined case, the direction of growth for the network skews towards the horizontal. It is now less probable that the nodes will attach on the right side of the network, and the lateral extent of the network reduces and growth in the vertical direction is promoted. Overall, the rate of growth increases since $$d^*$$ increases.

The combination of bias and walls can produce a very strong effect. If the walls are narrowly spaced, the nodes simply stack up at the wall and grow primarily in the vertical direction. The wider spacing leads to an initial stage where growth is primarily vertical, however once this base reaches a certain height, the direction of growth suddenly shifts towards the horizontal and as a result, the rate of vertical growth for the network reduces to what is seen in the unconfined case. (The above discussion provides more detail than is expected in submitted work) Note: “Time” here refers to iterations as nodes are added to the network rather than computational or wallclock time.

4. (25 pts) Consider a network with N nodes; an edge list is constructed as follows: for the ith node, the code computes the distances to nodes i+1 through N and uses np.argwhere to extract the indices of the nodes within the distance threshold. These indices along with i are added to the edge list. These edges are then added to a NetworkX graph using the add_edges_from method. For the no-wall, unbiased case shown above, the graph is:

The above graph distributes the nodes randomly. If we provide the node positions (not required) to nx.draw, we see figures that look similar to those from question 2:

However, zooming in, an interesting network structure is apparent:

The degree distribution is:

Note that the maximum possible degree is 8 which is also the maximum degree in the network. A simple modification would be to generalize the model to an n-dimensional random walk for nodes with coordinates, $$X_1,X_2,...,X_N$$. If N=3, the maximum degree jumps to 26 (with $$d^*$$ appropriately modified). No substantial coding changes would be needed. Equations would have to be added for $$X_3, ..., X_N$$ which would essentially be the same as the code for $$X_1$$ while the distance calculations would also have to be generalized to n-dimensions.

Many other possibilities exist. For example, step sizes, sticking criteria, and $$d^*$$ could be modified to allow nodes to pack together more ‘tightly’.