%% Cell type:markdown id: tags:
## IPython notebook for exercise sheet 5
%% Cell type:markdown id: tags:
### Import all Python libraries we will need
%% Cell type:code id: tags:
``` python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
import scipy.sparse.linalg
from IPython.display import set_matplotlib_formats, display, Math
set_matplotlib_formats('svg')
```
%% Cell type:markdown id: tags:
### Gradient descent with Armijo rule
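For a current iterate $x$ and a descent direction $d$, the Armijo rule accepts a step size $\tau>0$ if
$$E(x+\tau d)\le E(x)+\sigma\,\tau\,\langle\nabla E(x),d\rangle,\qquad\sigma\in(0,1).$$
The implementation below checks this equivalently via the ratio of the secant slope $(E(x+\tau d)-E(x))/\tau$ to the tangent slope $\langle\nabla E(x),d\rangle$: since the tangent slope is negative for a descent direction, the condition becomes that this ratio is at least $\sigma$.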
%% Cell type:code id: tags:
``` python
def GradientDescent(x0, E, DE, beta, sigma, maxIter, stopEpsilon, inverseSPMatrix=None):
    def ArmijoRule(x, initialTau, beta, sigma, energy, descentDir, gradientAtX, E):
        # Slope of E at x along the descent direction (negative for a true descent direction).
        tangentSlope = np.dot(descentDir.ravel(), gradientAtX.ravel())

        def ArmijoCondition(tau):
            # Armijo condition E(x + tau*d) <= E(x) + sigma*tau*<DE(x), d>, written as a
            # slope ratio; the inequality flips because the tangent slope is negative.
            secantSlope = (E(x + tau * descentDir) - energy) / tau
            return (secantSlope / tangentSlope) >= sigma

        tauMin = 0.000001
        tauMax = 100000
        tau = max(min(initialTau, tauMax), tauMin)
        condition = ArmijoCondition(tau)
        if condition:
            # The initial step size is admissible: enlarge it while it stays admissible.
            while condition and (tau < tauMax):
                tau = tau / beta
                condition = ArmijoCondition(tau)
            # The loop stopped at the first inadmissible tau, so step back once.
            tau = beta * tau
        else:
            # The initial step size is too large: shrink it until it becomes admissible.
            while (not condition) and (tau > tauMin):
                tau = beta * tau
                condition = ArmijoCondition(tau)
        return tau

    x = x0
    energyNew = E(x)
    tau = 1
    print('Initial energy {:.6f}'.format(energyNew))
    for i in range(maxIter):
        energy = energyNew
        gradient = DE(x)
        descentDir = -gradient
        # Optionally precondition the descent direction with a given sparse inverse matrix.
        if inverseSPMatrix is not None:
            descentDir = inverseSPMatrix * descentDir
        tau = ArmijoRule(x, tau, beta, sigma, energy, descentDir, gradient, E)
        x = x + tau * descentDir
        energyNew = E(x)
        print('{} steps, tau: {:.6f} energy {:.6f}'.format(i + 1, tau, energyNew))
        # Stop as soon as the energy decrease falls below the given threshold.
        if (energy - energyNew) < stopEpsilon:
            break
    return x
```
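%% Cell type:markdown id: tags:
As a quick sanity check (not part of the exercise sheet), the routine can be applied to a simple anisotropic quadratic, whose unique minimizer is the origin:
%% Cell type:code id: tags:
``` python
# E(x) = x_0^2 + 10*x_1^2 has its unique minimizer at the origin.
def Etest(x): return x[0]**2 + 10 * x[1]**2
def DEtest(x): return np.array([2 * x[0], 20 * x[1]])
xTest = GradientDescent(np.array([1.0, 1.0]), Etest, DEtest, 0.5, 0.5, 50, 1e-6)
print(xTest)
```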
%% Cell type:markdown id: tags:
$$J_a[x]=\sum_{i=1}^n(x_i-f_i)^2+\lambda\sum_{i=1}^{n-1}\frac1{h^2}(x_{i+1}-x_i)^2$$
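Differentiating term by term yields the gradient implemented in `DJa` below; interior points receive contributions from both neighboring difference terms:
$$\frac{\partial J_a}{\partial x_i}[x]=2(x_i-f_i)-\frac{2\lambda}{h^2}(x_{i+1}-x_i)+\frac{2\lambda}{h^2}(x_i-x_{i-1}),$$
where the second term is absent for $i=n$ and the third for $i=1$.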
%% Cell type:code id: tags:
``` python
def Ja(x, f, h, lambda_):
    # Data fidelity term plus the quadratic smoothness term.
    energy = np.sum(np.square(x - f)) + lambda_ / h**2 * np.sum(np.square(x[1:] - x[0:-1]))
    return energy

def DJa(x, f, h, lambda_):
    grad = 2 * (x - f)
    # Each difference term (x_{i+1} - x_i)^2 contributes to the gradient at both i and i+1.
    grad[0:-1] = grad[0:-1] - 2 * lambda_ / h**2 * (x[1:] - x[0:-1])
    grad[1:] = grad[1:] - 2 * lambda_ / h**2 * (x[0:-1] - x[1:])
    return grad
```
%% Cell type:markdown id: tags:
$$J_b[x]=\sum_{i=1}^n(x_i-f_i)^2+\lambda\sum_{i=1}^{n-1}\frac1{h}\left\vert x_{i+1}-x_i\right\vert_\epsilon.$$
Here, $\vert t\vert_\epsilon:=\sqrt{t^2+\epsilon^2}$ denotes the regularized absolute value, a smooth approximation of $\vert t\vert$ that matches the implementation below.
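The corresponding gradient, implemented in `DJb` below, uses $\frac{\mathrm{d}}{\mathrm{d}t}\vert t\vert_\epsilon=t/\sqrt{t^2+\epsilon^2}$:
$$\frac{\partial J_b}{\partial x_i}[x]=2(x_i-f_i)-\frac{\lambda}{h}\frac{x_{i+1}-x_i}{\sqrt{(x_{i+1}-x_i)^2+\epsilon^2}}+\frac{\lambda}{h}\frac{x_i-x_{i-1}}{\sqrt{(x_i-x_{i-1})^2+\epsilon^2}},$$
again with the boundary terms omitted for $i=n$ and $i=1$, respectively.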
%% Cell type:code id: tags:
``` python
def Jb(x, f, h, lambda_, epsilon):
    # Data fidelity term plus the regularized total variation term.
    energy = np.sum(np.square(x - f)) + lambda_ / h * np.sum(np.sqrt(np.square(x[1:] - x[0:-1]) + epsilon**2))
    return energy

def DJb(x, f, h, lambda_, epsilon):
    grad = 2 * (x - f)
    # The derivative of |t|_eps = sqrt(t^2 + eps^2) is t / sqrt(t^2 + eps^2).
    grad[0:-1] = grad[0:-1] - lambda_ / h * np.divide(x[1:] - x[0:-1], np.sqrt(np.square(x[1:] - x[0:-1]) + epsilon**2))
    grad[1:] = grad[1:] - lambda_ / h * np.divide(x[0:-1] - x[1:], np.sqrt(np.square(x[0:-1] - x[1:]) + epsilon**2))
    return grad
```
%% Cell type:markdown id: tags:
### Create a noisy input signal
%% Cell type:code id: tags:
``` python
N = 200
coords = np.linspace(0, 2*np.pi, N)
f0 = np.sin(coords)
# Perturb the clean signal with uniform noise in [-0.1, 0.1]; fix the seed for reproducibility.
np.random.seed(42)
f = f0 + 0.2 * (np.random.rand(coords.size) - 0.5)
x0 = f
plt.plot(coords, f, label='f')
plt.plot(coords, f0, label='f0')
plt.legend()
plt.show()
```
%% Cell type:markdown id: tags:
### Set parameters
%% Cell type:code id: tags:
``` python
lambda_ = 0.005
sigma = 0.5
beta = 0.5
h = 1/(N - 1)
epsilon = .001
```
%% Cell type:markdown id: tags:
### Minimize $J_a$ using gradient descent.
%% Cell type:code id: tags:
``` python
def Ea(x): return Ja(x, f, h, lambda_)
def DEa(x): return DJa(x, f, h, lambda_)
xa = GradientDescent(x0, Ea, DEa, beta, sigma, 1000, 0.001)
plt.plot(coords, xa, label='xa')
plt.legend()
plt.show()
```
%% Cell type:markdown id: tags:
### Minimize $J_b$ using gradient descent.
%% Cell type:code id: tags:
``` python
def Eb(x): return Jb(x, f, h, lambda_, epsilon)
def DEb(x): return DJb(x, f, h, lambda_, epsilon)
xb = GradientDescent(x0, Eb, DEb, beta, sigma, 1000, 0.001)
plt.plot(coords, xb, label='xb')
plt.legend()
plt.show()
```
%% Cell type:markdown id: tags:
### Minimize $J_a$ by solving a system of linear equations.
%% Cell type:code id: tags:
``` python
# Forward difference operator: (L x)_i = (x_{i+1} - x_i) / h, with the last row zeroed out.
L = 1/h * sparse.csr_matrix(sparse.diags([-1, 1], [0, 1], shape=(N, N)))
L[N-1, N-1] = 0
identityMatrix = sparse.eye(N)
# Setting the gradient of J_a to zero yields the linear system (I + lambda * L^T L) x = f.
A = identityMatrix + lambda_*L.transpose()*L
# For such a low number of unknowns, computing the inverse is fine.
invA = scipy.sparse.linalg.inv(sparse.csc_matrix(A))
xaLE = invA*x0
plt.plot(coords, xaLE, label='xaLE')
plt.legend()
plt.show()
```
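%% Cell type:markdown id: tags:
For larger problems, one would avoid forming the inverse explicitly and solve the sparse system directly instead, e.g. with `scipy.sparse.linalg.spsolve`; the snippet below is only an optional cross-check of the result above.
%% Cell type:code id: tags:
``` python
# Optional cross-check: solve A x = f directly instead of applying the precomputed inverse.
xaDirect = scipy.sparse.linalg.spsolve(sparse.csc_matrix(A), x0)
print('Max difference to xaLE: {:.2e}'.format(np.max(np.abs(xaDirect - xaLE))))
```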
%% Cell type:markdown id: tags:
### Minimize $J_a$ using gradient flow.
%% Cell type:code id: tags:
``` python
# A smaller sigma lets the Armijo rule accept the optimal step size for this preconditioned descent.
xaG = GradientDescent(x0, Ea, DEa, beta, 0.1, 1000, 0.001, invA)
plt.plot(coords, xaG, label='xaG')
plt.legend()
plt.show()
```
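%% Cell type:markdown id: tags:
Since the preconditioned descent and the linear system solve the same quadratic problem, `xaG` should essentially agree with `xaLE` (an optional check, not part of the sheet):
%% Cell type:code id: tags:
``` python
# The two minimizers of J_a should coincide up to the stopping tolerance.
print('Max difference between xaG and xaLE: {:.2e}'.format(np.max(np.abs(xaG - xaLE))))
```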
%% Cell type:markdown id: tags:
### Plot all results in one figure.
%% Cell type:code id: tags:
``` python
plt.plot(coords, xa, label='xa')
plt.plot(coords, xaLE, label='xaLE')
plt.plot(coords, xaG, label='xaG')
plt.plot(coords, xb, label='xb')
plt.plot(coords, f, label='f')
plt.plot(coords, f0, label='f0')
plt.legend()
plt.show()
```
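%% Cell type:markdown id: tags:
To complement the visual comparison, one can also quantify how close each signal is to the clean signal `f0` (an optional addition, not part of the sheet):
%% Cell type:code id: tags:
``` python
# Euclidean distance of each signal to the clean signal f0.
for name, signal in [('f', f), ('xa', xa), ('xaLE', xaLE), ('xaG', xaG), ('xb', xb)]:
    print('{:4s}: ||. - f0|| = {:.4f}'.format(name, np.linalg.norm(signal - f0)))
```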