Commit 1b96bd25, authored by Benjamin Berkels (parent 8f1423fb)

Upload New File

ex5.py (new file, mode 100644)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
import scipy.sparse.linalg
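# Exercise 5: denoising of a noisy 1D signal by gradient descent with Armijo
# step size control. Two energies are compared: J_a with a quadratic
# (Tikhonov-type) smoothness term and J_b with an epsilon-smoothed, TV-like
# term. For J_a, the minimizer is also computed by solving the corresponding
# linear system directly and via a preconditioned ("gradient flow") descent.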
def GradientDescent(x0, E, DE, beta, sigma, maxIter, stopEpsilon, inverseSPMatrix=None):
    def ArmijoRule(x, initialTau, beta, sigma, energy, descentDir, gradientAtX, E):
        # Slope of E at x in the descent direction, i.e. <DE(x), d>.
        tangentSlope = np.dot(descentDir.ravel(), gradientAtX.ravel())

        def ArmijoCondition(tau):
            # Armijo condition E(x + tau*d) <= E(x) + sigma*tau*<DE(x), d>,
            # written as a slope ratio (note that tangentSlope is negative).
            secantSlope = (E(x + tau * descentDir) - energy) / tau
            return (secantSlope / tangentSlope) >= sigma

        tauMin = 1e-6
        tauMax = 1e5
        tau = max(min(initialTau, tauMax), tauMin)
        condition = ArmijoCondition(tau)
        if condition:
            # The initial step size is admissible: enlarge it while it stays admissible.
            while condition and (tau < tauMax):
                tau = tau / beta
                condition = ArmijoCondition(tau)
            # Step back to the last enlargement that satisfied the condition.
            tau = beta * tau
        else:
            # The initial step size is too large: shrink it until it becomes admissible.
            while (not condition) and (tau > tauMin):
                tau = beta * tau
                condition = ArmijoCondition(tau)
        return tau
    x = x0
    energyNew = E(x)
    tau = 1
    print('Initial energy {:.6f}'.format(energyNew))
    for i in range(maxIter):
        energy = energyNew
        gradient = DE(x)
        descentDir = -gradient
        # Optionally precondition the descent direction with a given matrix,
        # e.g. the inverse of the system matrix of the associated linear problem.
        if inverseSPMatrix is not None:
            descentDir = inverseSPMatrix * descentDir
        tau = ArmijoRule(x, tau, beta, sigma, energy, descentDir, gradient, E)
        x = x + tau * descentDir
        energyNew = E(x)
        print('{} steps, tau: {:.6f}, energy {:.6f}'.format(i + 1, tau, energyNew))
        # Stop once the energy decrease per step falls below the threshold.
        if (energy - energyNew) < stopEpsilon:
            break
    return x

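# The two denoising energies: J_a penalizes the squared differences of
# neighboring values (quadratic smoothing), J_b an epsilon-smoothed absolute
# difference (TV-like, more edge-preserving). Both keep x close to the data f.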
def Ja(x, f, h, lambda_):
    # J_a(x) = sum_i (x_i - f_i)^2 + lambda / h^2 * sum_i (x_{i+1} - x_i)^2
    return np.sum(np.square(x - f)) + lambda_ / h**2 * np.sum(np.square(x[1:] - x[:-1]))
def DJa(x, f, h, lambda_):
    # Gradient of J_a: data term 2*(x - f) plus the contributions of the
    # difference term to the left and right neighbor entries.
    grad = 2 * (x - f)
    grad[:-1] -= 2 * lambda_ / h**2 * (x[1:] - x[:-1])
    grad[1:] -= 2 * lambda_ / h**2 * (x[:-1] - x[1:])
    return grad
def Jb(x, f, h, lambda_, epsilon):
    # J_b(x) = sum_i (x_i - f_i)^2 + lambda / h * sum_i sqrt((x_{i+1} - x_i)^2 + epsilon^2)
    return np.sum(np.square(x - f)) + lambda_ / h * np.sum(np.sqrt(np.square(x[1:] - x[:-1]) + epsilon**2))
def DJb(x, f, h, lambda_, epsilon):
    # Gradient of J_b, analogous to DJa but with the smoothed absolute value.
    grad = 2 * (x - f)
    grad[:-1] -= lambda_ / h * np.divide(x[1:] - x[:-1], np.sqrt(np.square(x[1:] - x[:-1]) + epsilon**2))
    grad[1:] -= lambda_ / h * np.divide(x[:-1] - x[1:], np.sqrt(np.square(x[:-1] - x[1:]) + epsilon**2))
    return grad
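# A simple way to sanity check the analytic gradients is to compare them with
# central finite differences, e.g. (sketch only, not part of the exercise):
#   t = 1e-6; i = 10; e = np.zeros(x.size); e[i] = 1
#   (Ja(x + t * e, f, h, lambda_) - Ja(x - t * e, f, h, lambda_)) / (2 * t)
# should approximately equal DJa(x, f, h, lambda_)[i].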
def main():
    # Create a noisy input signal: a sine curve plus uniform noise.
    N = 200
    coords = np.linspace(0, 2 * np.pi, N)
    f0 = np.sin(coords)
    np.random.seed(42)
    f = f0 + 0.2 * (np.random.rand(coords.size) - 0.5)
    x0 = f
    # Set parameters.
    lambda_ = 0.005
    sigma = 0.5
    beta = 0.5
    h = 1 / (N - 1)
    epsilon = 0.001
    # First functional
    def Ea(x): return Ja(x, f, h, lambda_)
    def DEa(x): return DJa(x, f, h, lambda_)
    xa = GradientDescent(x0, Ea, DEa, beta, sigma, 1000, 0.001)
    # Second functional
    def Eb(x): return Jb(x, f, h, lambda_, epsilon)
    def DEb(x): return DJb(x, f, h, lambda_, epsilon)
    xb = GradientDescent(x0, Eb, DEb, beta, sigma, 1000, 0.001)
    # First functional via its optimality condition, the linear system
    # (I + lambda * L^T L) x = f, where L is the forward difference matrix
    # (with the last row zeroed out).
    L = 1 / h * sparse.csr_matrix(sparse.diags([-1, 1], [0, 1], shape=(N, N)))
    L[N - 1, N - 1] = 0
    identityMatrix = sparse.eye(N)
    A = identityMatrix + lambda_ * L.transpose() * L
    # For such a low number of unknowns, computing the inverse is fine.
    invA = scipy.sparse.linalg.inv(sparse.csc_matrix(A))
    xaLE = invA * x0
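    # For larger N, solving the system directly would be preferable to forming
    # the inverse, e.g.:
    #   xaLE = scipy.sparse.linalg.spsolve(sparse.csc_matrix(A), f)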
    # First functional with gradient flow, i.e. gradient descent preconditioned
    # with invA. Use a small sigma here to get the optimal tau in this case.
    xaG = GradientDescent(x0, Ea, DEa, beta, 0.1, 1000, 0.001, invA)
    # Plot the minimizers together with the noisy input f and the clean signal f0.
    plt.plot(coords, xa, label='xa')
    plt.plot(coords, xaLE, label='xaLE')
    plt.plot(coords, xaG, label='xaG')
    plt.plot(coords, xb, label='xb')
    plt.plot(coords, f, label='f')
    plt.plot(coords, f0, label='f0')
    plt.legend()
    plt.show()
if __name__ == '__main__':
    main()