import numpy as np
import scipy.special as scp
import matplotlib.pyplot as plt
import math
%matplotlib inline
Exercise 1
(a)
x = np.linspace(1, 6, num = 100)
k = 5
n = np.array([1, 2, 3, 4, 5, 6])
X = np.array([n**i for i in range(0, k+1)]).T # Vandermonde matrix
y = np.array([1, 1, 2, 6, 24, 120])
g_coefficients = np.linalg.solve(X, y)
g = np.sum(np.array([g_coefficients[i]*x**i for i in range(0, k+1)]), 0)
print("coefficients of g:", g_coefficients)
(b)
y_log = np.log(y)
g_coefficients_log = np.linalg.solve(X, y_log)
h = np.exp(np.sum(np.array([g_coefficients_log[i]*x**i for i in range(0, k+1)]), 0))
print("coefficients of the log-space polynomial:", g_coefficients_log)
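A quick hedged check (a minimal sketch): evaluating the log-space interpolant at the nodes and exponentiating should reproduce the factorial values exactly, up to rounding.
# X @ g_coefficients_log gives log(Gamma) at the nodes; exponentiating should recover y
print("h reproduces the data at the nodes:", np.allclose(np.exp(X @ g_coefficients_log), y))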
(c)
Gamma = scp.gamma(x)
fig = plt.figure(figsize = (15, 5))
ax = fig.add_subplot(121)
ax.plot(x, Gamma, label = 'Gamma(x)')
ax.plot(x, g, label = 'g(x)')
ax.plot(x, h, label = 'h(x)')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('Two polynomial approximations of Gamma(x)')
ax.legend(loc = 'best')
plt.show()
fig = plt.figure(figsize = (15, 5))
ax2 = fig.add_subplot(121)
ax2.plot(x, np.abs((Gamma - g)/Gamma), label = '|Gamma(x) - g(x)|/Gamma(x)')
ax2.plot(x, np.abs((Gamma - h)/Gamma), label = '|Gamma(x) - h(x)|/Gamma(x)')
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_title('Relative residuals')
ax2.legend(loc = 'best')
ax3 = fig.add_subplot(122)
ax3.plot(x, np.abs(Gamma - g), label = '|Gamma(x) - g(x)|')
ax3.plot(x, np.abs(Gamma - h), label = '|Gamma(x) - h(x)|')
ax3.set_xlabel('x')
ax3.set_ylabel('y')
ax3.set_title('Absolute residuals')
ax3.legend(loc = 'best')
plt.show()
(d)
maximum_relative_error_Gamma_g = max(np.abs((Gamma - g)/Gamma))
maximum_relative_error_Gamma_h = max(np.abs((Gamma - h)/Gamma))
print("Maximum relative error between Gamma(x) and g(x): ", maximum_relative_error_Gamma_g)
print("Maximum relative error between Gamma(x) and h(x): ", maximum_relative_error_Gamma_h)
maximum_absolute_error_Gamma_g = max(np.abs(Gamma - g))
maximum_absolute_error_Gamma_h = max(np.abs(Gamma - h))
print("Maximum absolute error between Gamma(x) and g(x): ", maximum_absolute_error_Gamma_g)
print("Maximum absolute error between Gamma(x) and h(x): ", maximum_absolute_error_Gamma_h)
Exercise 2
(a)
n = 4
x_chebyshev = np.array([np.cos((2*i - 1)*np.pi/(2*n)) for i in range(1, n+1)])
x_linspace = np.linspace(-1, 1, 1000)
def f(x):
    return np.exp(-3*x) + np.exp(2*x)
y_chebyshev_exact = f(x_chebyshev)
y_linspace_exact = f(x_linspace)
def Lagrangian(k, x, x_chebyshev):
    # k-th Lagrange basis polynomial evaluated at x
    L_k = np.prod([(x - x_chebyshev[j])/(x_chebyshev[k] - x_chebyshev[j]) for j in range(0, n) if j != k])
    return L_k
def sum_Lagrangians(x, y, x_chebyshev):
    # interpolant value at x: sum of y[i] times the i-th Lagrange basis polynomial
    return np.sum([Lagrangian(i, x, x_chebyshev)*y[i] for i in range(0, n)])
y_linspace_predictions = np.array([sum_Lagrangians(x_linspace[i], y_chebyshev_exact, x_chebyshev) for i in range(len(x_linspace))])
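A quick hedged check on the Lagrange construction: by definition the interpolant must reproduce f exactly at the Chebyshev nodes, so the values below should agree to rounding accuracy.
# evaluate the interpolant at the interpolation nodes themselves
node_values = np.array([sum_Lagrangians(xc, y_chebyshev_exact, x_chebyshev) for xc in x_chebyshev])
print("p_3 matches f at the Chebyshev nodes:", np.allclose(node_values, y_chebyshev_exact))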
fig = plt.figure(figsize = (15, 5))
ax = fig.add_subplot(121)
ax.plot(x_linspace, y_linspace_exact, label = 'f(x)')
ax.plot(x_linspace, y_linspace_predictions, label = 'p_3(x)')
ax.scatter(x_chebyshev, y_chebyshev_exact)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('Polynomial approximation p_3(x) of f(x)')
ax.legend(loc = 'best')
ax2 = fig.add_subplot(122)
ax2.plot(x_linspace, np.abs(y_linspace_exact - y_linspace_predictions))
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_title('Absolute residual between f(x) and its polynomial approximation p_3(x)')
(b)
g_norm_infinity = max(np.abs(y_linspace_exact - y_linspace_predictions))
print("Infinity norm of (f - p_3): ", g_norm_infinity)
(d)
norm_infinity = 2
while (norm_infinity > g_norm_infinity):
    x_positive_random = np.random.uniform(0, 1, 2)
    x_random = np.hstack((-x_positive_random[::-1], x_positive_random))
    y_random_exact = f(x_random)
    y_linspace_predictions = np.array([sum_Lagrangians(x_linspace[i], y_random_exact, x_random) for i in range(len(x_linspace))])
    norm_infinity = max(np.abs(y_linspace_exact - y_linspace_predictions))
fig = plt.figure(figsize = (15, 5))
ax = fig.add_subplot(121)
ax.plot(x_linspace, y_linspace_exact, label = 'f(x)')
ax.plot(x_linspace, y_linspace_predictions, label = 'p_3*(x)')
ax.scatter(x_random, y_random_exact)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('Polynomial approximation p_3*(x) of f(x)')
ax.legend(loc = 'best')
ax2 = fig.add_subplot(122)
ax2.plot(x_linspace, np.abs(y_linspace_exact - y_linspace_predictions))
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_title('Absolute residual between f(x) and its polynomial approximation p_3*(x)')
norm_infinity = max(np.abs(y_linspace_exact - y_linspace_predictions))
X = np.array([x_random**i for i in range(0, n)]).T # Vandermonde matrix
y = f(x_random)
p_coefficients_star = np.linalg.solve(X, y)
print("Infinity norm of (f - p_3*): ", norm_infinity)
print("This can be obtained by interpolating at the following points: ", x_random)
print("This polynomial is fully described by the following set of parameters: [b_0, b_1, b_2, b_3] = ", p_coefficients_star)
Exercise 3
(a)
(b)
Exercise 4
(a)
A = np.array([[1, 0, 1, 4], [4, 1, 0, 1], [1, 4, 1, 0], [0, 1, 4, 1]])
b = np.array([6, 0, -6, 0])
alpha, beta, gamma, delta = np.linalg.solve(A, b)
print("Alpha =", alpha)
print("Beta =", beta)
print("Gamma =", gamma)
print("Delta =", delta)
(b)
# cubic Hermite basis functions on [0, 1]
def c0(t):
    return t**2 * (3-2*t)
def c1(t):
    return -t**2 * (1-t)
def c2(t):
    return (t-1)**2 * t
def c3(t):
    return 2*t**3 - 3*t**2 + 1
# spline pieces on [0, 1], [1, 2], [2, 3] and [3, 4]
def s_1(t, alpha, beta, gamma, delta):
    return c0(t) + alpha*c1(t) + delta*c2(t)
def s_2(t, alpha, beta, gamma, delta):
    return beta*c1(t-1) + alpha*c2(t-1) + c3(t-1)
def s_3(t, alpha, beta, gamma, delta):
    return -c0(t-2) + gamma*c1(t-2) + beta*c2(t-2)
def s_4(t, alpha, beta, gamma, delta):
    return delta*c1(t-3) + gamma*c2(t-3) - c3(t-3)
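A hedged continuity check (a minimal sketch): adjacent spline pieces should agree at the interior knots t = 1, 2, 3.
print("continuity at the knots:",
      np.isclose(s_1(1.0, alpha, beta, gamma, delta), s_2(1.0, alpha, beta, gamma, delta)),
      np.isclose(s_2(2.0, alpha, beta, gamma, delta), s_3(2.0, alpha, beta, gamma, delta)),
      np.isclose(s_3(3.0, alpha, beta, gamma, delta), s_4(3.0, alpha, beta, gamma, delta)))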
grid_points = 10000
t01 = np.linspace(0, 1, grid_points)
t12 = np.linspace(1, 2, grid_points)
t23 = np.linspace(2, 3, grid_points)
t34 = np.linspace(3, 4, grid_points)
x01 = s_1(t01, alpha, beta, gamma, delta)
x12 = s_2(t12, alpha, beta, gamma, delta)
x23 = s_3(t23, alpha, beta, gamma, delta)
x34 = s_4(t34, alpha, beta, gamma, delta)
t = np.hstack([t01, t12, t23, t34])
sx = np.hstack([x01, x12, x23, x34])
fig = plt.figure(figsize = (15, 5))
ax1 = fig.add_subplot(121)
ax1.plot(t, sx, label = 'sx(t)')
ax1.plot(t, np.sin(t*np.pi*0.5), label = 'sin(t*pi/2)')
ax1.set_xlabel('t')
ax1.set_title('Cubic Spline approximation of sin(t*pi/2)')
ax1.legend(loc = 'best')
ax2 = fig.add_subplot(122)
ax2.plot(t, sx - np.sin(t*np.pi*0.5))
ax2.set_xlabel('t')
ax2.set_ylabel('sx - sin(t*pi/2)')
ax2.set_title('Error Term')
print('The maximum error between the cubic spline approximation and the exact function is', max(abs(sx - np.sin(t*np.pi*0.5))))
print('Therefore, the cubic spline closely approximates the exact function.')
(c)
# cos(pi*t/2) = sin(pi*(t+1)/2), so the pieces of sy are the pieces of sx shifted cyclically by one interval
y01 = x12
y12 = x23
y23 = x34
y34 = x01
sy = np.hstack([y01, y12, y23, y34])
fig = plt.figure(figsize = (15, 5))
ax1 = fig.add_subplot(121)
ax1.plot(t, sy, label = 'sy(t)')
ax1.plot(t, np.cos(t*np.pi*0.5), label = 'cos(t*pi/2)')
ax1.set_xlabel('t')
ax1.set_title('Cubic Spline approximation of cos(t*pi/2)')
ax1.legend(loc = 'best')
ax2 = fig.add_subplot(122)
ax2.plot(t, sy - np.cos(t*np.pi*0.5))
ax2.set_xlabel('t')
ax2.set_ylabel('sy - cos(t*pi/2)')
ax2.set_title('Error Term')
print('The maximum error between the cubic spline approximation and the exact function is', max(abs(sy - np.cos(t*np.pi*0.5))))
print('Therefore, the cubic spline closely approximates the exact function.')
(d)
fig = plt.figure(figsize = (15, 5))
ax1 = fig.add_subplot(121)
ax1.scatter(sx, sy)
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_title('Parametric curve (sx(t), sy(t))')
# implementation of Shoelace's formula (https://en.wikipedia.org/wiki/Shoelace_formula)
def PolyArea(x, y):
    return 0.5*np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
A = PolyArea(sx, sy)
r = 1
pi_approximation = A/(r**2)
print("The approximation of pi, significant to five digits, is", pi_approximation)
Exercise 6
(a)
(b)
# read data
data = np.genfromtxt("Supporting Files/am205_hw1_files/problem6/measured-concs.txt", delimiter = '')
data_x = data[:, 0]
data_y = data[:, 1]
data_rho = data[:, 2] # right-hand side
def rhoc(x, y, t):
    # 2-D diffusion Green's function; b is the diffusivity, defined globally below
    return 1/(4*np.pi*b*t)*np.exp(-(x**2 + y**2)/(4*b*t))
x = np.linspace(-3, 3, 7)
y = np.linspace(-3, 3, 7)
b = 1
def compute_concentration_strengths(data_x, data_y, data_rho, x, y, t):
    A = np.zeros((np.shape(data_x)[0], np.shape(x)[0]*np.shape(y)[0]))
    for i in range(np.shape(data_x)[0]):
        for j in range(np.shape(x)[0]):
            for k in range(np.shape(y)[0]):
                # map the lattice point (x[j], y[k]) to its column index
                l = int(7*(x[j]+3) + (y[k]+3))
                v_k = np.array([x[j], y[k]])
                A[i, l] = rhoc(data_x[i] - v_k[0], data_y[i] - v_k[1], t)
    # solve the normal equations A^T A lambda = A^T rho for the source strengths
    concentration_strengths = np.linalg.solve(np.dot(A.T, A), np.dot(A.T, data_rho))
    return concentration_strengths
concentration_strengths = compute_concentration_strengths(data_x, data_y, data_rho, x, y, t = 4)
print("The concentration strengths are given by the following vector: \n", concentration_strengths)
(c)
mu = 0
variance = 1e-8
n = 200
repetitions = 100 # at least 100 times
concentration_strengths_matrix = np.zeros((np.shape(x)[0]*np.shape(y)[0], repetitions)) # each column holds one vector of concentration strengths
for i in range(repetitions):
    noise_vector = np.random.normal(mu, np.sqrt(variance), n)
    noisy_rho = data_rho + noise_vector
    concentration_strengths_matrix[:, i] = compute_concentration_strengths(data_x, data_y, noisy_rho, x, y, t = 4)
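# With the lattice index l = 7*(x[j]+3) + (y[k]+3) used above, the lattice points (0, 0), (1, 1), (2, 2) and (3, 3)
# map to rows 24, 32, 40 and 48 of the matrix, which is why those rows are extracted below.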
lambda_0_0_values = concentration_strengths_matrix[24, :]
lambda_1_1_values = concentration_strengths_matrix[32, :]
lambda_2_2_values = concentration_strengths_matrix[40, :]
lambda_3_3_values = concentration_strengths_matrix[48, :]
std_lambda_0_0 = np.std(lambda_0_0_values)
std_lambda_1_1 = np.std(lambda_1_1_values)
std_lambda_2_2 = np.std(lambda_2_2_values)
std_lambda_3_3 = np.std(lambda_3_3_values)
print("The standard deviation for the lambda in (0, 0) is", std_lambda_0_0)
print("The standard deviation for the lambda in (1, 1) is", std_lambda_1_1)
print("The standard deviation for the lambda in (2, 2) is", std_lambda_2_2)
print("The standard deviation for the lambda in (3, 3) is", std_lambda_3_3)
fig = plt.figure(figsize = [7.5, 7.5])
ax = fig.add_subplot(111)
ax.scatter(data_x, data_y, color = 'blue', label = 'Datapoints')
ax.plot([-3, 3, 3, -3, -3], [-3, -3, 3, 3, -3], color = 'orange', label = 'Lattice grid')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.legend(loc = 'best')
plt.show()