import numpy as np
import random
import matplotlib.pyplot as plt
# configure matplotlib
plt.rc('font', size=10) # controls default text sizes
plt.rc('axes', titlesize=20) # fontsize of the axes title
plt.rc('axes', labelsize=25) # fontsize of the x and y labels
plt.rc('xtick', labelsize=18) # fontsize of the tick labels
plt.rc('ytick', labelsize=18) # fontsize of the tick labels
plt.rc('legend', fontsize=20) # legend fontsize
plt.rc('figure', titlesize=35) # fontsize of the figure title
plt.rc('axes',titlelocation='left') # position of axes title
def generating_true_traj( N, Fi, G, init=[[0],[0]], acc_var = 0, plot = False ):
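    # State model (implemented below): X[i] = Fi @ X[i-1] + a_i * G, with scalar
    # acceleration noise a_i ~ N(0, acc_var); the state [coordinate, velocity]^T
    # is stored as an (N, 2, 1) array.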
X = np.zeros((N,2,1))
X[0] = init
for i in range(1, N):
acc = random.gauss(0, np.sqrt(acc_var))
X[i] = Fi.dot(X[i - 1]) + acc * G
if plot:
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title(f'True trajectory of {N} points')
ax.plot(X[:,0], linewidth=3, color = 'red', label = 'coordinate')
ax.plot(X[:,1], linewidth=3, color = 'blue', label = 'velocity')
ax.set_xlabel('number of points')
ax.set_ylabel('data values')
ax.grid()
ax.legend()
plt.show()
return X
def generating_measurments ( true_traj, H=np.array([[1,0]]),var_noise=0, plot = False ) :
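    # Measurement model (implemented below): z[i] = H @ X[i] + v_i, with
    # measurement noise v_i ~ N(0, var_noise); by default only the coordinate
    # is observed (H = [[1, 0]]).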
N = len(true_traj)
z = np.zeros(N)
for i in range(N):
        z[i] = (H @ true_traj[i]).item() + random.gauss(0, np.sqrt(var_noise))
if plot:
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title(f'Measurements of {N} points')
ax.plot(z, linewidth=3, color = 'orange')
ax.set_xlabel('number of points')
ax.set_ylabel('measurements of coordinate')
ax.grid()
plt.show()
return z
def cov_mat( G, rand_acc ):
    # state-noise covariance Q = G @ G.T * sigma_a^2 (outer product, not element-wise)
    cov_q = G @ G.T * rand_acc
    return cov_q
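# For example, with T = 1 (so G = [[0.5], [1]]) and rand_acc = 0.2**2 = 0.04:
#   cov_mat(G, 0.04) == [[0.25, 0.5], [0.5, 1.0]] * 0.04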
class Kalman():
    # Fi - transition matrix
    # H - measurement matrix
    # cov_Q - covariance matrix of the state noise
    # cov_r - covariance (variance) of the measurement noise
def __init__( self, Fi, H, G, acc_var, cov_r ):
self.Fi = Fi
self.H = H
self.G = G
self.acc_var = acc_var
self.cov_Q = G @ G.T * acc_var
self.cov_r = cov_r
def calc_Kalman(self, true, meas,
X_f0 = None, P_f0=None,
K_cust = None, ret = False, plot = False ):
        # X_p - predicted state estimates
        # X_f - filtered state estimates
        # P_p - prediction error covariance matrices
        # P_f - filtration error covariance matrices
        # K   - filter gain
        # NOTE: each call of this method overwrites the P and X matrices
        # stored on the instance.
        # local aliases for convenience
N = len(meas)
Fi = self.Fi
H = self.H
cov_Q = self.cov_Q
cov_r = self.cov_r
X_p = np.zeros((N, 2, 1))
P_f = np.zeros((N, 2, 2))
if P_f0 is not None:
P_f[0] = P_f0
else:
P_f[0] = [[10000, 0], [0, 10000]]
P_p = np.zeros((N, 2, 2))
X_f = np.zeros((N, 2, 1))
K = np.zeros((N, 2, 1))
if K_cust is not None :
K = K_cust
P_fx = np.zeros(N)
if X_f0 is not None:
X_f[0] = X_f0
else:
X_f[0] = [[2], [0]]
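        # Standard Kalman recursion, as implemented in the loop below:
        #   predict: X_p[i] = Fi X_f[i-1],  P_p[i] = Fi P_f[i-1] Fi^T + Q
        #   gain:    K[i] = P_p[i] H^T (H P_p[i] H^T + R)^{-1}
        #   update:  X_f[i] = X_p[i] + K[i] (z[i] - H X_p[i])
        #            P_f[i] = (I - K[i] H) P_p[i]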
for i in range(1, N):
X_p[i] = Fi @ X_f[i - 1]
P_p[i] = Fi @ P_f[i - 1] @ Fi.T + cov_Q
if K_cust is None :
K[i] = (P_p[i] @ H.T) @ (np.linalg.inv(H @ P_p[i] @ H.T + cov_r))
            X_f[i] = X_p[i] + K[i] * (meas[i] - (H @ X_p[i]).item())
P_f[i] = (np.eye(2) - K[i] @ H) @ P_p[i]
P_fx[i] = np.sqrt(abs(P_f[i][0][0]))
        # store P, X and the other matrices on the instance as a run log
self.true_data = true
self.meas = meas
self.cov_Q = cov_Q
self.X_p = X_p
self.X_f = X_f
self.P_p = P_p
self.P_f = P_f
self.P_fx = P_fx
self.K = K
self.X_f0 = X_f0
self.P_f0 = P_f0
if plot:
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title('Kalman filtration')
ax.plot(X_f[:, 0], linewidth=3, color = 'red', label = 'filtered')
ax.plot(meas, linewidth=3, color = 'blue', label = 'measurements')
ax.plot(true[:,0], linewidth=3, color = 'green', label = 'true trajectory')
ax.set_xlabel('number of points')
ax.set_ylabel('data')
ax.grid()
ax.legend()
plt.show()
if ret:
return X_f
def calc_Kalman_mstep( self, m, plot=False, ret = False):
# usual case is m=2
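        # m-step extrapolation as implemented here: the latest filtered estimate
        # is propagated m-1 steps through the transition matrix,
        #   X_p_m[i] = Fi^(m-1) X_f[i-1],
        # i.e. no measurement updates are applied on the extrapolated stretch.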
N = len(self.X_f)
X_f = self.X_f
X_p_m = np.zeros((N, 2, 1))
Fi_m = np.linalg.matrix_power(self.Fi, m-1)
        # the first m-1 entries are set equal to the 0th filtered element
X_p_m[:m-1] = X_f[0]
for i in range(m-1, N):
X_p_m[i] = Fi_m.dot(X_f[i - 1])
if plot:
            graph, ax = plt.subplots(figsize = (16,9))
            ax.set_title(f'Extrapolation with m={m}')
ax.plot( X_p_m[:, 0], linewidth=3, color = 'red')
ax.plot(self.meas, linewidth=3, color = 'blue')
ax.set_xlabel('number of points')
ax.set_ylabel('Data')
ax.grid()
plt.show()
if ret:
return X_p_m
    def calc_errors ( self, M, N, Fi, G, init, acc_var, X_f0, P_f0, K_cust=None ) :
final_err = np.zeros((N - 3, 2, 1))
final_err_7 = np.zeros((N - 3, 2, 1))
m = 7
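        # Monte Carlo error estimate: squared errors are accumulated over M
        # independent runs and the square root of the 1/(M-1)-scaled sum is
        # taken below, so final_err is an RMS error curve per step.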
for i in range( M ):
            # generate a fresh trajectory and measurements for each run
            true_data = generating_true_traj( N, Fi, G, init, acc_var, plot = False )
            meas = generating_measurments( true_data, self.H, self.cov_r, plot = False )
X_f = self.calc_Kalman(true_data, meas, X_f0, P_f0, K_cust, ret = True)
X_p7 = self.calc_Kalman_mstep( m, plot=False, ret = True)
final_err += (true_data[3:] - X_f[3:])**2
final_err_7 += (true_data[3:] - X_p7[3:])**2
final_err = np.sqrt(1/(M-1)*final_err)
final_err_7 = np.sqrt(1/(M-1)*final_err_7)
self.final_err = final_err
self.final_err_7 = final_err_7
return final_err, final_err_7
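# Experiment setup: near-constant-velocity model with state [coordinate, velocity]^T,
# transition matrix Fi = [[1, T], [0, 1]], noise input G = [[T^2/2], [T]],
# and scalar coordinate measurements with variance meas_var.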
N = 200
T = 1
init = np.array([[5],[1]])
rand_acc_var = 0.2**2
meas_var = 20**2
Fi = np.array([[1, T], [0, 1]])
G = np.array([[T**2/2],[T]])
cov_Q = G.dot(G.T)*rand_acc_var
H = np.array([[1, 0]])
cov_r = meas_var
X_f0 = np.array([[2],[0]])
P_f0 = np.array([[10000, 0], [0, 10000]])
true_traj = generating_true_traj( N, Fi, G, init, rand_acc_var, plot=False )
meas = generating_measurments( true_traj, H, var_noise=meas_var, plot = False)
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title(f'Figure 1, Measurements of {N} points', fontweight='bold', x=-0.1, y=-0.18)
# ax.plot( true_traj, color=)
ax.plot(meas, linewidth=3, color = 'orange')
ax.set_xlabel('number of points')
ax.set_ylabel('measurements of coordinate')
ax.grid()
plt.show()
# (same setup as above)
fig, ax = plt.subplots( 1,3, figsize = (8*3,4.5))
for i in range(3):
true_traj = generating_true_traj( N, Fi, G, init, rand_acc_var, plot=False )
meas = generating_measurments( true_traj, H, var_noise=meas_var, plot = False)
Kalm = Kalman( Fi, H, G, rand_acc_var, cov_r )
Kalm.calc_Kalman( true_traj, meas, X_f0, P_f0, ret=False, plot=False)
ax[i].plot(Kalm.meas, linewidth=3, color = 'blue', label = 'measurements')
ax[i].plot(Kalm.true_data[:,0], linewidth=3, color = 'green', label = 'true trajectory')
ax[i].plot(Kalm.X_f[:, 0], linewidth=3, color = 'red', label = 'filtered')
ax[i].set_xlabel('number of points')
ax[i].set_ylabel('data')
ax[i].grid()
ax[i].legend()
fig.suptitle('Figure 2, Kalman filtration for 3 different data sets', fontweight='bold', fontsize=25, y=-0.07)
fig.tight_layout()
plt.show()
figure, ax = plt.subplots(figsize = (16,9))
ax.plot(Kalm.K[:,0], linewidth=3, color = 'red', label = 'Filter gain')
ax.plot(Kalm.P_fx, linewidth=3, color = 'blue', label = 'std of estimation error')
ax.set_title('Figure 3, Evolution of the filter gain (coordinate component)', fontweight='bold', x=-0.1, y = -0.18)
ax.set_xlabel("number of points")
ax.set_ylabel("filter gain and std")
ax.grid()
ax.legend()
plt.show()
# Use filtered data from task 5
m = 7
pred_7 = Kalm.calc_Kalman_mstep( m=m, plot = False, ret = True)
# calculate the RMS errors of the filtered estimates
# and of the m-step forecasts over M Monte Carlo runs
# (all other variables come from the previous cells)
M = 500
X_f0 = np.array([[2],[0]])
P_f0 = np.array([[10000, 0], [0, 10000]])
Kalm = Kalman( Fi, H, G, rand_acc_var, cov_r )
final_err, final_err_7 = Kalm.calc_errors(M, N, Fi, G, init, rand_acc_var, X_f0, P_f0 )
figure, ax = plt.subplots(figsize = (16,9))
ax.plot(final_err[:,0], linewidth=3, color = 'red', label = 'RMS error of filtered data')
ax.plot(final_err_7[:,0], linewidth=3, color = 'blue', label = 'RMS error of predicted (m=7) data')
ax.legend()
ax.grid()
ax.set_title('Figure 4, RMS error evolution of filtered and predicted data with step m=7', fontweight='bold', x=-0.1, y = -0.18)
ax.set_xlabel("number of points")
ax.set_ylabel("RMS error")
plt.show()
# std of the measurement errors
meas_std = (Kalm.meas - Kalm.true_data[:, 0, 0]).std()
print(f"max RMS error of the filtered estimation = {final_err[:,0].max()}" )
print(f"standard deviation of the measurement errors = {meas_std}" )
10. Make M = 500 runs again, but with a more accurate initial filtration error covariance matrix.
# recompute the RMS errors of the filtered estimates and m-step forecasts,
# now with a more accurate initial covariance matrix P_f0
# (all other variables come from the previous cells)
M = 500
X_f0 = np.array([[2],[0]])
P_f0 = np.array([[100, 0], [0, 100]])
Kalm = Kalman( Fi, H, G, rand_acc_var, cov_r )
new_final_err, new_final_err_7 = Kalm.calc_errors(M, N, Fi, G, init, rand_acc_var, X_f0, P_f0 )
figure, ax = plt.subplots(figsize = (16,9))
ax.plot(final_err[:,0], linewidth=3, color = 'red', alpha=0.5, linestyle='--', label = 'filtered data')
ax.plot(final_err_7[:,0], linewidth=3, color = 'blue', alpha=0.5, linestyle='--', label = 'predicted (m=7) data')
ax.plot(new_final_err[:,0], linewidth=3, color = 'red', label = 'more precise filtered data')
ax.plot(new_final_err_7[:,0], linewidth=3, color = 'blue', label = 'more precise predicted (m=7) data')
ax.legend()
ax.grid()
ax.set_title('Figure 5, Comparison of RMS error evolution of filtered and predicted data with different $P_{0,0}$', fontweight='bold', x=-0.1, y = -0.18)
ax.set_xlabel("number of points")
ax.set_ylabel("RMS error")
plt.show()
Here we see that filtering with a more accurate initial estimation error covariance matrix provides a faster decrease of the estimation errors.
P_fx = np.sqrt(Kalm.P_f[:,0,0])
figure, ax = plt.subplots(figsize = (16,9))
ax.plot(final_err[:,0], linewidth=3, color = 'red', label = 'true estimation error (Monte Carlo)')
ax.plot(P_fx, linewidth=3, color = 'blue', label = r'filter-calculated std $\sqrt{P_{i,i}}$')
ax.legend()
ax.grid()
ax.set_title('Figure 6, True estimation error vs. filter-calculated std with more precise $P_{0,0}$', fontweight='bold', x=-0.1, y = -0.18)
ax.set_xlabel("step")
ax.set_ylabel("RMS error / std")
plt.show()
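# P_fx decreases toward its steady-state value, so the minimum below is
# (approximately) the steady-state std of the coordinate estimate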
print(P_fx.min())
# deterministic case: zero state noise, so cov_Q is zero as well
cov_Q = 0
acc_var = 0
init = np.array([[5],[1]])
M = 500
X_f0 = np.array([[2],[0]])
P_f0 = np.array([[100, 0], [0, 100]])
Kalm = Kalman( Fi, H, G, acc_var=0, cov_r=cov_r )
final_err, final_err_7 = Kalm.calc_errors(M, N, Fi, G, init, acc_var, X_f0, P_f0)
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title('Figure 7, Kalman filtration for a deterministic trajectory', fontweight='bold', x=-0.1, y = -0.18)
ax.plot(Kalm.X_f[:, 0], linewidth=3, color = 'red', label = 'filtered')
ax.plot(Kalm.meas, linewidth=3, color = 'blue', label = 'measurements')
ax.plot(Kalm.true_data[:,0], linewidth=3, color = 'green', label = 'true trajectory')
ax.set_xlabel('number of points')
ax.set_ylabel('data')
ax.grid()
ax.legend()
plt.show()
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title('Figure 8, Filter gain evolution in case of deterministic data', fontweight='bold', x=-0.1, y = -0.18)
ax.plot(Kalm.K[:, 0], linewidth=3, color = 'red')
ax.set_xlabel('number of points')
ax.set_ylabel('filter gain')
ax.grid()
plt.show()
P_fx = np.sqrt(Kalm.P_f[:,0,0])
figure, ax = plt.subplots(figsize = (16,9))
rng = np.arange(3,N)
# the error curves start at i = 3
ax.plot(rng, final_err[:,0], linewidth=3, color = 'red', label = 'true estimation error (Monte Carlo)')
ax.plot(rng, P_fx[3:], linewidth=3, color = 'blue', label = r'filter-calculated std $\sqrt{P_{i,i}}$')
ax.legend()
ax.grid()
ax.set_title('Figure 9, True estimation error vs. filter-calculated std for deterministic data', fontweight='bold', x=-0.1, y = -0.18)
ax.set_xlabel("step")
ax.set_ylabel("RMS error / std")
plt.show()
# mismatched filter: the filter assumes cov_Q = 0,
# while the data is generated with acc_var = 0.2**2
cov_Q = 0
rand_acc_var = 0.2**2
M = 500
X_f0 = np.array([[2],[0]])
P_f0 = np.array([[100, 0], [0, 100]])
Kalm = Kalman( Fi, H, G, 0, cov_r )
final_err, final_err_7 = Kalm.calc_errors( M, N, Fi, G, init, acc_var=rand_acc_var, X_f0=X_f0, P_f0=P_f0)
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title('Figure 10, Kalman filtration with a wrong covariance matrix Q', fontweight='bold', x=-0.1, y = -0.18)
ax.plot(Kalm.X_f[:, 0], linewidth=3, color = 'red', label = 'filtered')
ax.plot(Kalm.meas, linewidth=3, color = 'blue', label = 'measurements')
ax.plot(Kalm.true_data[:,0], linewidth=3, color = 'green', label = 'true trajectory')
ax.set_xlabel('number of points')
ax.set_ylabel('data')
ax.grid()
ax.legend()
plt.show()
P_fx = np.sqrt(Kalm.P_f[:,0,0])
P_px = np.sqrt(Kalm.P_p[:,0,0])
figure, ax = plt.subplots(figsize = (16,9))
rng = np.arange(3,N)
# the error curves start at i = 3
ax.plot(rng, final_err[:,0], linewidth=3, color = 'red', label = 'true estimation error (Monte Carlo)')
ax.plot(rng, P_fx[3:], linewidth=3, color = 'orange', label = r'filter-calculated std $\sqrt{P_{i,i}}$')
ax.plot(rng, P_px[3:], linewidth=3, color = 'blue', label = r'filter-calculated std $\sqrt{P_{i,i-1}}$')
ax.legend()
ax.set_title('Figure 11, True estimation error vs. filter-calculated std \n with a wrong covariance matrix Q', fontweight='bold', x=-0.1, y = -0.2)
ax.set_xlabel("step")
ax.set_ylabel("RMS error / std")
ax.grid()
plt.show()
###################
acc_var1 = 1
cov_Q1 = G.dot(G.T)*acc_var1
true_data1 = generating_true_traj( N, Fi, G, init, acc_var1 )
meas1 = generating_measurments( true_data1, H, meas_var )
Kalm1 = Kalman( Fi, H, G, acc_var1, cov_r )
Kalm1.calc_Kalman( true_data1, meas1, X_f0, P_f0, ret = False )
##################
acc_var2 = 0.2**2
cov_Q2 = G.dot(G.T)*acc_var2
true_data2 = generating_true_traj( N, Fi, G, init, acc_var2 )
meas2 = generating_measurments( true_data2, H, meas_var )
Kalm2 = Kalman( Fi, H, G, acc_var2, cov_r )
Kalm2.calc_Kalman( true_data2, meas2, X_f0, P_f0, ret = False )
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title('Figure 12, Filter gain evolution for different state noises', fontweight='bold', x=-0.1, y = -0.18)
ax.plot(Kalm1.K[:, 0], linewidth=3, color = 'blue', label=rf'$\sigma_{{a1}}^2={acc_var1}$')
ax.plot(Kalm2.K[:, 0], linewidth=3, color = 'red', label=rf'$\sigma_{{a2}}^2=0.2^2$')
ax.set_xlabel('number of points')
ax.set_ylabel('filter gain')
ax.grid()
ax.legend()
plt.show()
init = np.array([[5],[1]])
acc_var = 0.2**2
true_data = generating_true_traj( N, Fi, G, init, acc_var )
meas = generating_measurments(true_data, H, meas_var)
cov_Q = G.dot(G.T)*acc_var
X_f0 = np.array([[100], [5]])
P_f0 = np.array([[100, 0], [0, 100]])
Kalm = Kalman( Fi, H, G, acc_var, cov_r )
Kalm.calc_Kalman( true_data, meas, X_f0, P_f0, ret=False, plot=False)
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title('Figure 13, Filter gain evolution to steady-state value', fontweight='bold', x=-0.1, y = -0.18)
ax.plot(Kalm.K[:, 0], linewidth=3, color = 'blue')
ax.set_xlabel('number of points')
ax.set_ylabel('filter gain')
ax.grid()
plt.show()
final_err, final_err_7 = Kalm.calc_errors(M, N, Fi, G, init, acc_var, X_f0, P_f0)
K_opt = Kalm.K[150,0] # by i = 150 the gain has settled to its steady-state value
print(f"K_opt = {K_opt[0]:.3f}")
K_un_est = K_opt/5 # deliberately underestimated gain
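# Cross-check (a minimal sketch, not part of the assignment code): instead of
# reading the gain at an arbitrary step i = 150, the steady-state gain can be
# obtained by iterating the Riccati recursion until P converges.
# steady_state_gain() is a hypothetical helper added here for illustration.
def steady_state_gain(Fi, H, cov_Q, cov_r, tol=1e-10, max_iter=10000):
    n = Fi.shape[0]
    P = np.eye(n) * 1e4 # any large initial covariance works
    K = None
    for _ in range(max_iter):
        P_p = Fi @ P @ Fi.T + cov_Q # prediction covariance
        K = P_p @ H.T @ np.linalg.inv(H @ P_p @ H.T + cov_r) # gain
        P_new = (np.eye(n) - K @ H) @ P_p # filtration covariance
        if np.max(np.abs(P_new - P)) < tol: # stop at the fixed point
            break
        P = P_new
    return K
# e.g. steady_state_gain(Fi, H, G @ G.T * acc_var, cov_r)[0, 0]
# should be close to the K_opt printed above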
import copy
Kalm_cus_K = copy.copy(Kalm)
X_f_under_estim = Kalm_cus_K.calc_Kalman( true_data, meas, X_f0=X_f0, P_f0=P_f0, K_cust = np.ones((N,2,1))*K_un_est, ret=True )
figure, ax = plt.subplots(figsize = (16,9))
ax.set_title('Figure 14, Kalman filtration with underestimated filter gain', fontweight='bold', x=-0.1, y = -0.18)
ax.plot(Kalm_cus_K.X_f[:, 0], linewidth=3, color = 'red', label = 'filtered')
ax.plot(Kalm_cus_K.meas, linewidth=3, color = 'blue', label = 'measurements')
ax.plot(Kalm_cus_K.true_data[:,0], linewidth=3, color = 'green', label = 'true trajectory')
ax.set_xlabel('number of points')
ax.set_ylabel('data')
ax.grid()
ax.legend()
plt.show()
final_err_cus_K, final_err_cus_K7 = Kalm_cus_K.calc_errors(M, N, Fi, G, init, acc_var, X_f0, P_f0, K_cust=np.ones((N,2,1))*K_un_est )
P_fx = np.sqrt(abs(Kalm.P_f[:,0,0]))
P_px = np.sqrt(abs(Kalm.P_p[:,0,0]))
P_fx_cus_K = np.sqrt(abs(Kalm_cus_K.P_f[:,0,0]))
P_px_cus_K = np.sqrt(abs(Kalm_cus_K.P_p[:,0,0]))
figure, ax = plt.subplots(figsize = (16,9))
rng = np.arange(3,N)
# the error curves start at i = 3
ax.plot(rng, final_err[:,0], linewidth=3, color = 'green', label = 'optimal K')
ax.plot(rng, final_err_cus_K[:,0], linewidth=3, color = 'red', label = 'underestimated K')
ax.legend()
ax.set_title('Figure 15, Estimation error with the optimal vs. an underestimated filter gain', fontweight='bold', x=-0.1, y = -0.18 )
ax.set_xlabel("step")
ax.set_ylabel("RMS error")
ax.grid()
plt.show()