import numpy as np
import matplotlib.pyplot as plt
# --- Model configuration (module-level globals read by the functions below) ---
sequenceLength = 100 # length of time series
# Process-noise variance per state component, shape (2, 1)
stateVariance = np.array([[0.01],
[0.01]])
# Observation-noise variance (scalar stored in a length-1 array)
observationVariance = np.array([0.01])
# States are stored as columns; this is the single initial state column.
x = np.array([[0],
[0]]) # Initial state
# Linear dynamics: near-identity with +/-0.1 cross-coupling (slow rotation)
transitionMatrix = np.array([ [1, 0.1],
[-0.1, 1]])
# Observation sequence, filled in by the simulation loop at the bottom of the file
y = []
def stateSpaceModel(oldState, stateVariance, nrParticles):
    """Propagate `nrParticles` states one time step.

    Applies the (module-level) linear transition matrix and adds zero-mean
    Gaussian process noise with per-component variance `stateVariance`.

    oldState      : (2, nrParticles) array of current states (columns).
    stateVariance : (2, 1) array of process-noise variances.
    nrParticles   : number of state columns to propagate.
    Returns a (2, nrParticles) array of propagated states.
    """
    drift = transitionMatrix.dot(oldState)
    noise = np.sqrt(stateVariance) * np.random.randn(2, nrParticles)
    return drift + noise
def observationModel(State):
    """Nonlinear measurement model: y = x0 + x1**3 per particle.

    State : (2, N) array; row 0 enters linearly, row 1 cubed.
    Returns a length-N array of scalar observations.
    """
    linear_term = State[0, :]
    cubic_term = State[1, :] ** 3
    return linear_term + cubic_term
def generateData(x, y):
    """Simulate the state-space model for `sequenceLength` time steps.

    Parameters
    ----------
    x : (2, 1) array holding the initial state as its only column.
    y : array-like of already-collected observations (normally empty).

    Returns
    -------
    x : (2, sequenceLength) array of states, one column per time step.
    y : 1-D array of noise-free observations of each newly generated state.
    """
    for i in range(sequenceLength - 1):
        # Generate new state.
        # BUGFIX: the original called stateSpaceModel without the required
        # nrParticles argument (TypeError) and selected row x[i,:] even though
        # states are stored as columns — advance the single trajectory with
        # column i and nrParticles=1, matching the working script loop below.
        newState = stateSpaceModel(x[:, [i]], stateVariance, 1)
        x = np.concatenate((x, newState), 1)
        # Generate observation of the new state (sensor positioned at (0,0)).
        newObservation = observationModel(newState)
        y = np.append(y, newObservation)
    return x, y
def evaluateGaussianLikelyhood(mean, cov):
    """Unnormalized zero-centered Gaussian density evaluated at `mean`.

    Computes exp(-mean**2 / (2*cov)) elementwise; the normalization constant
    is omitted because callers renormalize the resulting weights anyway.
    """
    exponent = -(np.power(mean, 2)) / (2 * cov)
    return np.exp(exponent)
def GaussianParticleFilter(observationVector, transitionMatrix, stateNoise, observationNoise, nrParticles):
    """Gaussian particle filter for the module's 2-D state-space model.

    Parameters
    ----------
    observationVector : 1-D array of scalar observations y_0 ... y_{L-1}.
    transitionMatrix  : kept for interface compatibility; the transition is
                        applied inside stateSpaceModel via the module global.
    stateNoise        : kept for interface compatibility; stateSpaceModel is
                        called with the module-global stateVariance, as before.
    observationNoise  : observation-noise variance (scalar or shape (1,)).
    nrParticles       : number of particles in the cloud.

    Returns
    -------
    estimatedStateVector : (2, L+1) array; column 0 is the zero initial state,
                           column t+1 is the posterior mean at time step t.
    y_hat_output         : (L,) array of mean predicted observations.
    """
    estimatedStateVector = np.array([[0], [0]])
    L = observationVector.shape[0]
    # Initial particle cloud ~ N(0, 0.1 * I).
    Particles = np.sqrt(0.1) * np.random.randn(2, nrParticles)
    # BUGFIX: take a copy — the original aliased the input and then wrote the
    # per-step predictions into it, clobbering the caller's observation array.
    y_hat_output = np.array(observationVector, dtype=float)
    for iterInd in range(L):  # loop over time steps of the time series
        # Predict: push particles through the state-transition model.
        Particles = stateSpaceModel(Particles, stateVariance, nrParticles)
        # Predicted observations for each particle (non-linearities here).
        y_hat = observationModel(Particles)
        # Prediction error of y against the true observation.
        y_error = observationVector[iterInd] - y_hat
        # Weights = Gaussian likelihood of the prediction errors (prior and
        # importance density cancel). We evaluate the density rather than
        # sample; the normalization constant is irrelevant because the weights
        # are renormalized to sum to 1 below.
        unnormalized_weights = evaluateGaussianLikelyhood(mean=y_error, cov=observationNoise)
        weightsSum = np.sum(unnormalized_weights)
        # Regularization term avoids division by 0 when all weights vanish.
        weightsNormalized = np.divide(unnormalized_weights, weightsSum + 0.0001)
        # Weighted mean of the particle cloud, as (2,) and as a (2, 1) column.
        mean1D = Particles.dot(weightsNormalized)
        mean = mean1D.reshape(2, 1)
        # Weighted covariance: sum_i w_i (x_i - m)(x_i - m)^T.
        # BUGFIX: scale deviations by sqrt(w_i) so the outer product carries a
        # factor w_i; the original multiplied by w_i and therefore summed
        # w_i^2 (x_i - m)(x_i - m)^T.
        deviations = (Particles - mean) * np.sqrt(weightsNormalized)
        Variance = deviations @ deviations.T + np.diag([0.0001, 0.0001])  # regularized cov-matrix
        # Resampling step under the Gaussian assumption: the filtering
        # distribution p(x_n | y_0..n) is approximated by N(mean1D, Variance),
        # so the cloud is redrawn with the same first two moments.
        Particles = np.random.multivariate_normal(mean1D, Variance, nrParticles)
        Particles = np.transpose(Particles)
        # Save estimated state vector for outputting.
        estimatedStateVector = np.concatenate((estimatedStateVector, mean), 1)
        y_hat_output[iterInd] = np.average(y_hat)
    return estimatedStateVector, y_hat_output
fig = plt.figure(figsize=(10, 5))
# --- Simulate the true trajectory and its observations ---
for i in range(sequenceLength - 1):
    # Generate new state: advance the single true trajectory (nrParticles=1).
    newState = stateSpaceModel(x[:, [i]], stateVariance, 1)
    x = np.concatenate((x, newState), 1)
    # Generate observation of the new state (sensor positioned at (0,0)).
    newObservation = np.power(newState[0], 1) + np.power(newState[1], 3)
    y = np.concatenate((y, newObservation))

# --- Run the filter ---
nrParticles = 100
estimatedStateVector, y_hat = GaussianParticleFilter(y, transitionMatrix, stateVariance, observationVariance, nrParticles)

# Phase plot: true vs. estimated trajectory in state space.
plt.plot(x[1, :], x[0, :])
plt.plot(estimatedStateVector[1, :], estimatedStateVector[0, :])
plt.legend(('True State', 'Estimated State'))
plt.show()

# State components over time.
fig = plt.figure(figsize=(10, 5))
plt.plot(x[0, :])
plt.plot(x[1, :])
plt.plot(estimatedStateVector[0, :])
plt.plot(estimatedStateVector[1, :])
plt.legend(('True State $x_{0}$', 'True State $x_{1}$', 'Estim State $x_{0}$', 'Estim State $x_{1}$'))
plt.show()

fig = plt.figure()
plt.plot(y)
# BUGFIX: ('...') is a plain string, not a tuple — plt.legend iterates it
# character by character, producing one legend entry per letter. Wrap the
# single label in a list instead.
plt.legend(['True observation $y$'])
plt.show()

fig = plt.figure()
plt.plot(y_hat)
# Same single-label fix; raw string keeps the LaTeX \hat escape intact.
plt.legend([r'Estimated Observation $\hat y$'])
plt.show()