import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
import numpy as np
def get_position(x1, x2, omega, v=-50.0, a=1.0, r=0.5):
    """Run one PSO velocity/position update for a 2-D particle on the
    Schwefel function (global optimum at -420.9687 in each dimension).

    Parameters
    ----------
    x1, x2 : float
        Current particle coordinates.
    omega : float
        Inertia weight.
    v : float
        Current velocity (same scalar for both dimensions).
    a : float
        Acceleration coefficient.
    r : float
        Random factor (fixed here so runs are reproducible).

    Returns
    -------
    str
        Human-readable '(old)->(new)' position string.
    """
    position = np.array([x1, x2])
    result = '(' + str(position[0]) + ',' + str(position[1]) + ')'
    # Since we only run this for one iteration, personal best minus the
    # current position is (0,0) and can be ignored; only the attraction
    # towards the known optimum remains.
    # BUG FIX: the attraction term must use each dimension's own distance
    # to the optimum (previously x1 was used for both dimensions). For the
    # symmetric particles used below (x1 == x2) the output is unchanged.
    new_v = omega * v + a * r * (-420.9687 - position)
    set_position = np.vectorize(update_position)
    position = set_position(position, new_v)
    result = result + '->(' + str(position[0]) + ',' + str(position[1]) + ')'
    return result
def update_position(x, v):
    """Advance a single coordinate by velocity v, clamped to the
    Schwefel search domain [-500, 500]."""
    moved = x + v
    return max(-500.0, min(500.0, moved))
# One-step PSO update for three inertia weights and three starting particles.
omegas = np.array([2.0, 0.5, 0.1])
particles = np.array([[-400.0, -400.0], [-410.0, -410.0], [-415.0, -415.0]])
for w in omegas:
    print('Omega=' + str(w) + '\n')
    for p in particles:
        print(get_position(p[0], p[1], w))
    print('\n------')
class Particle:
    """A one-dimensional PSO particle minimising |x| (equivalently x**2)."""

    def __init__(self, position, velocity):
        self.position = float(position)
        self.velocity = float(velocity)
        # The personal best starts at the initial position.
        self.p_best = self.position

    def update_position(self, omega, alpha, r):
        """Perform one PSO step. The global best coincides with the
        personal best because the swarm contains a single particle."""
        omega, alpha, r = float(omega), float(alpha), float(r)
        attraction = alpha * r * (self.p_best - self.position)
        # Cognitive and social terms are identical here, so the
        # attraction is applied twice.
        self.velocity = omega * self.velocity + attraction + attraction
        self.position = self.position + self.velocity
        if np.abs(self.position) < np.abs(self.p_best):  # minimisation
            self.p_best = self.position

    def __repr__(self):
        return (f'position {self.position:.10f}, velocity '
                f'{self.velocity:.10f} and personal best {self.p_best:.10f}.')
# Fig. 1: converging configuration (omega=0.5, alpha=1.5, r=0.5).
particle = Particle(20, 10)
it = 0
max_it = 100  # maximum number of iterations
x = np.linspace(0, 25)
plt.plot(x, x ** 2)
while particle.position != 0.0:
    plt.scatter(particle.position, particle.position ** 2, color='blue', s=30, label='Function (x^2)')
    plt.scatter(particle.position, 0, color='red', s=5, label='Particle (x)')
    it += 1
    if it == max_it:
        break
    particle.update_position(0.5, 1.5, 0.5)
# Collapse the duplicate legend entries produced by repeated scatter calls.
handles, labels = plt.gca().get_legend_handles_labels()
unique = dict(zip(labels, handles))
plt.legend(unique.values(), unique.keys())
txt = 'Fig. 1: ω=0.5, α_1=α_2=1.5, r_1=r_2=0.5,\nfinal particle at ' + str(particle)
plt.figtext(0, -0.1, txt, horizontalalignment='left', wrap=True, fontsize=12)
plt.show()
#--------
# Fig. 2: larger inertia with unit random factors (omega=0.7, r=1.0).
particle = Particle(20, 10)
it = 0
max_it = 100  # maximum number of iterations
x = np.linspace(-30, 30)
plt.plot(x, x ** 2)
while particle.position != 0.0:
    plt.scatter(particle.position, particle.position ** 2, color='blue', s=30, label='Function (x^2)')
    plt.scatter(particle.position, 0, color='red', s=5, label='Particle (x)')
    it += 1
    if it == max_it:
        break
    particle.update_position(0.7, 1.5, 1.0)
# Collapse the duplicate legend entries produced by repeated scatter calls.
handles, labels = plt.gca().get_legend_handles_labels()
unique = dict(zip(labels, handles))
plt.legend(unique.values(), unique.keys())
txt = 'Fig. 2: ω=0.7, α_1=α_2=1.5, r_1=r_2=1.0,\nfinal particle at ' + str(particle)
plt.figtext(0, -0.1, txt, horizontalalignment='left', wrap=True, fontsize=12)
plt.show()
# Fig. 3: inertia above 1 (omega=1.1) — the particle diverges.
particle = Particle(20, 10)
it = 0
max_it = 100  # maximum number of iterations
x = np.linspace(-1000, 1000)
plt.plot(x, x ** 2)
while particle.position != 0.0:
    plt.scatter(particle.position, particle.position ** 2, color='blue', s=30, label='Function (x^2)')
    plt.scatter(particle.position, 0, color='red', s=5, label='Particle (x)')
    it += 1
    if it == max_it:
        break
    particle.update_position(1.1, 1.5, 1.0)
# Collapse the duplicate legend entries produced by repeated scatter calls.
handles, labels = plt.gca().get_legend_handles_labels()
unique = dict(zip(labels, handles))
plt.legend(unique.values(), unique.keys())
txt = 'Fig. 3: ω=1.1, α_1=α_2=1.5, r_1=r_2=1.0,\nfinal particle at ' + str(particle)
plt.figtext(0, -0.1, txt, horizontalalignment='left', wrap=True, fontsize=12)
plt.show()
import scipy
import numpy as np
def quantization_error(n_clusters, centroids, data):
    """Mean intra-cluster distance, averaged over all clusters.

    Each data point is assigned to its nearest centroid; a cluster's
    error is the average Euclidean distance of its members to that
    centroid (0 for an empty cluster); the returned value is the mean
    of the per-cluster errors.

    Parameters
    ----------
    n_clusters : int
        Number of centroids.
    centroids : array-like, shape (n_clusters, n_features)
    data : array-like, shape (n_samples, n_features)

    Returns
    -------
    float
    """
    clusters = {i: [] for i in range(n_clusters)}
    for point in data:
        # FIX: use numpy for the Euclidean distance. The file only does
        # `import scipy`, which does not reliably make `scipy.spatial`
        # available as an attribute of `scipy`.
        distances = [np.linalg.norm(np.asarray(point, dtype=float) - np.asarray(c, dtype=float))
                     for c in centroids]
        clusters[np.argmin(distances)].append(min(distances))
    per_cluster = []
    for members in clusters.values():
        # Empty clusters contribute zero error.
        per_cluster.append(sum(members) / len(members) if members else 0)
    return sum(per_cluster) / n_clusters
def PSO_clustering(n_clusters, data, w, alpha1, alpha2, n_particles=10, max_iter=100):
    """Cluster `data` with particle-swarm optimisation.

    Each particle encodes a full set of `n_clusters` candidate centroids;
    fitness is the quantization error.

    Parameters
    ----------
    n_clusters : int
        Number of centroids per particle.
    data : np.ndarray, shape (n_samples, n_features)
    w : float
        Inertia weight.
    alpha1, alpha2 : float
        Cognitive and social acceleration coefficients.
    n_particles : int, optional
        Swarm size (default 10, matching the original hard-coded value).
    max_iter : int, optional
        Number of PSO iterations (default 100, matching the original).

    Returns
    -------
    np.ndarray
        The best centroid set found, shape (n_clusters, n_features).
    """
    particles = []
    v = []
    for _ in range(n_particles):
        # Initialise each particle from n_clusters distinct data points.
        index = np.random.permutation(data.shape[0])[:n_clusters]
        particles.append(data[index].copy())
        v.append(np.zeros(particles[-1].shape))
    # "particle" entries are filled on the first iteration (fitness < inf).
    local_best = [{"particle": None, "fitness": float('inf')} for _ in range(n_particles)]
    global_best = {"particle": None, "fitness": float('inf')}
    for _ in range(max_iter):
        for i in range(n_particles):
            fitness = quantization_error(n_clusters, particles[i], data)
            if fitness < local_best[i]["fitness"]:
                # FIX: store a copy so later position updates cannot
                # mutate the recorded personal best.
                local_best[i]["particle"] = particles[i].copy()
                local_best[i]["fitness"] = fitness
        best_index = np.argmin([lb["fitness"] for lb in local_best])
        if local_best[best_index]["fitness"] < global_best["fitness"]:
            # FIX: copy instead of aliasing the local-best dict; the
            # original shared the dict object, so the global best was
            # silently rewritten whenever that particle improved.
            global_best = {"particle": local_best[best_index]["particle"].copy(),
                           "fitness": local_best[best_index]["fitness"]}
        for i in range(n_particles):
            r1 = np.random.uniform(0, 1, particles[i].shape)
            r2 = np.random.uniform(0, 1, particles[i].shape)
            v[i] = (w * v[i]
                    + alpha1 * r1 * (local_best[i]["particle"] - particles[i])
                    + alpha2 * r2 * (global_best["particle"] - particles[i]))
            particles[i] = particles[i] + v[i]
    return global_best['particle']
from sklearn.cluster import KMeans
import numpy as np
import random
import numpy as np
# 400 random 2-D points in [-1, 1]^2 with a piecewise-linear class boundary.
data_vectors = np.array([(random.uniform(-1, 1), random.uniform(-1, 1)) for _ in range(400)])
classes = np.array([int(z1 >= 0.7 or (z1 <= 0.3 and z2 >= -0.2 - z1)) for (z1, z2) in data_vectors])
# Load the Iris benchmark: 150 samples, 4 features, 3 classes.
from sklearn import datasets

iris = datasets.load_iris()
X, y = iris.data, iris.target
import scipy
# K-means baseline on the artificial data: quantization error averaged
# over 30 independent runs.
errors_art = []
for _ in range(30):
    km = KMeans(n_clusters=2, max_iter=100, n_init=10, init="random").fit(data_vectors)
    per_cluster_error = []
    for c in range(2):
        members = data_vectors[km.labels_ == c]
        total = 0
        for member in members:
            total = total + scipy.spatial.distance.euclidean(member, km.cluster_centers_[c])
        # Empty clusters contribute zero error.
        per_cluster_error.append(total / len(members) if len(members) > 0 else 0)
    errors_art.append(sum(per_cluster_error) / 2)
print("Quantization error: " + str(np.average(errors_art)) + " ± " + str(np.std(errors_art)))
# K-means baseline on Iris: quantization error averaged over 30 runs.
errors_iris = []
for _ in range(30):
    km = KMeans(n_clusters=3, max_iter=100, n_init=10, init="random").fit(X)
    per_cluster_error = []
    for c in range(3):
        members = X[km.labels_ == c]
        total = 0
        for member in members:
            total = total + scipy.spatial.distance.euclidean(member, km.cluster_centers_[c])
        # Empty clusters contribute zero error.
        per_cluster_error.append(total / len(members) if len(members) > 0 else 0)
    errors_iris.append(sum(per_cluster_error) / 3)
print("Quantization error: " + str(np.average(errors_iris)) + " ± " + str(np.std(errors_iris)))
# PSO clustering evaluated 30 times on each dataset.
errors_art_pso = []
for _ in range(30):
    best_centroids = PSO_clustering(2, data_vectors, w=0.7298, alpha1=1.49618, alpha2=1.49618)
    errors_art_pso.append(quantization_error(2, best_centroids, data_vectors))
print("Quantization error: " + str(np.average(errors_art_pso)) + " ± " + str(np.std(errors_art_pso)))
errors_iris_pso = []
for _ in range(30):
    best_centroids = PSO_clustering(3, X, w=0.7298, alpha1=1.49618, alpha2=1.49618)
    errors_iris_pso.append(quantization_error(3, best_centroids, X))
print("Quantization error: " + str(np.average(errors_iris_pso)) + " ± " + str(np.std(errors_iris_pso)))
# Fit both algorithms once on the artificial data for visual comparison.
PSO_solution = PSO_clustering(2, data_vectors, w=0.7298, alpha1=1.49618, alpha2=1.49618)
KMeans_solution = KMeans(n_clusters=2, max_iter=100, n_init=10, init="random").fit(data_vectors)
# Assign every point to its nearest PSO centroid.
clusters = {c: [] for c in range(2)}
for point in data_vectors:
    nearest = np.argmin([scipy.spatial.distance.euclidean(centroid, point) for centroid in PSO_solution])
    clusters[nearest].append(point)
import matplotlib.pyplot as plt

# Scatter plot of the PSO cluster assignment.
plt.figure()
for c in range(2):
    xs = [p[0] for p in clusters[c]]
    ys = [p[1] for p in clusters[c]]
    plt.scatter(xs, ys, label='cluster ' + str(c))
plt.title("PSO")
plt.legend()
plt.xlabel('${x}_{1}$')
plt.ylabel('${x}_{2}$')
plt.show()
# Scatter plot of the K-means cluster assignment.
plt.figure()
for c in range(2):
    mask = KMeans_solution.labels_ == c
    plt.scatter(data_vectors[mask, 0], data_vectors[mask, 1], label='cluster ' + str(c))
plt.title("KMeans")
plt.legend()
plt.xlabel('${x}_{1}$')
plt.ylabel('${x}_{2}$')
plt.show()
# Scatter plot of the ground-truth labels for comparison.
plt.figure()
for c in range(2):
    mask = classes == c
    plt.scatter(data_vectors[mask, 0], data_vectors[mask, 1], label='cluster ' + str(c))
plt.title("True")
plt.legend()
plt.xlabel('${x}_{1}$')
plt.ylabel('${x}_{2}$')
plt.show()
# Display the first pre-rendered assignment figure without axis clutter.
img = mpimg.imread('ass2fig1.png')
no_ticks = dict(axis='both', which='both', bottom=False, top=False,
                left=False, right=False, labelleft=False, labelbottom=False)
plt.tick_params(**no_ticks)
imgplot = plt.imshow(img)
# Display the second pre-rendered assignment figure without axis clutter.
img = mpimg.imread('ass2fig2.png')
no_ticks = dict(axis='both', which='both', bottom=False, top=False,
                left=False, right=False, labelleft=False, labelbottom=False)
plt.tick_params(**no_ticks)
imgplot = plt.imshow(img)