from numpy import *
import matplotlib.pyplot as plt
# Load the 2-D nonlinear classification dataset: columns 0-1 are the
# inputs, column 2 is the target label (used below as {-1, +1}).
data = loadtxt('data/nonlinear_classification.data')
X = data[:, :2]
T = data[:, 2]
N, d = X.shape

# Hyper-parameters.
eta = 0.05      # learning rate
K = 10          # number of hidden units
max_val = 0.1   # half-width of the uniform weight-initialization interval

# Hidden layer: weights and biases drawn uniformly from [-max_val, max_val].
W_hid = random.uniform(-max_val, max_val, (d, K))
b_hid = random.uniform(-max_val, max_val, K)  # bias
# Output layer: weights start at zero, bias is random.
W_out = zeros(K)
b_out = random.uniform(-max_val, max_val, 1)
def logistic(x):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    z = exp(-x)
    return 1.0 / (1.0 + z)
def threshold(x):
    """Sign-like activation: +1 where x > 0, -1 where x < 0.

    Entries exactly equal to 0 are left unchanged (copied from the input).
    """
    out = x.copy()
    positive = out > 0.
    negative = out < 0.
    out[positive] = 1.
    out[negative] = -1.
    return out
def feedforward(X, W_hid, b_hid, W_out, b_out):
    """Forward pass: logistic hidden layer, sign-thresholded linear output.

    Returns (Y, O): the hidden-layer activations and the thresholded output.
    """
    hidden_in = dot(X, W_hid) + b_hid
    Y = logistic(hidden_in)
    output_in = dot(Y, W_out) + b_out
    O = threshold(output_in)
    return Y, O
# Notebook residue: bare expressions with no effect when run as a script.
X[0, :]
T[0]
def backpropag(y, delta_out):
    """Backpropagate the output error through the hidden layer.

    Returns a list of K hidden-layer deltas, computed from the logistic
    derivative y*(1-y) and the module-level output weights W_out.
    """
    slope = y * (1 - y)  # derivative of the logistic activation
    return [slope[k] * delta_out[0] * W_out[k] for k in range(K)]
def plot_classification(X, T, W_hid, b_hid, W_out, b_out):
    """Scatter-plot both classes and highlight misclassified points in red."""
    # Ground-truth class membership.
    pos = X[T > 0]
    neg = X[T < 0]
    # Network predictions; points where O differs from T are errors.
    Y, O = feedforward(X, W_hid, b_hid, W_out, b_out)
    wrong = X[O != T]
    # Blue = positive class, green = negative class, red = misclassified.
    plt.plot(pos[:, 0], pos[:, 1], 'bo')
    plt.plot(neg[:, 0], neg[:, 1], 'go')
    plt.plot(wrong[:, 0], wrong[:, 1], 'ro')
    plt.show()
# Online (per-sample) training of the one-hidden-layer perceptron.
errors = []
for epoch in range(2000):
    nb_errors = 0
    for i in range(N):
        x = X[i, :]
        t = T[i]
        y, o = feedforward(x, W_hid, b_hid, W_out, b_out)
        if t != o:
            nb_errors += 1
        # Output error (shape (1,)); it is zero when the sample is already
        # classified correctly, so the updates below are then no-ops.
        delta_out = (t - o)
        # Hidden deltas: logistic derivative times back-propagated error.
        delta_hidden = y * (1 - y) * W_out * delta_out
        # Vectorized gradient steps (replace the per-unit Python loops).
        W_out += eta * delta_out[0] * y
        b_out += eta * delta_out[0]
        W_hid += eta * outer(x, delta_hidden)
        b_hid += eta * delta_hidden
    # Stop as soon as a full pass over the data makes no mistake.
    if nb_errors == 0:
        print("epoch nb :", epoch)
        break
plot_classification(X, T, W_hid, b_hid, W_out, b_out)
# Re-plot the ground-truth classes after training
# (blue = positive class, green = negative class).
positive_class = X[T>0]
negative_class = X[T<0]
plt.plot(positive_class[:,0], positive_class[:,1], 'bo')
plt.plot(negative_class[:,0], negative_class[:,1], 'go')
def neural_net(W_hid, W_out, b_hid, b_out):
    """Train the network in place on the module-level dataset (X, T).

    Runs online gradient descent for at most 2000 epochs, stopping early
    once an epoch produces zero classification errors.  The weight and
    bias arrays are modified in place.

    Returns the index of the last epoch executed.  NOTE(review): if the
    network never converges this is 1999, which is indistinguishable from
    converging exactly at epoch 1999.
    """
    for epoch in range(2000):
        nb_errors = 0
        for i in range(N):
            x = X[i, :]
            t = T[i]
            y, o = feedforward(x, W_hid, b_hid, W_out, b_out)
            if t != o:
                nb_errors += 1
            # Output error (shape (1,)); zero for correctly classified
            # samples, so the updates below are then no-ops.
            delta_out = (t - o)
            # Hidden deltas: logistic derivative times back-propagated error.
            delta_hidden = y * (1 - y) * W_out * delta_out
            # Vectorized gradient steps (replace the per-unit Python loops).
            W_out += eta * delta_out[0] * y
            b_out += eta * delta_out[0]
            W_hid += eta * outer(x, delta_hidden)
            b_hid += eta * delta_hidden
        if nb_errors == 0:
            print("epoch nb :", epoch)
            break
    return epoch
# Repeat training from nb_it random initializations and report the mean
# and variance of the number of epochs needed to converge.
t_epoch = []
nb_it = 5
for k in range(nb_it):
    max_val = 0.1
    W_hid = random.uniform(-max_val, max_val, (d, K))
    b_hid = random.uniform(-max_val, max_val, K)  # bias
    W_out = zeros(K)
    b_out = random.uniform(-max_val, max_val, 1)
    t_epoch.append(neural_net(W_hid, W_out, b_hid, b_out))

# Mean and population variance, computed with sum() instead of the
# manual accumulation loops.
moy = sum(t_epoch) / nb_it
var = sum([(ep - moy) ** 2 for ep in t_epoch]) / nb_it
print("variance :", var)
print("moyenne :", moy)