import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD
import matplotlib.pyplot as plt
training_inputs = torch.tensor(np.load("inputs_training.npy")).float()
training_targets = torch.tensor(np.load("targets_training.npy")).float()
print("Shape training inputs: ", training_inputs.shape)
print("Shape training targets: ", training_targets.shape)
test_inputs = torch.tensor(np.load("inputs_test.npy")).float()
test_targets = torch.tensor(np.load("targets_test.npy")).float()
print("Shape test inputs: ", test_inputs.shape)
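# Sanity check: the forward pass below reads input columns 0 and 1,
# so both input sets need at least two feature columns.
assert training_inputs.shape[1] >= 2 and test_inputs.shape[1] >= 2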
class NeuralNetwork(nn.Module):
    def __init__(self, x):
        super().__init__()  # 2D input (like logistic regression)
        n = len(x)  # number of samples; the layer shapes below depend on it
        # Small random initialization: all-zero weights with sigmoid activations
        # leave every hidden unit identical, so the layers never differentiate.
        self.weights = nn.ParameterDict({
            'w11': nn.Parameter(torch.randn(50, n) * 0.01),
            'w12': nn.Parameter(torch.randn(50, n) * 0.01),
            'b1': nn.Parameter(torch.zeros(50)),
            'w2': nn.Parameter(torch.randn(20, 50) * 0.01),
            'b2': nn.Parameter(torch.zeros(20)),
            'w3': nn.Parameter(torch.randn(n, 20) * 0.01),
            'b3': nn.Parameter(torch.zeros(n))
        })

    def forward(self, x):
        xcol1 = x[:, 0]
        xcol2 = x[:, 1]
        x = self.weights['w11'] @ xcol1 + self.weights['w12'] @ xcol2 + self.weights['b1']
        x = torch.sigmoid(x)
        x = self.weights['w2'] @ x + self.weights['b2']
        x = torch.sigmoid(x)
        x = self.weights['w3'] @ x + self.weights['b3']
        x = torch.sigmoid(x)
        return x
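# Note: the layer shapes above are tied to len(x), the number of training
# samples, so the trained model only accepts inputs with exactly that many
# rows; a test set of a different size cannot be passed through unchanged.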
data = training_inputs
targets = training_targets
model = NeuralNetwork(data)
optimizer = SGD(model.parameters(), lr=1e-3)
loss_history = []
loss_fn = nn.MSELoss(reduction="sum")  # build the loss once, outside the loop
for step in range(3000):
    predictions = model(data)
    loss = loss_fn(predictions, targets)
    loss_history.append(loss.item())  # store plain floats, not graph-attached tensors
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
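# Report the final loss value as a reference point for the plot below.
print(f"Final training loss after {step + 1} steps: {loss.item():.4f}")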
params = list(model.parameters())
w11 = params[0].detach().numpy()
w12 = params[1].detach().numpy()
b1 = params[2].detach().numpy()
w2 = params[3].detach().numpy()
b2 = params[4].detach().numpy()
w3 = params[5].detach().numpy()  # was params[4], which duplicated b2
b3 = params[6].detach().numpy()  # was params[5]
param_dict = {'w11': w11, 'w12': w12, 'b1': b1, 'w2': w2, 'b2': b2, 'w3': w3, 'b3': b3}
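# A minimal alternative sketch: PyTorch's state_dict handles this save/restore
# round trip without manual index bookkeeping (the "model_weights.pt" filename
# is an assumption, not part of the original script).
torch.save(model.state_dict(), "model_weights.pt")
restored_model = NeuralNetwork(data)
restored_model.load_state_dict(torch.load("model_weights.pt"))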
# Plot loss history (entries are already plain floats, so no detach pass is needed)
fig, ax = plt.subplots()
ax.plot(loss_history, label='Model Loss')
ax.set_xlabel('Training Epoch')
ax.set_ylabel('Sum of Squares Loss')
ax.set_title('Sum of Squares Loss Over Training Time')
ax.legend()
plt.show()
# Apply to training data and get classification accuracy
p_dict = {}
for key in param_dict:
    p_dict[key] = torch.tensor(param_dict[key])
# Re-apply the saved weights to the training set; a working accuracy sketch follows below.
def apply_weights(x):
    xcol1 = x[:, 0]
    xcol2 = x[:, 1]
    x = p_dict['w11'] @ xcol1 + p_dict['w12'] @ xcol2 + p_dict['b1']
    x = torch.sigmoid(x)
    x = p_dict['w2'] @ x + p_dict['b2']
    x = torch.sigmoid(x)
    x = p_dict['w3'] @ x + p_dict['b3']
    return torch.sigmoid(x)
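# Quick consistency check: apply_weights should reproduce the trained model's
# outputs exactly, since p_dict holds copies of the same weights.
with torch.no_grad():
    assert torch.allclose(apply_weights(data), model(data))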
#predictions = apply_weights(data)
#loss = torch.mean(torch.abs(predictions - targets)**2)
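# A minimal sketch of the accuracy computation the commented-out lines aim at,
# assuming binary 0/1 targets and a 0.5 decision threshold (the target
# encoding is an assumption; it is not shown above).
with torch.no_grad():
    train_predictions = apply_weights(data)
    predicted_labels = (train_predictions > 0.5).float()
    accuracy = (predicted_labels == targets).float().mean().item()
print(f"Training classification accuracy: {accuracy:.3f}")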
adj = np.load("adjmat.npy")
num_rows, num_col = adj.shape
print("Number of nodes: ", num_rows)
x = [1, 1, 1]  # placeholder vector (unused below)
Atilde = np.identity(num_rows) + adj  # adjacency with self loops included
print(Atilde[:40, 1])
D = np.zeros_like(Atilde)  # initialize degree matrix
for i in range(num_col):
    list_col = Atilde[:, i]
    D[i, i] = np.sum(list_col)  # diagonal matrix of node degrees (with self loops)
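# Cross-check: the degree loop above is equivalent to a vectorized column sum.
assert np.allclose(np.diag(D), Atilde.sum(axis=0))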
L = D - Atilde
print("Graph Laplacian: \n", L)
# D is diagonal, so D^(-1/2) comes straight from its diagonal entries;
# this avoids the fragile elementwise power of a full matrix inverse.
D_inv_sqrt = np.diag(1.0 / np.sqrt(np.diag(D)))
normalized_L = np.identity(num_rows) - D_inv_sqrt @ Atilde @ D_inv_sqrt
print("Normalized Graph Laplacian: \n", normalized_L)