import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD
import matplotlib.pyplot as plt
# Load pre-saved train/test splits from .npy files in the working directory
# and convert each to a float32 torch tensor for the model.
# NOTE(review): assumes inputs have feature dimension 2 (NeuralNetwork's
# default d=2) and targets are 1-D — confirm against the data generator.
training_inputs = torch.tensor(np.load("inputs_training.npy")).float()
training_targets = torch.tensor(np.load("targets_training.npy")).float()
test_inputs = torch.tensor(np.load("inputs_test.npy")).float()
test_targets = torch.tensor(np.load("targets_test.npy")).float()
class NeuralNetwork(nn.Module):
    """Two-hidden-layer fully connected regressor.

    Architecture: Linear(d -> m1_dim) -> ReLU -> Linear(m1_dim -> m2_dim)
    -> ReLU -> Linear(m2_dim -> k_dim). The output head is linear (no
    activation) so the network can produce unbounded regression values.

    Args:
        d: input feature dimension.
        m1_dim: width of the first hidden layer.
        m2_dim: width of the second hidden layer.
        k_dim: output dimension.
    """

    def __init__(self, d=2, m1_dim=50, m2_dim=20, k_dim=1):
        super(NeuralNetwork, self).__init__()
        self.m1 = nn.Linear(d, m1_dim)
        self.m2 = nn.Linear(m1_dim, m2_dim)
        self.non_linearity = nn.ReLU()
        # BUG FIX: the original hard-coded an output width of 1, silently
        # ignoring k_dim; use the parameter (default 1 keeps old behavior).
        self.output = nn.Linear(m2_dim, k_dim)

    def forward(self, x):
        """Map a (batch, d) input to (batch, k_dim) predictions."""
        m1_out = self.m1(x)
        m2_out = self.m2(self.non_linearity(m1_out))
        # No final non-linearity: a ReLU here would clip negative targets.
        return self.output(self.non_linearity(m2_out))
# --- Training setup --------------------------------------------------------
model = NeuralNetwork()
x = training_inputs
y = training_targets
optimizer = torch.optim.SGD(model.parameters(), lr=.01, momentum=.9)

# --- Full-batch gradient descent on the MSE loss ---------------------------
all_loss = []
for step in range(10000):
    # Call the module (not .forward()) so PyTorch hooks run; x is already
    # float32 from loading, so the original redundant .float() is dropped.
    preds = model(x)
    # Reshape targets to (N, 1) to match preds' shape exactly — subtracting
    # a (N,) tensor from a (N, 1) tensor would broadcast to (N, N).
    loss = F.mse_loss(preds, y.reshape(-1, 1))
    all_loss.append(loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# Loss curve over training steps.
plt.scatter(list(range(len(all_loss))), all_loss)


def _rmse(targets, preds):
    """Root-mean-squared error between (N,) targets and (N, 1) predictions.

    BUG FIX: the original computed (y_pred - y) with y_pred of shape (N, 1)
    and y of shape (N,), which broadcasts to an (N, N) matrix of pairwise
    differences and reports a wildly inflated "RMSE". Flattening preds
    first makes the subtraction elementwise as intended.
    """
    return torch.sqrt(torch.mean((preds.reshape(-1) - targets) ** 2))


y = training_targets.detach()
y_pred = model(training_inputs).detach()
print("RMSE training:", _rmse(y, y_pred))

# Leave y / y_pred bound to the *test* versions afterwards, matching the
# original script's final state for any downstream cells.
y = test_targets.detach()
y_pred = model(test_inputs).detach()
print("RMSE test:", _rmse(y, y_pred))

# Adjacency matrix, presumably for graph analysis further down the file —
# loaded here unchanged.
adj = np.load("adjmat.npy")