import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, SGD
import matplotlib.pyplot as plt
# Load the glass dataset and build normalized feature / label tensors.
glass_data = pd.read_csv("glass.csv")
glass_data

# Features: drop the label ("Type") and the refractive index ("RI").
glass_data_x = glass_data.drop(["Type", "RI"], axis=1)
# Z-score normalize each feature column.
glass_data_x_normalized = (glass_data_x - glass_data_x.mean()) / glass_data_x.std()
glass_data_x_normalized

x_data = torch.tensor(glass_data_x_normalized.to_numpy()).float()
# Add a channel dimension: (N, features) -> (N, 1, features) for Conv1d.
x_data = x_data.unsqueeze(-2)
# Set y to be from 0-6. Select the label column by NAME rather than by
# position (iloc[:, 9]) so a change in column order cannot silently pick
# the wrong column.
y_data = torch.tensor(glass_data["Type"].to_numpy()).long() - 1
print("X data shape", x_data.shape)
print("Y data shape", y_data.shape)
print("X max is ", x_data.max())
print("X min is ", x_data.min())
print("Y max is ", y_data.max())
print("Y min is ", y_data.min())
class Glass(nn.Module):
    """1-D CNN classifier for the glass dataset.

    Expects input of shape (batch, 1, in_len) and returns raw logits of
    shape (batch, 7), one score per glass type (labels 0-6).

    params keys:
        kernel_size: conv kernel width (required).
        hidden_dim:  number of conv output channels (required).
        out_dim:     width of the hidden fully-connected layer (required).
        in_len:      number of input features; optional, default 8
                     (generalized — previously hard-coded).
        padding:     conv zero-padding; optional, default 1
                     (generalized — previously hard-coded).
    """

    def __init__(self, params: dict):
        super(Glass, self).__init__()
        self.kernel_size = params["kernel_size"]
        self.hidden_dim = params["hidden_dim"]
        self.out_dim = params["out_dim"]
        self.in_len = params.get("in_len", 8)
        self.padding = params.get("padding", 1)
        self.conv1d = nn.Sequential(
            nn.Conv1d(1, self.hidden_dim, self.kernel_size,
                      padding=self.padding),
            nn.ReLU()
        )
        # Conv1d output length (stride 1, dilation 1):
        #   L_out = in_len - kernel_size + 2 * padding + 1
        conv_out_len = self.in_len - self.kernel_size + 2 * self.padding + 1
        self.fc = nn.Sequential(
            nn.Linear(conv_out_len * self.hidden_dim, self.out_dim),
            nn.ReLU(),
            nn.Linear(self.out_dim, 7)
        )

    def forward(self, x):
        """Map (batch, 1, in_len) inputs to (batch, 7) class logits."""
        x = self.conv1d(x)
        # Flatten (batch, hidden_dim, conv_out_len) before the FC head.
        out = self.fc(x.view(x.size(0), -1))
        return out
# 80/20 random split over example indices.
num_examples = x_data.shape[0]
num_train = int(num_examples * .8)
num_test = num_examples - num_train
shuffled_indices = torch.randperm(num_examples)
train_index, test_index = shuffled_indices[:num_train], shuffled_indices[num_train:]
# Seed BEFORE constructing the model so the weight initialization is
# reproducible; the original seeded after Glass() was built, which left
# the init weights unseeded and defeated the purpose of the seed.
torch.manual_seed(42)

params = {
    "hidden_dim": 32,
    "kernel_size": 3,
    "out_dim": 512
}
g = Glass(params)

# Training hyperparameters.
batch_size = 64
num_epochs = 1000
learning_rate = 1e-2
opt = SGD(g.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
# Mini-batch training loop; evaluates on the held-out test split each epoch.
num_train_batch = (num_train + batch_size - 1) // batch_size
num_test_batch = (num_test + batch_size - 1) // batch_size
all_train_loss = []
all_test_loss = []
for epoch in range(num_epochs):
    # Visit training examples in a fresh random order every epoch.
    train_order = train_index[torch.randperm(num_train)]
    epoch_train_loss = 0
    g.train()
    for b in range(num_train_batch):
        sel = train_order[b * batch_size:(b + 1) * batch_size]
        pred = g(x_data[sel])
        loss = criterion(pred, y_data[sel])
        epoch_train_loss += loss.item()
        opt.zero_grad()
        loss.backward()
        opt.step()
    epoch_test_loss = 0
    g.eval()
    for b in range(num_test_batch):
        sel = test_index[b * batch_size:(b + 1) * batch_size]
        with torch.no_grad():
            pred = g(x_data[sel])
            loss = criterion(pred, y_data[sel])
        epoch_test_loss += loss.item()
    all_train_loss.append(epoch_train_loss / num_train_batch)
    all_test_loss.append(epoch_test_loss / num_test_batch)
# Summary of the final epoch's losses.
print(f"Train Loss: {all_train_loss[-1]:.3f}")
print(f"Test Loss: {all_test_loss[-1]:.3f}")
# Learning curves for the glass classifier.
for curve, curve_label in ((all_train_loss, "Train"), (all_test_loss, "Test")):
    plt.plot(curve, label=curve_label)
plt.legend()
#### YOUR CODE HERE ####
from sklearn.metrics import classification_report, accuracy_score


def _report_split(split_name, header, indices):
    """Print overall and per-class accuracy for one data split.

    split_name: tag used in the per-class lines ("Train"/"Test").
    header:     label for the overall-accuracy line.
    indices:    index tensor selecting the split's rows of x_data/y_data.
    """
    pred = g(x_data[indices])
    overall = accuracy_score(y_data[indices], pred.argmax(axis=1)) * 100
    print(f"{header}: {overall:.1f}%")
    report_dict = classification_report(
        y_data[indices], pred.argmax(axis=1),
        labels=range(7), output_dict=True, zero_division=0)
    for i in range(7):
        # Per-class accuracy is RECALL (fraction of true class-i samples
        # predicted correctly). The original printed precision here,
        # which measures something else (purity of class-i predictions).
        class_accuracy = report_dict[str(i)]["recall"] * 100
        print(f"({split_name}) Class {i} Accuracy: {class_accuracy:.2f}%")


with torch.no_grad():
    g.eval()
    _report_split("Train", "Training Accuracy", train_index)
    _report_split("Test", "Test Accuracy", test_index)
# Generate 10 noisy sine waves, each with a random phase offset in [0, 2*pi).
num_data_points = 20
PI = torch.tensor(np.pi).float()
noise = 0.01
phases = torch.rand((10, 1)) * 2 * PI
t = torch.linspace(0, 2 * PI, num_data_points)
x = torch.sin(t + phases)
# Add noise
x = x + torch.randn_like(x) * noise
# Plot each wave, labelled by its phase.
for wave_phase, wave in zip(phases, x):
    plt.plot(wave.cpu().numpy(), label=f"{wave_phase.item():.2f}")
plt.legend()
class RNNNet(nn.Module):
    """Single-layer RNN followed by a small MLP head applied per timestep.

    Input:  (batch, seq_len, input_size)
    Output: (batch, seq_len, output_size) — one prediction per timestep.
    """

    def __init__(self, input_size=1, hidden_layer_size=32, output_size=1):
        super(RNNNet, self).__init__()
        self.rnn = nn.RNN(input_size, hidden_layer_size, batch_first=True)
        # Two hidden Linear+ReLU stages, then a final projection.
        head = []
        for _ in range(2):
            head.append(nn.Linear(hidden_layer_size, hidden_layer_size))
            head.append(nn.ReLU())
        head.append(nn.Linear(hidden_layer_size, output_size))
        self.fc = nn.Sequential(*head)

    def forward(self, input_seq):
        """Run the RNN over the sequence, then project every hidden state."""
        hidden_states, _final = self.rnn(input_seq)
        return self.fc(hidden_states)
x_data = x[:, :-1].unsqueeze(-1)
y_data = x[:, 1:].unsqueeze(-1)
num_examples = x_data.shape[0]
num_train = int(num_examples * .8)
num_val = num_examples - num_train
random_indices = torch.randperm(num_examples)
train_index = random_indices[0:num_train]
val_index = random_indices[num_train:]
print(x_data.shape)
print(y_data.shape)
# Seed BEFORE building the network so its initial weights are reproducible;
# the original called manual_seed after RNNNet() was already constructed.
torch.manual_seed(42)
rnn_net = RNNNet()

# Training hyperparameters.
batch_size = 4
num_epochs = 100
learning_rate = 1e-2
opt = Adam(rnn_net.parameters(), lr=learning_rate)
criterion = nn.MSELoss()
num_train_batch = (num_train + batch_size - 1) // batch_size
num_val_batch = (num_val + batch_size - 1) // batch_size
# Mini-batch training loop with per-epoch validation.
all_train_loss = []
all_val_loss = []
for epoch in range(num_epochs):
    # Fresh random visiting order each epoch.
    train_order = train_index[torch.randperm(num_train)]
    epoch_train_loss = 0
    # Consistency fix: toggle train/eval modes like the Glass loop does.
    # A no-op for plain RNN/Linear layers (no dropout/batchnorm), but it
    # keeps the two training loops uniform and future-proofs the eval pass.
    rnn_net.train()
    for batch in range(num_train_batch):
        indices = train_order[batch * batch_size:(batch + 1) * batch_size]
        x_batch = x_data[indices]
        y_batch = y_data[indices]
        pred = rnn_net(x_batch)
        loss = criterion(pred, y_batch)
        epoch_train_loss += loss.item()
        opt.zero_grad()
        loss.backward()
        opt.step()
    epoch_val_loss = 0
    rnn_net.eval()
    for batch in range(num_val_batch):
        val_indices = val_index[batch * batch_size:(batch + 1) * batch_size]
        x_batch = x_data[val_indices]
        y_batch = y_data[val_indices]
        with torch.no_grad():
            pred = rnn_net(x_batch)
            loss = criterion(pred, y_batch)
        epoch_val_loss += loss.item()
    all_train_loss.append(epoch_train_loss / num_train_batch)
    all_val_loss.append(epoch_val_loss / num_val_batch)
# Summary of the final epoch's losses.
print(f"Train Loss: {all_train_loss[-1]:.5f}")
print(f"Validation Loss: {all_val_loss[-1]:.5f}")
# Learning curves for the RNN.
for curve, curve_label in ((all_train_loss, "Train"), (all_val_loss, "Val")):
    plt.plot(curve, label=curve_label)
plt.legend()
# Held-out test waves: only the first 40% of each sequence is observed;
# the remaining steps will be predicted autoregressively.
num_test = 5
sequence_known_frac = 0.4
sequence_known_len = int(num_data_points * sequence_known_frac)
sequence_pred_len = num_data_points - sequence_known_len
# NOTE(review): duplicates sequence_known_len; kept in case later code
# (outside this view) references the name.
sequence_to_predict = int(num_data_points * sequence_known_frac)

t_test = torch.linspace(0, 1 * PI, sequence_known_len)
phases_test = torch.rand((num_test, 1)) * 2 * PI
x_test = torch.sin(t_test + phases_test)
# Add noise
x_test = x_test + torch.randn_like(x_test) * noise

# Plot each known prefix, labelled by its phase.
for wave_phase, wave in zip(phases_test, x_test):
    plt.plot(wave.cpu().numpy(), label=f"{wave_phase.item():.2f}")
plt.legend()

# Add the feature dimension expected by the RNN: (num_test, len, 1).
x_test = x_test.unsqueeze(-1)
print(x_test.shape)
#### YOUR CODE HERE ####
# Autoregressive rollout: repeatedly feed the sequence so far, take the
# model's last-step output, and append it as the next observed value.
data = x_test.clone()
with torch.no_grad():
    rnn_net.eval()
    for _ in range(sequence_pred_len):
        next_step = rnn_net(data)[:, -1, :].unsqueeze(dim=-1)
        data = torch.cat((data, next_step), dim=1)

# Plot the completed sequences (known prefix + predictions).
for wave_phase, wave in zip(phases_test, data.squeeze()):
    plt.plot(wave.numpy(), label=f"{wave_phase.item():.2f}")
plt.legend()