!pip install scikit-optimize
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import (train_test_split, GridSearchCV, RandomizedSearchCV,
                                     TimeSeriesSplit, cross_val_score)
from skopt import BayesSearchCV
from tensorflow.keras import Sequential, Input, regularizers
from tensorflow.keras.layers import (LSTM, GRU, SimpleRNN, Bidirectional, Dense, Dropout,
                                     Flatten, Conv1D, Conv2D, MaxPooling1D, MaxPooling2D,
                                     TimeDistributed, Activation, LeakyReLU, ReLU, PReLU)
from tensorflow.keras.activations import softplus
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers.schedules import LearningRateSchedule, ExponentialDecay
from tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping
from tensorflow.keras.regularizers import l1, l2, l1_l2
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
Successfully installed pyaml-21.10.1 scikit-optimize-0.9.0
def all_features():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_all_features.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df['geo_value_t_minus_1'] = clean_good_full_df['geo_value_t_minus_1'].astype("float")
t = clean_good_full_df.iloc[:,0:10].reset_index()
t_minus_1 = clean_good_full_df.iloc[:,10:20].reset_index()
t_plus_1 = clean_good_full_df.iloc[:,20].reset_index()
clean_good_full_df = t_minus_1.merge(t, on = ['index']).merge(t_plus_1, on = ['index']).drop(columns = ['index']).reset_index(drop = True)
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value','time_value_t_minus_1'])
    #Scaling and Reshaping: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    global X_df_scaled_reshaped, Y_df_scaled_reshaped, X_test_df_scaled_reshaped
    global Y_test_df_scaled_reshaped, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:18]
y_full = full_df_scaled[:,18]
X_train = X_full[0:4256,:]
X_test = X_full[4256:,:]
y_train = y_full[0:4256]
y_test = y_full[4256:]
X_cv = X_train
y_cv = y_train
X_df_scaled_reshaped = X_train.reshape(4256,2,9)
Y_df_scaled_reshaped = y_train.reshape(4256,1)
X_test_df_scaled_reshaped = X_test.reshape(1065,2,9)
Y_test_df_scaled_reshaped = y_test.reshape(1065,1)
X_cv_df_scaled_reshaped = X_cv.reshape(4256,2,9)
Y_cv_df_scaled_reshaped = y_cv.reshape(4256,1)
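The reshape above converts each flat row of 18 scaled values into the (samples, timesteps, features) layout that Keras recurrent layers expect: columns 0-8 hold the t-1 snapshot and columns 9-17 the t snapshot. A minimal illustration of the convention (toy numbers, not project data):
# Sketch: a flat row of 2*3 lagged values becomes one (timesteps, features) window
demo = np.arange(6).reshape(1, 6)    # one sample: [t-1 features | t features]
window = demo.reshape(1, 2, 3)       # (samples, timesteps, features)
print(window[0, 0], window[0, 1])    # [0 1 2] is the t-1 slice, [3 4 5] the t slice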
def no_google():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_GoogleDropped.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df['geo_value_t_minus_1'] = clean_good_full_df['geo_value_t_minus_1'].astype("float")
t = clean_good_full_df.iloc[:,0:8].reset_index()
t_minus_1 = clean_good_full_df.iloc[:,8:16].reset_index()
t_plus_1 = clean_good_full_df.iloc[:,16].reset_index()
clean_good_full_df = t_minus_1.merge(t, on = ['index']).merge(t_plus_1, on = ['index']).drop(columns = ['index']).reset_index(drop = True)
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value','time_value_t_minus_1'])
    #Scaling and Reshaping: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    global X_df_scaled_reshaped, Y_df_scaled_reshaped, X_test_df_scaled_reshaped
    global Y_test_df_scaled_reshaped, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:14]
y_full = full_df_scaled[:,14]
X_train = X_full[0:6398,:]
X_test = X_full[6398:,:]
y_train = y_full[0:6398]
y_test = y_full[6398:]
X_cv = X_train
y_cv = y_train
X_df_scaled_reshaped = X_train.reshape(6398,2,7)
Y_df_scaled_reshaped = y_train.reshape(6398,1)
X_test_df_scaled_reshaped = X_test.reshape(1600,2,7)
Y_test_df_scaled_reshaped = y_test.reshape(1600,1)
X_cv_df_scaled_reshaped = X_cv.reshape(6398,2,7)
Y_cv_df_scaled_reshaped = y_cv.reshape(6398,1)
def no_home():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTimeDropped.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df['geo_value_t_minus_1'] = clean_good_full_df['geo_value_t_minus_1'].astype("float")
t = clean_good_full_df.iloc[:,0:9].reset_index()
t_minus_1 = clean_good_full_df.iloc[:,9:18].reset_index()
t_plus_1 = clean_good_full_df.iloc[:,18].reset_index()
clean_good_full_df = t_minus_1.merge(t, on = ['index']).merge(t_plus_1, on = ['index']).drop(columns = ['index']).reset_index(drop = True)
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value','time_value_t_minus_1'])
    #Scaling and Reshaping: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    global X_df_scaled_reshaped, Y_df_scaled_reshaped, X_test_df_scaled_reshaped
    global Y_test_df_scaled_reshaped, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:16]
y_full = full_df_scaled[:,16]
X_train = X_full[0:5948,:]
X_test = X_full[5948:,:]
y_train = y_full[0:5948]
y_test = y_full[5948:]
X_cv = X_train
y_cv = y_train
X_df_scaled_reshaped = X_train.reshape(5948,2,8)
Y_df_scaled_reshaped = y_train.reshape(5948,1)
X_test_df_scaled_reshaped = X_test.reshape(1488,2,8)
Y_test_df_scaled_reshaped = y_test.reshape(1488,1)
X_cv_df_scaled_reshaped = X_cv.reshape(5948,2,8)
Y_cv_df_scaled_reshaped = y_cv.reshape(5948,1)
def no_home_no_google():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTimeDropped_GoogleDropped.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df['geo_value_t_minus_1'] = clean_good_full_df['geo_value_t_minus_1'].astype("float")
t = clean_good_full_df.iloc[:,0:7].reset_index()
t_minus_1 = clean_good_full_df.iloc[:,7:14].reset_index()
t_plus_1 = clean_good_full_df.iloc[:,14].reset_index()
clean_good_full_df = t_minus_1.merge(t, on = ['index']).merge(t_plus_1, on = ['index']).drop(columns = ['index']).reset_index(drop = True)
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value','time_value_t_minus_1'])
    #Scaling and Reshaping: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    global X_df_scaled_reshaped, Y_df_scaled_reshaped, X_test_df_scaled_reshaped
    global Y_test_df_scaled_reshaped, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:12]
y_full = full_df_scaled[:,12]
X_train = X_full[0:9048,:]
X_test = X_full[9048:,:]
y_train = y_full[0:9048]
y_test = y_full[9048:]
X_cv = X_train
y_cv = y_train
X_df_scaled_reshaped = X_train.reshape(9048,2,6)
Y_df_scaled_reshaped = y_train.reshape(9048,1)
X_test_df_scaled_reshaped = X_test.reshape(2263,2,6)
Y_test_df_scaled_reshaped = y_test.reshape(2263,1)
X_cv_df_scaled_reshaped = X_cv.reshape(9048,2,6)
Y_cv_df_scaled_reshaped = y_cv.reshape(9048,1)
def delta_noHome_noGoogle():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('delta_full_df_GoogleDropped_HomeTimeDropped.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value'])
    #Scaling: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:11]
y_full = full_df_scaled[:,11]
X_train = X_full[0:9048,:]
X_test = X_full[9048:,:]
y_train = y_full[0:9048]
y_test = y_full[9048:]
X_cv = X_train
y_cv = y_train
def delta_noHome():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('delta_full_df_HomeTimeDropped.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value'])
    #Scaling: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:15]
y_full = full_df_scaled[:,15]
X_train = X_full[0:5948,:]
X_test = X_full[5948:,:]
y_train = y_full[0:5948]
y_test = y_full[5948:]
X_cv = X_train
y_cv = y_train
def delta_noGoogle():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('delta_full_df_GoogleDropped.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value'])
    #Scaling: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:13]
y_full = full_df_scaled[:,13]
X_train = X_full[0:6398,:]
X_test = X_full[6398:,:]
y_train = y_full[0:6398]
y_test = y_full[6398:]
X_cv = X_train
y_cv = y_train
def delta_allFeatures():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('delta_full_df_all_features.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value'])
    #Scaling: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:17]
y_full = full_df_scaled[:,17]
X_train = X_full[0:4256,:]
X_test = X_full[4256:,:]
y_train = y_full[0:4256]
y_test = y_full[4256:]
X_cv = X_train
y_cv = y_train
def no_google_6timesteps():
#Reading in DF and Formatting
global clean_good_full_df
global time
clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_GoogleDropped_6timesteps.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df['geo_value_t_minus_1'] = clean_good_full_df['geo_value_t_minus_1'].astype("float")
clean_good_full_df['geo_value_t_minus_2'] = clean_good_full_df['geo_value_t_minus_2'].astype("float")
clean_good_full_df['geo_value_t_minus_3'] = clean_good_full_df['geo_value_t_minus_3'].astype("float")
clean_good_full_df['geo_value_t_minus_4'] = clean_good_full_df['geo_value_t_minus_4'].astype("float")
clean_good_full_df['geo_value_t_minus_5'] = clean_good_full_df['geo_value_t_minus_5'].astype("float")
time = clean_good_full_df.sort_values(by = 'time_value').drop(columns = [
'time_value_t_minus_1','time_value_t_minus_2','time_value_t_minus_3','time_value_t_minus_4',
'time_value_t_minus_5'])
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value',
'time_value_t_minus_1','time_value_t_minus_2','time_value_t_minus_3','time_value_t_minus_4',
'time_value_t_minus_5'])
    #Scaling and Reshaping: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    global X_df_scaled_reshaped, Y_df_scaled_reshaped, X_test_df_scaled_reshaped
    global Y_test_df_scaled_reshaped, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:42]
y_full = full_df_scaled[:,42]
X_train = X_full[0:6695,:]
X_test = X_full[6695:,:]
y_train = y_full[0:6695]
y_test = y_full[6695:]
X_cv = X_train
y_cv = y_train
X_df_scaled_reshaped = X_train.reshape(6695,6,7)
Y_df_scaled_reshaped = y_train.reshape(6695,1)
X_test_df_scaled_reshaped = X_test.reshape(1674,6,7)
Y_test_df_scaled_reshaped = y_test.reshape(1674,1)
X_cv_df_scaled_reshaped = X_cv.reshape(6695,6,7)
Y_cv_df_scaled_reshaped = y_cv.reshape(6695,1)
no_google_6timesteps()
def no_google_5timesteps():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_GoogleDropped_5timesteps.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df['geo_value_t_minus_1'] = clean_good_full_df['geo_value_t_minus_1'].astype("float")
clean_good_full_df['geo_value_t_minus_2'] = clean_good_full_df['geo_value_t_minus_2'].astype("float")
clean_good_full_df['geo_value_t_minus_3'] = clean_good_full_df['geo_value_t_minus_3'].astype("float")
clean_good_full_df['geo_value_t_minus_4'] = clean_good_full_df['geo_value_t_minus_4'].astype("float")
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value',
'time_value_t_minus_1','time_value_t_minus_2','time_value_t_minus_3','time_value_t_minus_4'])
    #Scaling and Reshaping: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    global X_df_scaled_reshaped, Y_df_scaled_reshaped, X_test_df_scaled_reshaped
    global Y_test_df_scaled_reshaped, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:35]
y_full = full_df_scaled[:,35]
X_train = X_full[0:6695,:]
X_test = X_full[6695:,:]
y_train = y_full[0:6695]
y_test = y_full[6695:]
X_cv = X_train
y_cv = y_train
X_df_scaled_reshaped = X_train.reshape(6695,5,7)
Y_df_scaled_reshaped = y_train.reshape(6695,1)
X_test_df_scaled_reshaped = X_test.reshape(1795,5,7)
Y_test_df_scaled_reshaped = y_test.reshape(1795,1)
X_cv_df_scaled_reshaped = X_cv.reshape(6695,5,7)
Y_cv_df_scaled_reshaped = y_cv.reshape(6695,1)
no_google_5timesteps()
def no_google_4timesteps():
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_GoogleDropped_4timesteps.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df['geo_value_t_minus_1'] = clean_good_full_df['geo_value_t_minus_1'].astype("float")
clean_good_full_df['geo_value_t_minus_2'] = clean_good_full_df['geo_value_t_minus_2'].astype("float")
clean_good_full_df['geo_value_t_minus_3'] = clean_good_full_df['geo_value_t_minus_3'].astype("float")
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value',
'time_value_t_minus_1','time_value_t_minus_2','time_value_t_minus_3'])
    #Scaling and Reshaping: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    global X_df_scaled_reshaped, Y_df_scaled_reshaped, X_test_df_scaled_reshaped
    global Y_test_df_scaled_reshaped, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:28]
y_full = full_df_scaled[:,28]
X_train = X_full[0:6721,:]
X_test = X_full[6721:,:]
y_train = y_full[0:6721]
y_test = y_full[6721:]
X_cv = X_train
y_cv = y_train
X_df_scaled_reshaped = X_train.reshape(6721,4,7)
Y_df_scaled_reshaped = y_train.reshape(6721,1)
X_test_df_scaled_reshaped = X_test.reshape(1859,4,7)
Y_test_df_scaled_reshaped = y_test.reshape(1859,1)
X_cv_df_scaled_reshaped = X_cv.reshape(6721,4,7)
Y_cv_df_scaled_reshaped = y_cv.reshape(6721,1)
no_google_4timesteps()
def no_google_3timesteps():
global clean_good_full_df
global time
#Reading in DF and Formatting
clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_GoogleDropped_3timesteps.csv', index_col=0)
#clean_good_full_df = pd.read_csv('imputed_clean_good_full_df_HomeTime.csv', index_col=0)
clean_good_full_df['geo_value'] = clean_good_full_df['geo_value'].astype("float")
clean_good_full_df['geo_value_t_minus_1'] = clean_good_full_df['geo_value_t_minus_1'].astype("float")
clean_good_full_df['geo_value_t_minus_2'] = clean_good_full_df['geo_value_t_minus_2'].astype("float")
time = clean_good_full_df.sort_values(by = 'time_value').drop(columns = [
'time_value_t_minus_1','time_value_t_minus_2'])
clean_good_full_df = clean_good_full_df.sort_values(by = 'time_value').drop(columns = ['time_value', 'time_value_t_minus_1','time_value_t_minus_2'])
    #Scaling and Reshaping: fit one StandardScaler on the full frame, expose results globally
    global scaler, full_df_scaled, X_full, y_full, X_train, X_test, y_train, y_test, X_cv, y_cv
    global X_df_scaled_reshaped, Y_df_scaled_reshaped, X_test_df_scaled_reshaped
    global Y_test_df_scaled_reshaped, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped
    scaler = StandardScaler().fit(clean_good_full_df)
    full_df_scaled = scaler.transform(clean_good_full_df)
X_full = full_df_scaled[:,0:21]
y_full = full_df_scaled[:,21]
X_train = X_full[0:6863,:]
X_test = X_full[6863:,:]
y_train = y_full[0:6863]
y_test = y_full[6863:]
X_cv = X_train
y_cv = y_train
    # Bootstrap-resample the training rows (sampling with replacement) before reshaping
    global train_combined
    train_combined = pd.DataFrame(np.append(X_train, y_train.reshape(6863, 1), axis=1)).sample(frac=1, replace=True).to_numpy()
    X_train = train_combined[:, 0:21]
    y_train = train_combined[:, 21]
X_df_scaled_reshaped = X_train.reshape(6863,3,7)
Y_df_scaled_reshaped = y_train.reshape(6863,1)
X_test_df_scaled_reshaped = X_test.reshape(1747,3,7)
Y_test_df_scaled_reshaped = y_test.reshape(1747,1)
X_cv_df_scaled_reshaped = X_cv.reshape(6863,3,7)
Y_cv_df_scaled_reshaped = y_cv.reshape(6863,1)
no_google_3timesteps()
#Regular
all_features()
no_google()
no_home()
no_home_no_google()
#Delta
delta_allFeatures()
delta_noGoogle()
delta_noHome()
delta_noHome_noGoogle()
#Multi Time steps
no_google_6timesteps()
no_google_5timesteps()
no_google_4timesteps()
no_google_3timesteps()
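The twelve loaders above repeat the same read / sort / scale / split / reshape recipe and differ only in file name, feature count, train/test cut, and window length. A hypothetical consolidation for reference (load_dataset and its arguments are illustrative, not part of the original code):
# Sketch: one parameterized loader covering the pattern shared by the functions above
def load_dataset(csv_path, n_features, n_train, timesteps=None, time_cols=('time_value',)):
    df = pd.read_csv(csv_path, index_col=0)
    df = df.sort_values(by='time_value').drop(columns=list(time_cols))
    scaled = StandardScaler().fit_transform(df)
    X, y = scaled[:, :n_features], scaled[:, n_features]
    X_train, X_test = X[:n_train], X[n_train:]
    y_train, y_test = y[:n_train], y[n_train:]
    if timesteps is not None:  # fold flat lag columns into (samples, timesteps, features)
        X_train = X_train.reshape(-1, timesteps, n_features // timesteps)
        X_test = X_test.reshape(-1, timesteps, n_features // timesteps)
    return X_train, X_test, y_train, y_test
# e.g. the no_google() split: 14 flat features -> (samples, 2, 7) windows
# X_tr, X_te, y_tr, y_te = load_dataset('imputed_clean_good_full_df_GoogleDropped.csv', 14, 6398, timesteps=2)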
import warnings
from tensorflow.keras.callbacks import Callback
# Early-stopping callback that halts training once the monitored loss falls below a threshold
class EarlyStoppingByLossVal(Callback):
    def __init__(self, monitor='val_loss', value=0.00001, verbose=0):
        super().__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose
    def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return
        if current < self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True
def step_decay(epoch):
initial_lrate = 0.01
drop = 0.5
epochs_drop = 9.0
lrate = initial_lrate * math.pow(drop,
math.floor((1+epoch)/epochs_drop))
return lrate
lrate = LearningRateScheduler(step_decay)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100)
callbacks_list = [lrate, EarlyStoppingByLossVal(monitor='val_loss', value=4.5*10**(-5), verbose=1)]
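step_decay halves an initial rate of 0.01 every 9 epochs; a quick check of the schedule it produces:
# Sketch: learning rates emitted by step_decay at selected epochs
for e in [0, 8, 9, 17, 18]:
    print(e, step_decay(e))   # epochs 0-7 -> 0.01, 8-16 -> 0.005, 17-25 -> 0.0025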
#LSTM Model
from tensorflow.keras.constraints import max_norm
def create_LSTMmodel(optimizer="sgd", dropout_rate=0.4,
    activation='softplus', neurons1=96, init_mode='glorot_uniform', learn_rate=0.01, momentum=0.9):
    model = Sequential()
    model.add(LSTM(neurons1, activation=activation, kernel_initializer=init_mode,
                   input_shape=(X_df_scaled_reshaped.shape[1], X_df_scaled_reshaped.shape[2]),
                   kernel_constraint=max_norm(3), recurrent_constraint=max_norm(3), bias_constraint=max_norm(3)))
    #model.add(Dropout(dropout_rate))
    model.add(Dense(1, kernel_initializer=init_mode))
    model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum, clipnorm=1.0, clipvalue=0.5), loss='mse')
    return model
model = create_LSTMmodel()
model.summary()
#kernel_regularizer=regularizers.l2(l2=0.0001)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm (LSTM) (None, 96) 39936
_________________________________________________________________
dense (Dense) (None, 1) 97
=================================================================
Total params: 40,033
Trainable params: 40,033
Non-trainable params: 0
_________________________________________________________________
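As a sanity check on the 39,936 figure above (assuming the currently loaded 7-feature, no-Google data): an LSTM layer has four gate blocks, each with an input kernel, a recurrent kernel, and a bias.
# Sketch: reproduce the LSTM parameter count from the summary
units, features = 96, 7
print(4 * (units * (features + units) + units))   # 39936, matching the summary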
#Bidirectional LSTM Model
def create_Bidirectional_LSTMmodel(optimizer="sgd", dropout_rate=0.0,
    activation='softplus', neurons1=128, init_mode='glorot_uniform', learn_rate=0.01, momentum=0.9):
    model = Sequential()
    model.add(Bidirectional(LSTM(neurons1, activation=activation, kernel_initializer=init_mode),
                            input_shape=(X_df_scaled_reshaped.shape[1], X_df_scaled_reshaped.shape[2])))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, kernel_initializer=init_mode))
    model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum, clipnorm=1.0, clipvalue=0.5), loss='mse')
    return model
model = create_Bidirectional_LSTMmodel()
model.summary()
Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
bidirectional_2 (Bidirection (None, 256) 139264
_________________________________________________________________
dropout_3 (Dropout) (None, 256) 0
_________________________________________________________________
dense_3 (Dense) (None, 1) 257
=================================================================
Total params: 139,521
Trainable params: 139,521
Non-trainable params: 0
_________________________________________________________________
#GRU Model
def create_GRUmodel(optimizer="sgd", dropout_rate=0.0, activation='softplus', neurons=128, init_mode='glorot_uniform', learn_rate=0.01, momentum=0.9):
    model = Sequential()
    model.add(GRU(neurons, kernel_initializer=init_mode, activation=activation,
                  input_shape=(X_df_scaled_reshaped.shape[1], X_df_scaled_reshaped.shape[2])))
    model.add(Dropout(dropout_rate))
    model.add(Dense(Y_df_scaled_reshaped.shape[1], kernel_initializer=init_mode))
    # use the learn_rate argument; the original hardcoded lr=0.0, under which SGD cannot update
    model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum, clipnorm=1.0, clipvalue=0.5), loss='mse')
    return model
model = create_GRUmodel()
model.summary()
Model: "sequential_4"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
gru (GRU) (None, 128) 52608
_________________________________________________________________
dropout_4 (Dropout) (None, 128) 0
_________________________________________________________________
dense_4 (Dense) (None, 1) 129
=================================================================
Total params: 52,737
Trainable params: 52,737
Non-trainable params: 0
_________________________________________________________________
#Convolutional Model
def create_Convmodel(optimizer="sgd", dropout_rate=0.2,
    activation='softplus', neurons1=256, neurons2=64, neurons3=8, init_mode='glorot_uniform', learn_rate=0.01, momentum=0.9):
    model = Sequential()
    model.add(Conv1D(filters=64, kernel_size=2, activation=activation,
                     input_shape=(X_df_scaled_reshaped.shape[1], X_df_scaled_reshaped.shape[2])))
    model.add(MaxPooling1D(pool_size=1))
    model.add(Flatten())
    model.add(Dense(50, activation=activation))
    model.add(Dense(1, kernel_initializer=init_mode))
    # use the learn_rate argument; the original hardcoded lr=0.0, under which SGD cannot update
    model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum, clipnorm=1.0, clipvalue=0.5), loss='mse')
    return model
model = create_Convmodel()
model.summary()
Model: "sequential_5"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1d (Conv1D) (None, 5, 64) 960
_________________________________________________________________
max_pooling1d (MaxPooling1D) (None, 5, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 320) 0
_________________________________________________________________
dense_5 (Dense) (None, 50) 16050
_________________________________________________________________
dense_6 (Dense) (None, 1) 51
=================================================================
Total params: 17,061
Trainable params: 17,061
Non-trainable params: 0
_________________________________________________________________
#RNN Model
def create_RNNmodel(optimizer="sgd", dropout_rate=0.0, activation='softplus', neurons=128, init_mode='glorot_uniform', learn_rate=0.01, momentum=0.9):
    model = Sequential()
    model.add(SimpleRNN(neurons, kernel_initializer=init_mode, activation=activation,
                        input_shape=(X_df_scaled_reshaped.shape[1], X_df_scaled_reshaped.shape[2])))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, kernel_initializer=init_mode))
    # use the learn_rate argument; the original hardcoded lr=0.0, under which SGD cannot update
    model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum, clipnorm=1.0, clipvalue=0.5), loss='mse')
    return model
model = create_RNNmodel()
model.summary()
Model: "sequential_6"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
simple_rnn (SimpleRNN) (None, 128) 17408
_________________________________________________________________
dropout_5 (Dropout) (None, 128) 0
_________________________________________________________________
dense_7 (Dense) (None, 1) 129
=================================================================
Total params: 17,537
Trainable params: 17,537
Non-trainable params: 0
_________________________________________________________________
#Feed-Forward Model
def create_FFmodel(optimizer="sgd", dropout_rate=0.0, activation='relu', neurons=128, init_mode='glorot_uniform', learn_rate=0.01, momentum=0.9):
    model = Sequential()
    model.add(Flatten(input_shape=(X_df_scaled_reshaped.shape[1], X_df_scaled_reshaped.shape[2])))
    model.add(Dense(neurons, activation=activation, kernel_initializer=init_mode))
    model.add(Dense(1, kernel_initializer=init_mode))
    # use the learn_rate argument; the original hardcoded lr=0.0, under which SGD cannot update
    model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum, clipnorm=1.0, clipvalue=0.5), loss='mse')
    return model
model = create_FFmodel()
model.summary()
Model: "sequential_7"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
flatten_1 (Flatten) (None, 42) 0
_________________________________________________________________
dense_8 (Dense) (None, 128) 5504
_________________________________________________________________
dense_9 (Dense) (None, 1) 129
=================================================================
Total params: 5,633
Trainable params: 5,633
Non-trainable params: 0
_________________________________________________________________
# multiLayer-LSTM Model
def create_Bidirectional_multiLayer_LSTMmodel(optimizer="sgd", dropout_rate=0.0,
activation='softplus', hidden_layers = 1, neurons1 = 128, neurons2 = 128,neurons3 = 128, init_mode='glorot_uniform' , learn_rate=0.01, momentum = 0.9):
model = Sequential()
if hidden_layers == 1:
model.add(Bidirectional(LSTM(neurons1,activation= activation, kernel_initializer=init_mode),input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
if hidden_layers == 2:
model.add(Bidirectional(LSTM(neurons1, return_sequences=True,activation= activation, kernel_initializer=init_mode),input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
model.add(Bidirectional(LSTM(neurons1, kernel_initializer=init_mode)))
if hidden_layers == 3:
model.add(Bidirectional(LSTM(neurons1, return_sequences=True,activation= activation, kernel_initializer=init_mode),input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
model.add(Bidirectional(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode)))
model.add(Bidirectional(LSTM(neurons1, kernel_initializer=init_mode)))
if hidden_layers == 4:
model.add(Bidirectional(LSTM(neurons1, return_sequences=True,activation= activation, kernel_initializer=init_mode),input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
model.add(Bidirectional(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode)))
model.add(Bidirectional(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode)))
model.add(Bidirectional(LSTM(neurons1, kernel_initializer=init_mode)))
if hidden_layers == 5:
model.add(Bidirectional(LSTM(neurons1, return_sequences=True,activation= activation, kernel_initializer=init_mode),input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
model.add(Bidirectional(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode)))
model.add(Bidirectional(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode)))
model.add(Bidirectional(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode)))
model.add(Bidirectional(LSTM(neurons1, kernel_initializer=init_mode)))
    # TODO: optionally extend to deeper stacks (hidden_layers == 10, 20)
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, kernel_initializer=init_mode))
    model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum, clipnorm=1.0, clipvalue=0.5), loss='mse')
    return model
model = create_Bidirectional_multiLayer_LSTMmodel()
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
bidirectional_1 (Bidirection (None, 256) 139264
_________________________________________________________________
dropout_1 (Dropout) (None, 256) 0
_________________________________________________________________
dense_1 (Dense) (None, 1) 257
=================================================================
Total params: 139,521
Trainable params: 139,521
Non-trainable params: 0
_________________________________________________________________
# multiLayer-LSTM Model
def create_multiLayer_LSTMmodel(optimizer="sgd", dropout_rate=0.0,
activation='tanh', hidden_layers = 1, neurons1 = 256, neurons2 = 128,neurons3 = 128, init_mode='glorot_uniform' , learn_rate=0.01, momentum = 0.9):
model = Sequential()
if hidden_layers == 1:
model.add(LSTM(neurons1,activation= activation, kernel_initializer=init_mode,input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
if hidden_layers == 2:
model.add(LSTM(neurons1, return_sequences=True,activation= activation, kernel_initializer=init_mode,input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
model.add(LSTM(neurons1, kernel_initializer=init_mode))
if hidden_layers == 3:
model.add(LSTM(neurons1, return_sequences=True,activation= activation, kernel_initializer=init_mode,input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
model.add(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode))
model.add(LSTM(neurons1, kernel_initializer=init_mode))
if hidden_layers == 4:
model.add(LSTM(neurons1, return_sequences=True,activation= activation, kernel_initializer=init_mode,input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
model.add(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode))
model.add(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode))
model.add(LSTM(neurons1, kernel_initializer=init_mode))
if hidden_layers == 5:
model.add(LSTM(neurons1, return_sequences=True,activation= activation, kernel_initializer=init_mode,input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
model.add(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode))
model.add(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode))
model.add(LSTM(neurons1, return_sequences=True, kernel_initializer=init_mode))
model.add(LSTM(neurons1, kernel_initializer=init_mode))
    # TODO: optionally extend to deeper stacks (hidden_layers == 10, 20)
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, kernel_initializer=init_mode))
    model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum, clipnorm=1.0, clipvalue=0.5), loss='mse')
    return model
model = create_multiLayer_LSTMmodel()
model.summary()
Model: "sequential_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_3 (LSTM) (None, 256) 270336
_________________________________________________________________
dropout_3 (Dropout) (None, 256) 0
_________________________________________________________________
dense_3 (Dense) (None, 1) 257
=================================================================
Total params: 270,593
Trainable params: 270,593
Non-trainable params: 0
_________________________________________________________________
def create_Bidirectional_LSTM_Convmodel(optimizer="sgd", dropout_rate=0.0,
activation='softplus', neurons1 = 9, neurons2 = 128,neurons3 = 128, init_mode='glorot_uniform' , learn_rate=0.01, momentum = 0.9):
model = Sequential()
model.add(Bidirectional(LSTM(neurons1, return_sequences=True,activation= activation, kernel_initializer=init_mode),input_shape=(X_df_scaled_reshaped.shape[1],X_df_scaled_reshaped.shape[2])))
model.add(Conv1D(filters=64, kernel_size=2, activation=activation))
model.add(MaxPooling1D(pool_size=1))
model.add(Flatten())
model.add(Dense(1,kernel_initializer=init_mode))
    # use the learn_rate argument; the original hardcoded lr=0.0, under which SGD cannot update
    model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum, clipnorm=1.0, clipvalue=0.5), loss='mse')
return model
model = create_Bidirectional_LSTM_Convmodel()
model.summary()
Model: "sequential_6"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
bidirectional_4 (Bidirection (None, 2, 4) 160
_________________________________________________________________
conv1d_4 (Conv1D) (None, 1, 1) 9
_________________________________________________________________
max_pooling1d_4 (MaxPooling1 (None, 1, 1) 0
_________________________________________________________________
flatten_4 (Flatten) (None, 1) 0
_________________________________________________________________
dense_7 (Dense) (None, 1) 2
=================================================================
Total params: 171
Trainable params: 171
Non-trainable params: 0
_________________________________________________________________
#Regular
all_features()
no_google()
no_home()
no_home_no_google()
#Delta
delta_allFeatures()
delta_noGoogle()
delta_noHome()
delta_noHome_noGoogle()
create_LSTMmodel()
create_Bidirectional_LSTMmodel()
create_GRUmodel()
create_Convmodel()
create_RNNmodel()
create_FFmodel()
create_Bidirectional_multiLayer_LSTMmodel()
create_multiLayer_LSTMmodel()
create_Bidirectional_LSTM_Convmodel()
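With every builder defined, a quick size comparison across the architectures (a sketch; builds each model once with its defaults under the current input shape):
# Sketch: parameter counts per architecture
for build in [create_LSTMmodel, create_Bidirectional_LSTMmodel, create_GRUmodel,
              create_Convmodel, create_RNNmodel, create_FFmodel,
              create_Bidirectional_multiLayer_LSTMmodel, create_multiLayer_LSTMmodel,
              create_Bidirectional_LSTM_Convmodel]:
    print(build.__name__, build().count_params())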
#No Google
no_google()
tscv = TimeSeriesSplit(n_splits=5)
bi_multi_model = KerasRegressor(build_fn=create_LSTMmodel, verbose=2, epochs = 40)
print(np.mean(cross_val_score(bi_multi_model, X_cv_df_scaled_reshaped , Y_cv_df_scaled_reshaped , cv= tscv, n_jobs = -1, scoring = 'neg_mean_squared_error')))
Execution error
NameError: name 'create_LSTMmodel' is not defined
#No Google
no_google()
tscv = TimeSeriesSplit(n_splits=5)
Bi_LSTM_model = KerasRegressor(build_fn=create_Bidirectional_LSTMmodel, verbose=2, epochs = 40)
print(np.mean(cross_val_score(Bi_LSTM_model, X_cv_df_scaled_reshaped , Y_cv_df_scaled_reshaped , cv= tscv, n_jobs = -1, scoring = 'neg_mean_squared_error')))
-0.04903872102439845
#All Features
all_features()
tscv = TimeSeriesSplit(n_splits=5)
Bi_LSTM_model = KerasRegressor(build_fn=create_Bidirectional_LSTMmodel, verbose=2, epochs = 40)
print(np.mean(cross_val_score(Bi_LSTM_model, X_cv_df_scaled_reshaped , Y_cv_df_scaled_reshaped , cv= tscv, n_jobs = -1, scoring = 'neg_mean_squared_error')))
-0.008153595819397255
#No Home
no_home()
tscv = TimeSeriesSplit(n_splits=5)
Bi_LSTM_model = KerasRegressor(build_fn=create_Bidirectional_LSTMmodel, verbose=2, epochs = 40)
print(np.mean(cross_val_score(Bi_LSTM_model, X_cv_df_scaled_reshaped , Y_cv_df_scaled_reshaped , cv= tscv, n_jobs = -1, scoring = 'neg_mean_squared_error')))
-0.011252653156316964
#No Home, No Google
no_home_no_google()
tscv = TimeSeriesSplit(n_splits=5)
Bi_LSTM_model = KerasRegressor(build_fn=create_Bidirectional_LSTMmodel, verbose=2, epochs = 40)
print(np.mean(cross_val_score(Bi_LSTM_model, X_cv_df_scaled_reshaped , Y_cv_df_scaled_reshaped , cv= tscv, n_jobs = -1, scoring = 'neg_mean_squared_error')))
-0.009359750538754982
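All four comparisons above score with the same expanding-window protocol. To make the folds concrete, a minimal sketch of the boundaries TimeSeriesSplit yields on the current training arrays (assumes one of the loaders has just been called):
# Sketch: expanding train window and the following validation window per fold
tscv = TimeSeriesSplit(n_splits=5)
for i, (train_idx, val_idx) in enumerate(tscv.split(X_cv_df_scaled_reshaped)):
    print(f"fold {i}: train rows 0-{train_idx[-1]}, validate rows {val_idx[0]}-{val_idx[-1]}")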
from sklearn.model_selection import cross_val_score
# create dict to save all the cv scores
cv_scores = {}
cv_scores['1 Timesteps'] = []
cv_scores['2 Timesteps'] = []
cv_scores['3 Timesteps'] = []
cv_scores['4 Timesteps'] = []
cv_scores['5 Timesteps'] = []
# forecast_models list
tscv = TimeSeriesSplit(n_splits=5)
#LSTM_model = KerasRegressor(build_fn=create_LSTMmodel, verbose=2, epochs = 10) # test with 10 epochs
#Bi_LSTM_model = KerasRegressor(build_fn=create_Bi_Directional_LSTMmodel, verbose=2, epochs = 50)
#forecast_models = [LSTM_model,Bi_LSTM_model]
# for loop to run through models and epochs
for epoch in np.array([10,20,30,40,50, 60, 70, 80, 90, 100]):
epoch_num = int(epoch)
# LSTM Model
no_google()
LSTM_model = KerasRegressor(build_fn=create_LSTMmodel, verbose=0, epochs = epoch_num)
cv_scores['1 Timesteps'] += [np.mean(cross_val_score(LSTM_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
no_google_3timesteps()
LSTM_model = KerasRegressor(build_fn=create_LSTMmodel, verbose=0, epochs = epoch_num)
cv_scores['2 Timesteps'] += [np.mean(cross_val_score(LSTM_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
no_google_4timesteps()
LSTM_model = KerasRegressor(build_fn=create_LSTMmodel, verbose=0, epochs = epoch_num)
cv_scores['3 Timesteps'] += [np.mean(cross_val_score(LSTM_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
no_google_5timesteps()
LSTM_model = KerasRegressor(build_fn=create_LSTMmodel, verbose=0, epochs = epoch_num)
cv_scores['4 Timesteps'] += [np.mean(cross_val_score(LSTM_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
no_google_6timesteps()
LSTM_model = KerasRegressor(build_fn=create_LSTMmodel, verbose=0, epochs = epoch_num)
cv_scores['5 Timesteps'] += [np.mean(cross_val_score(LSTM_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
print('epoch '+ str(epoch) + ' done.')
epoch 10 done.
epoch 20 done.
epoch 30 done.
epoch 40 done.
epoch 50 done.
epoch 60 done.
epoch 70 done.
epoch 80 done.
epoch 90 done.
epoch 100 done.
cv_df = pd.DataFrame(cv_scores, index= ['10','20','30','40','50', '60', '70','80','90','100'])
cv_df.plot(marker="o")
plt.xlabel("Epoch")
plt.ylabel("Cross Validation Scores")
plt.title("Comparing cross validation scores for different timesteps included in training")
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.grid()
plt.show()
from sklearn.model_selection import cross_val_score
tscv = TimeSeriesSplit(n_splits=5)
no_google_6timesteps()
#no_google()
# create dict to save all the cv scores
cv_scores = {}
cv_scores['LSTM'] = []
cv_scores['Bi LSTM'] = []
cv_scores['FF'] = []
cv_scores['Conv'] = []
cv_scores['RNN'] = []
cv_scores['GRU'] = []
# forecast_models list
tscv = TimeSeriesSplit(n_splits=5)
#LSTM_model = KerasRegressor(build_fn=create_LSTMmodel, verbose=2, epochs = 10) # test with 10 epochs
#Bi_LSTM_model = KerasRegressor(build_fn=create_Bi_Directional_LSTMmodel, verbose=2, epochs = 50)
#forecast_models = [LSTM_model,Bi_LSTM_model]
# for loop to run through models and epochs
for epoch in np.array([10,20,30,40,50,60,70, 80, 90, 100, 110, 120 , 130, 140, 150]):
epoch_num = int(epoch)
# LSTM Model
LSTM_model = KerasRegressor(build_fn=create_LSTMmodel, verbose=0, epochs = epoch_num)
cv_scores['LSTM'] += [np.mean(cross_val_score(LSTM_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
# Bi LSTM Model
Bi_LSTM_model = KerasRegressor(build_fn=create_Bidirectional_LSTMmodel, verbose=0, epochs = epoch_num)
cv_scores['Bi LSTM'] += [np.mean(cross_val_score(Bi_LSTM_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
# Multi Bi LSTM Model
#Bi_multi_LSTM_model = KerasRegressor(build_fn=create_Bidirectional_multiLayer_LSTMmodel, verbose=0, epochs = epoch_num)
#cv_scores['Bi Multi LSTM '+str(epoch)] = np.mean(cross_val_score(Bi_multi_LSTM_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))
# Multi LSTM Model
#Multi_LSTM_model = KerasRegressor(build_fn=create_multiLayer_LSTMmodel, verbose=0, epochs = epoch_num)
#cv_scores['Multi LSTM '+str(epoch)] = np.mean(cross_val_score(Multi_LSTM_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))
# FF Model
#FF_model = KerasRegressor(build_fn=create_FFmodel, verbose=0, epochs = epoch_num)
#cv_scores['FF'] += [np.mean(cross_val_score(FF_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
# Conv model
#Conv_model = KerasRegressor(build_fn=create_Convmodel, verbose=0, epochs = epoch_num)
#cv_scores['Conv'] += [np.mean(cross_val_score(Conv_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
# RNN model
#RNN_model = KerasRegressor(build_fn=create_RNNmodel, verbose=0, epochs = epoch_num)
#cv_scores['RNN'] += [np.mean(cross_val_score(RNN_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
# GRU model
#GRU_model = KerasRegressor(build_fn=create_GRUmodel, verbose=0, epochs = epoch_num)
#cv_scores['GRU'] += [np.mean(cross_val_score(GRU_model, X_cv_df_scaled_reshaped, Y_cv_df_scaled_reshaped, cv= tscv))]
print('epoch '+ str(epoch) + ' done.')
epoch 10 done.
epoch 20 done.
epoch 30 done.
epoch 40 done.
epoch 50 done.
epoch 60 done.
epoch 70 done.
epoch 80 done.
epoch 90 done.
epoch 100 done.
epoch 110 done.
epoch 120 done.
epoch 130 done.
epoch 140 done.
epoch 150 done.
# visualization + explanation
# Keep only the models that were actually scored above (the commented-out ones have empty
# lists), and index by the 15 epoch settings that were run
cv_df = pd.DataFrame({k: v for k, v in cv_scores.items() if v},
                     index=['10','20','30','40','50','60','70','80','90','100','110','120','130','140','150'])
cv_df.plot(marker="o")
plt.xlabel("Epoch")
plt.ylabel("Cross Validation Scores")
plt.title("Comparing cross validation scores for different epochs across models")
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.grid()
plt.show()
cv_scores.pop('FF', None)
cv_scores.pop('Conv', None)
cv_scores.pop('RNN', None)
cv_scores.pop('GRU', None)
cv_df = pd.DataFrame(cv_scores, index= ['10','20','30','40','50', '60', '70', '80','90', '100', '110', '120' , '130', '140', '150'])
cv_df.iloc[:,:2].plot(marker="o")
plt.xlabel("Epoch")
plt.ylabel("Cross Validation Scores")
plt.title("Comparing cross validation scores for different epochs across models")
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.grid()
plt.show()
model = KerasRegressor(build_fn= create_multiLayer_LSTMmodel, verbose=1, epochs = 30)
optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
init_mode = ['uniform', 'lecun_uniform', 'normal', 'zero', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform']
activation = ['softplus', 'softsign', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']
learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
dropout_rate = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
batch_size = [2, 4, 8, 16, 32, 64, 128, 256]
neurons1 = [256, 512]
neurons2 = [8, 16, 32, 64, 128, 256]
neurons3 = [8, 16, 32, 64, 128, 256]
hidden_layers = [1,2 , 3 , 4 , 5]
epochs = [10, 30, 50, 70, 100]
momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
tscv = TimeSeriesSplit(n_splits=5)
param_grid = dict(batch_size = batch_size, dropout_rate = dropout_rate, activation= activation, hidden_layers = hidden_layers, momentum = momentum, neurons1 = neurons1)
grid = RandomizedSearchCV(estimator=model, n_iter=20, param_distributions=param_grid, cv=tscv, n_jobs=-1, verbose=2)
#grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=tscv)
#grid = BayesSearchCV(estimator=model, search_spaces=param_grid, n_jobs=-1, cv=tscv, n_iter = 5, verbose = 1)
grid_result = grid.fit( X_cv_df_scaled_reshaped , Y_cv_df_scaled_reshaped, verbose = 1)
Execution error
NameError: name 'create_multiLayer_LSTMmodel' is not defined
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
#means = grid_result.cv_results_['mean_test_score']
#stds = grid_result.cv_results_['std_test_score']
#params = grid_result.cv_results_['params']
#for mean, stdev, param in zip(means, stds, params):
#print("%f (%f) with: %r" % (mean, stdev, param))
Best: -0.003467 using {'activation': 'linear'}
-0.005195 (0.003973) with: {'activation': 'softplus'}
-0.034118 (0.049580) with: {'activation': 'softsign'}
-0.010156 (0.006519) with: {'activation': 'relu'}
-0.015307 (0.020636) with: {'activation': 'tanh'}
-0.020984 (0.025198) with: {'activation': 'sigmoid'}
-0.023543 (0.028380) with: {'activation': 'hard_sigmoid'}
-0.003467 (0.003376) with: {'activation': 'linear'}
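Beyond best_score_ and best_params_, the full search history lives in grid_result.cv_results_; a minimal sketch to rank the sampled configurations:
# Sketch: tabulate and rank all sampled hyperparameter settings
results = pd.DataFrame(grid_result.cv_results_)
print(results[['mean_test_score', 'std_test_score', 'params']]
      .sort_values('mean_test_score', ascending=False)
      .head(10))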
# Train and Plot loss
no_google_3timesteps()
forecast_model = create_LSTMmodel()
history = forecast_model.fit(X_df_scaled_reshaped, Y_df_scaled_reshaped, validation_data=(X_test_df_scaled_reshaped, Y_test_df_scaled_reshaped), epochs=100, batch_size=2, verbose=1, callbacks=callbacks_list)
plt.plot(history.history['loss'][5:], label='Training loss')
plt.plot(history.history['val_loss'][5:], label = 'Validation loss')
plt.xlabel("Epoch")
plt.ylabel("MSE Scores")
plt.grid()
plt.legend()
Epoch 1/100
3432/3432 [==============================] - 14s 4ms/step - loss: 0.0802 - val_loss: 0.0507
Epoch 2/100
3432/3432 [==============================] - 12s 4ms/step - loss: 0.0043 - val_loss: 0.0032
Epoch 3/100
3432/3432 [==============================] - 12s 3ms/step - loss: 0.0023 - val_loss: 0.0031
Epoch 4/100
3432/3432 [==============================] - 12s 4ms/step - loss: 0.0015 - val_loss: 0.0013
Epoch 5/100
3432/3432 [==============================] - 13s 4ms/step - loss: 0.0011 - val_loss: 5.5337e-04
Epoch 6/100
3432/3432 [==============================] - 12s 4ms/step - loss: 0.0019 - val_loss: 0.0024
Epoch 7/100
3432/3432 [==============================] - 12s 3ms/step - loss: 0.0015 - val_loss: 0.0553
Epoch 8/100
3432/3432 [==============================] - 12s 4ms/step - loss: 0.0012 - val_loss: 3.8257e-04
Epoch 9/100
3432/3432 [==============================] - 17s 5ms/step - loss: 0.0016 - val_loss: 0.0558
Epoch 10/100
3432/3432 [==============================] - 17s 5ms/step - loss: 0.0012 - val_loss: 1.3612e-04
Epoch 11/100
3432/3432 [==============================] - 12s 4ms/step - loss: 0.0010 - val_loss: 0.0178
Epoch 12/100
3432/3432 [==============================] - 12s 3ms/step - loss: 8.4093e-04 - val_loss: 0.0378
Epoch 13/100
3432/3432 [==============================] - 13s 4ms/step - loss: 9.8664e-04 - val_loss: 3.1121e-04
Epoch 14/100
3432/3432 [==============================] - 12s 4ms/step - loss: 8.6809e-04 - val_loss: 0.0015
Epoch 15/100
3432/3432 [==============================] - 12s 4ms/step - loss: 1.5254e-04 - val_loss: 2.5267e-04
Epoch 16/100
3432/3432 [==============================] - 12s 4ms/step - loss: 1.2087e-04 - val_loss: 3.9255e-04
Epoch 17/100
3432/3432 [==============================] - 12s 3ms/step - loss: 1.0432e-04 - val_loss: 8.5985e-04
Epoch 18/100
3432/3432 [==============================] - 12s 3ms/step - loss: 1.0894e-04 - val_loss: 2.3103e-04
Epoch 19/100
3432/3432 [==============================] - 14s 4ms/step - loss: 9.2247e-05 - val_loss: 6.0534e-04
Epoch 20/100
3432/3432 [==============================] - 12s 3ms/step - loss: 1.8540e-04 - val_loss: 5.1581e-05
Epoch 21/100
3432/3432 [==============================] - 13s 4ms/step - loss: 1.1912e-04 - val_loss: 8.9975e-04
Epoch 22/100
3432/3432 [==============================] - 12s 4ms/step - loss: 9.5153e-05 - val_loss: 5.6691e-05
Epoch 23/100
3432/3432 [==============================] - 12s 3ms/step - loss: 1.4925e-04 - val_loss: 9.0977e-05
Epoch 24/100
3432/3432 [==============================] - 12s 3ms/step - loss: 1.1193e-04 - val_loss: 0.0018
Epoch 25/100
3432/3432 [==============================] - 12s 3ms/step - loss: 2.0735e-04 - val_loss: 1.5323e-04
Epoch 26/100
3432/3432 [==============================] - 12s 3ms/step - loss: 1.4949e-04 - val_loss: 4.7804e-04
Epoch 27/100
3432/3432 [==============================] - 13s 4ms/step - loss: 1.2257e-04 - val_loss: 4.8564e-05
Epoch 28/100
3432/3432 [==============================] - 22s 6ms/step - loss: 1.2962e-04 - val_loss: 2.4096e-05
Epoch 29/100
3432/3432 [==============================] - 14s 4ms/step - loss: 1.4819e-04 - val_loss: 2.1660e-04
Epoch 30/100
3432/3432 [==============================] - 12s 4ms/step - loss: 5.0836e-05 - val_loss: 4.1854e-05
Epoch 31/100
3432/3432 [==============================] - 12s 4ms/step - loss: 3.9764e-05 - val_loss: 3.4385e-05
Epoch 32/100
3432/3432 [==============================] - 12s 3ms/step - loss: 2.7683e-05 - val_loss: 2.4856e-05
[Epochs 33-99 omitted: training loss drifts down from ~7.8e-05 into the 2e-05-4e-05 range, while val_loss oscillates between roughly 2.9e-05 and 1.5e-04 with no sustained trend.]
Epoch 100/100
3432/3432 [==============================] - 12s 3ms/step - loss: 2.8339e-05 - val_loss: 3.9380e-05
# Train and save an ensemble of forecast models (indices 8-99); no_google_3timesteps()
# is expected to rebuild the scaled/reshaped train and test arrays before each run.
for i in np.arange(8, 100):
    no_google_3timesteps()
    forecast_model = create_LSTMmodel()
    history = forecast_model.fit(X_df_scaled_reshaped, Y_df_scaled_reshaped,
                                 validation_data=(X_test_df_scaled_reshaped, Y_test_df_scaled_reshaped),
                                 epochs=100, batch_size=2, verbose=1, callbacks=callbacks_list)
    forecast_model.save('forecast_model' + str(i))
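# callbacks_list is defined earlier in the notebook and not shown in this section,
# but the "early stopping" message in the run saved as forecast_model10 below
# implies it contains an EarlyStopping callback. A minimal hypothetical sketch --
# the monitor and patience values are assumptions, not the notebook's actual settings:
from tensorflow.keras.callbacks import EarlyStopping

callbacks_list = [
    EarlyStopping(monitor='val_loss',  # watch validation loss
                  patience=10,         # assumed patience; actual value not shown here
                  verbose=1)           # prints "Epoch NNNNN: early stopping" when triggered
]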
[The notebook truncated the start of this cell's output; the log resumes mid-run at epoch 65.]
Epoch 65/100
3432/3432 [==============================] - 12s 3ms/step - loss: 2.0692e-05 - val_loss: 1.0382e-04
[Epochs 66-99 omitted: training loss holds near 1.7e-05-3.4e-05 while val_loss climbs steadily from about 1.1e-04 to 2.1e-04, consistent with overfitting.]
Epoch 100/100
3432/3432 [==============================] - 14s 4ms/step - loss: 3.1852e-05 - val_loss: 2.3153e-04
Epoch 1/100
3432/3432 [==============================] - 14s 4ms/step - loss: 0.0372 - val_loss: 0.0237
Epoch 2/100
3432/3432 [==============================] - 12s 3ms/step - loss: 0.0035 - val_loss: 0.0105
[Epochs 3-24 omitted: training loss falls from ~2.1e-03 into the 4e-05-8e-05 range, with val_loss improving in step.]
Epoch 25/100
3432/3432 [==============================] - 12s 4ms/step - loss: 4.7928e-05 - val_loss: 4.2633e-05
Epoch 00024: early stopping
INFO:tensorflow:Assets written to: forecast_model10/assets
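# Each saved run can be reloaded later for inference. A minimal sketch, assuming
# the SavedModel directories written by forecast_model.save(...) above and the
# test arrays from the training loop; model10 and preds_scaled are illustrative
# names, not from the notebook:
from tensorflow.keras.models import load_model

model10 = load_model('forecast_model10')
preds_scaled = model10.predict(X_test_df_scaled_reshaped)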
Epoch 1/100
3432/3432 [==============================] - 14s 4ms/step - loss: 0.0535 - val_loss: 0.0238
Epoch 2/100
3432/3432 [==============================] - 13s 4ms/step - loss: 0.0033 - val_loss: 0.0052
Epoch 3/100
3432/3432 [==============================] - 12s 3ms/step - loss: 0.0029 - val_loss: 0.0018
[Epochs 4-99 omitted: training loss settles into the 2e-05-4e-05 range; val_loss bottoms out near 1e-04 (epochs 10-25) and then climbs steadily toward ~4e-04 in the final epochs.]
Epoch 100/100
3432/3432 [==============================] - 12s 3ms/step - loss: 4.4460e-05 - val_loss: 4.1251e-04
Epoch 1/100
3432/3432 [==============================] - 14s 4ms/step - loss: 0.0379 - val_loss: 0.0724
Epoch 2/100
3432/3432 [==============================] - 12s 4ms/step - loss: 0.0030 - val_loss: 0.0079
[Epochs 3-99 omitted: training loss settles near 2e-05-3e-05; val_loss reaches its best value of about 9.8e-05 at epoch 23, then trends upward to ~4e-04.]
Epoch 100/100
3432/3432 [==============================] - 13s 4ms/step - loss: 3.9270e-05 - val_loss: 4.4846e-04
Epoch 1/100
3432/3432 [==============================] - 15s 4ms/step - loss: 0.0318 - val_loss: 0.0022
[Epochs 2-99 omitted: training loss settles near 2e-05-5e-05; val_loss reaches its best value of about 7.8e-05 at epoch 35 and afterwards fluctuates between roughly 1e-04 and 3e-04.]
Epoch 100/100
3432/3432 [==============================] - 13s 4ms/step - loss: 4.1386e-05 - val_loss: 3.4245e-04
Epoch 1/100
3432/3432 [==============================] - 13s 3ms/step - loss: 0.0304 - val_loss: 0.1314
[Epochs 2-99 omitted: training loss settles near 2e-05-3e-05; val_loss bottoms out around 1.0e-04 at epoch 25 and then plateaus in the 1.7e-04-2.2e-04 band.]
Epoch 100/100
3432/3432 [==============================] - 12s 3ms/step - loss: 3.4143e-05 - val_loss: 1.7996e-04
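# fit() returns a History object, so the loss/val_loss trajectories printed in
# these logs can also be plotted. A minimal sketch for the most recent run
# (inside the loop above, `history` is overwritten on each iteration):
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.yscale('log')  # losses span several orders of magnitude
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()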
Epoch 1/100
3432/3432 [==============================] - 14s 4ms/step - loss: 0.0395 - val_loss: 0.0223
Epoch 2/100
3432/3432 [==============================] - 12s 3ms/step - loss: 0.0028 - val_loss: 0.0022
[Epochs 3-34 omitted: training loss falls into the 2e-05-6e-05 range; val_loss reaches its best value of about 7.1e-05 at epoch 16 before fluctuating between roughly 1e-04 and 1.7e-03.]
Epoch 35/100
3432/3432 [==============================] - 12s 4ms/step - loss: 5.5509e-05 - val_loss: 4.0850e-04
Epoch 36/100
3432/3432 [==============================] - 12s 3ms/step - loss: 7.8698e-05 - val_loss: 7.6043e-04
Epoch 37/100
3432/3432 [==============================] - 12s 3ms/step - loss: 5.4262e-05 - val_loss: 5.7153e-04
Epoch 38/100
1289/3432 [==========>...................] - ETA: 6s - loss: 3.8496e-05
Execution error
KeyboardInterrupt:
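The run above was stopped by hand with a keyboard interrupt. A minimal sketch of an alternative, assuming the model and data names used elsewhere in this notebook: let the EarlyStopping callback (already imported at the top) end training once val_loss stops improving, so a 100-epoch budget never has to be cut short manually.
early_stop = EarlyStopping(monitor='val_loss', patience=10,
                           restore_best_weights=True)  # roll back to the best epoch seen
history = forecast_model.fit(X_train, y_train,
                             validation_data=(X_test, y_test),
                             epochs=100, batch_size=8, verbose=1,
                             callbacks=[early_stop])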
forecast_model = create_FFmodel()
history = forecast_model.fit(X_train, y_train, validation_data=(X_test, y_test),
                             epochs=100, batch_size=8, verbose=1, callbacks=callbacks_list)
# Skip the first five epochs so the large initial losses don't flatten the curves
plt.plot(history.history['loss'][5:], label='Training loss')
plt.plot(history.history['val_loss'][5:], label='Validation loss')
plt.xlabel("Epoch")
plt.ylabel("MSE loss")
plt.grid()
plt.legend()
Execution error
NameError: name 'create_FFmodel' is not defined
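The cell fails because create_FFmodel is defined in an earlier cell that was not re-executed after the interrupt. A hypothetical stand-in follows, only so this section can run in isolation; the layer sizes are assumptions, not the notebook's actual architecture.
def create_FFmodel():
    # Hypothetical placeholder for the feed-forward model defined earlier in
    # the notebook; re-run the original cell to get the real architecture
    model = Sequential()
    model.add(Dense(64, activation='relu', input_shape=(X_train.shape[1],)))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    return model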
import tensorflow as tf; print(tf.__version__)
2.4.1
from tensorflow import keras
def ensemble_load():
    """Load the eight saved forecast models into a global ensemble list."""
    global forecast_ensemble
    forecast_ensemble = list()
    for i in range(8):
        reconstructed_model = keras.models.load_model("forecast_model" + str(i))
        forecast_ensemble.append(reconstructed_model)
def ensemble_predict(ensemble, number_of_predictions):
    """Return the ensemble-average forecast, inverse-transformed to case counts."""
    predictions = np.zeros(number_of_predictions)
    for model in ensemble:
        forecast = model.predict(X_test_df_scaled_reshaped)
        # Append the scaled forecast as the target column so the fitted scaler
        # can inverse-transform the whole feature matrix in one call
        full_scaled_df_forecast = np.append(X_test, forecast, axis=1)
        full_df_forecast = scaler.inverse_transform(full_scaled_df_forecast)
        predictions = predictions + full_df_forecast[:, full_df_scaled.shape[1] - 1]
    return predictions / len(ensemble)
def ensemble_rmse(ensemble):  # renamed from ensemble_rsme (typo)
    """Print the test-set RMSE of each individual ensemble member."""
    target_col = full_df_scaled.shape[1] - 1
    test_df_transformed = scaler.inverse_transform(full_df_scaled[6863:, :])
    for model in ensemble:
        forecast = model.predict(X_test_df_scaled_reshaped)
        full_scaled_df_forecast = np.append(X_test, forecast, axis=1)
        full_df_forecast = scaler.inverse_transform(full_scaled_df_forecast)
        print('RMSE: ', np.sqrt(mean_squared_error(test_df_transformed[:, target_col],
                                                   full_df_forecast[:, target_col])))
ensemble_load()
ensemble_rmse(forecast_ensemble)
RMSE: 883.2408976506175
RMSE: 706.9516520361925
RMSE: 851.1251836329398
RMSE: 877.217921316251
RMSE: 714.5290387977115
RMSE: 882.3204028078022
RMSE: 881.1847012161662
RMSE: 867.0039216194143
test = ensemble_predict(forecast_ensemble, 1747)
from sklearn.metrics import median_absolute_error
from sklearn.metrics import max_error
from sklearn.metrics import r2_score
test_df_transformed = scaler.inverse_transform(full_df_scaled[6863:, :])
#full_df_forecast = full_df_forecast.clip(min=0)
print('RMSE: ',np.sqrt(mean_squared_error(test_df_transformed[:,full_df_scaled.shape[1]-1], test)))
print('Median Absolute Error: ',median_absolute_error(test_df_transformed[:,full_df_scaled.shape[1]-1], test))
print('Max Error: ',max_error(test_df_transformed[:,full_df_scaled.shape[1]-1], test))
print('R squared Score: ', r2_score(test_df_transformed[:,full_df_scaled.shape[1]-1], test))
RMSE: 677.7825422447407
Median Absolute Error: 353.6334730070521
Max Error: 5244.0236521973275
R squared Score: 0.9999904765652292
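Averaging pays off here: the ensemble's test RMSE of about 678 beats every individual member, whose RMSEs range from roughly 707 to 883, which is the usual variance-reduction payoff of ensembling. The near-perfect R² of 0.99999 likely reflects the huge spread in case counts across counties rather than tight per-county fits, given a max error above 5,200 cases.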
forecast = forecast_model.predict(X_test_df_scaled_reshaped)
full_scaled_df_forecast = np.append(X_test, forecast, axis=1)
full_df_forecast = scaler.inverse_transform(full_scaled_df_forecast)
test_df_transformed = scaler.inverse_transform(full_df_scaled[6863:, :])
#full_df_forecast = full_df_forecast.clip(min=0)
print('RMSE: ', np.sqrt(mean_squared_error(test_df_transformed[:, full_df_scaled.shape[1]-1],
                                           full_df_forecast[:, full_df_scaled.shape[1]-1])))
print('Scaled MSE: ', mean_squared_error(full_df_scaled[6863:, full_df_scaled.shape[1]-1],
                                         full_scaled_df_forecast[:, full_df_scaled.shape[1]-1]))
RMSE: 966.9673177808254
Scaled MSE: 5.3476566521586154e-05
forecast_train = forecast_model.predict(X_df_scaled_reshaped)
full_scaled_df_forecast_train = np.append(X_train, forecast_train, axis=1)
full_df_forecast_train = scaler.inverse_transform(full_scaled_df_forecast_train)
test_df_transformed_train = scaler.inverse_transform(train_combined[0:6863, :])
#full_df_forecast = full_df_forecast.clip(min=0)
print('RMSE: ', np.sqrt(mean_squared_error(test_df_transformed_train[:, full_df_scaled.shape[1]-1],
                                           full_df_forecast_train[:, full_df_scaled.shape[1]-1])))
print('Scaled MSE: ', mean_squared_error(train_combined[0:6863, full_df_scaled.shape[1]-1],
                                         full_scaled_df_forecast_train[:, full_df_scaled.shape[1]-1]))
RMSE: 664.8077847481788
Scaled MSE: 2.5277383448224485e-05
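For this single model, the training RMSE of about 665 sits well below the test RMSE of about 967 reported above, a gap consistent with some overfitting; the ensemble average narrows the test-set figure to about 678.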
# This cell fails (ValueError below), most likely because predict is handed the
# 2-D X_test instead of the reshaped X_test_df_scaled_reshaped used above
forecast = forecast_model.predict(X_test)
full_scaled_df_forecast = np.append(X_test, forecast, axis=1)
full_df_forecast = scaler.inverse_transform(full_scaled_df_forecast)
test_df_transformed = scaler.inverse_transform(full_df_scaled[int(full_df_scaled.shape[0]*.8):, :])
#full_df_forecast = full_df_forecast.clip(min=0)
print('RMSE: ', np.sqrt(mean_squared_error(test_df_transformed[:, full_df_scaled.shape[1]-1],
                                           full_df_forecast[:, full_df_scaled.shape[1]-1])))
print('Scaled MSE: ', mean_squared_error(full_df_scaled[int(full_df_scaled.shape[0]*.8):, full_df_scaled.shape[1]-1],
                                         full_scaled_df_forecast[:, full_df_scaled.shape[1]-1]))
Execution error
ValueError: in user code:
/shared-libs/python3.7/py/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:1478 predict_function *
[... distribute-strategy stack frames omitted; the traceback is truncated in the original output ...]
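A sketch of the likely fix, under the assumption that the model expects the same 3-D shape as X_test_df_scaled_reshaped; mirror however that array was actually built earlier in the notebook.
# Assumed (samples, timesteps=1, features) layout; adjust to match the
# reshape that produced X_test_df_scaled_reshaped
X_test_reshaped = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
forecast = forecast_model.predict(X_test_reshaped)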
time_values = time.iloc[6863:,:][time['geo_value_t_minus_2']==25].time_value
actual_covid_cases = clean_good_full_df.iloc[6863:,21][clean_good_full_df.iloc[6863:,[0,21]].geo_value_t_minus_2 == 25]
predicted_covid_cases = full_df_forecast[clean_good_full_df.iloc[6863:,[0,21]].geo_value_t_minus_2 == 25][:,full_df_scaled.shape[1]-1]
plt.figure(figsize=(13,6))
plt.plot(time_values, actual_covid_cases, label='Actual Covid Cases')
plt.plot(time_values,predicted_covid_cases, label='Predicted Covid Cases')
plt.title("Los Angeles County Covid Cases Forecast")
plt.xlabel("Time")
plt.ylabel("Number of Covid Cases")
plt.xticks(np.arange(0,77,10),time_values.to_numpy()[np.arange(0,77,10)])
plt.grid()
plt.legend()
UserWarning: Boolean Series key will be reindexed to match DataFrame index.
# create empty dictionary
county_RMSE = {}
for county in clean_good_full_df.iloc[6863:, [0, 21]].geo_value_t_minus_2.unique():
    # Train: filter to the county's predicted and actual covid cases
    train_mask = pd.DataFrame(test_df_transformed_train).iloc[0:6863, [0, 21]][0] == county
    predicted_covid_train = full_df_forecast_train[train_mask][:, full_df_scaled.shape[1]-1]
    actual_covid_train = test_df_transformed_train[0:6863, 21][train_mask]
    # Test: filter to the county's predicted and actual covid cases
    test_mask = clean_good_full_df.iloc[6863:, [0, 21]].geo_value_t_minus_2 == county
    predicted_covid = full_df_forecast[test_mask][:, full_df_scaled.shape[1]-1]
    actual_covid = clean_good_full_df.iloc[6863:, 21][test_mask]
    # calculate RMSE: the square must sit inside the mean;
    # np.sqrt(np.mean(errors)**2) would only give |mean error|
    RMSE_train = np.sqrt(np.mean((actual_covid_train - predicted_covid_train)**2))
    RMSE_test = np.sqrt(np.mean((actual_covid - predicted_covid)**2))
    # save RMSE
    county_RMSE[county] = [RMSE_train, RMSE_test]
county_RMSE
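The parenthesis placement in the RMSE formula matters. A quick check with made-up residuals (purely illustrative) shows how the np.sqrt(np.mean(errors)**2) form can hide errors entirely:
import numpy as np

errors = np.array([3.0, -3.0, 3.0, -3.0])  # hypothetical residuals
print(np.sqrt(np.mean(errors)**2))  # 0.0: |mean error| lets signs cancel
print(np.sqrt(np.mean(errors**2)))  # 3.0: true RMSE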
# .rename(columns = {'6.0': 'Merced', '22.0': 'San Diego'})
# 6: minimum 56.95034971416714 - geo_value 6047, Merced County
# 22: maximum 2064.8157958559805 - geo_value 6073, San Diego County
RMSE_df = pd.DataFrame.from_dict(county_RMSE, orient='index').sort_index()
# plot
plt.figure(figsize=(13,6))
plt.scatter(np.arange(1,25),RMSE_df[1], label='Testing RMSE')
plt.scatter(np.arange(1,25),RMSE_df[0], label='Training RMSE')
plt.title("County Training and Testing RMSE values")
plt.xlabel("County (Ascendingly ordered by amount of Covid Cases")
plt.ylabel("RMSE")
plt.xticks(np.arange(1,25),RMSE_df.index)
plt.grid()
plt.legend(loc='upper left')
# get predicted and actual values of merced
time_values_merced = time.iloc[6863:,:][time['geo_value_t_minus_2']==6].time_value
actual_covid_cases_merced = clean_good_full_df.iloc[6863:,21][clean_good_full_df.iloc[6863:,[0,21]].geo_value_t_minus_2 == 6]
predicted_covid_cases_merced = full_df_forecast[clean_good_full_df.iloc[6863:,[0,21]].geo_value_t_minus_2 == 6][:,full_df_scaled.shape[1]-1]
merced_len = len(time_values_merced)
# get predicted and actual values of san diego
time_values_sd = time.iloc[6863:,:][time['geo_value_t_minus_2']==22].time_value
actual_covid_cases_sd = clean_good_full_df.iloc[6863:,21][clean_good_full_df.iloc[6863:,[0,21]].geo_value_t_minus_2 == 22]
predicted_covid_cases_sd = full_df_forecast[clean_good_full_df.iloc[6863:,[0,21]].geo_value_t_minus_2 == 22][:,full_df_scaled.shape[1]-1]
sd_len = len(time_values_sd)
# plot (plt.subplots creates the figure, so no separate plt.figure call is needed)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
fig.suptitle('Comparing Test predictions for Merced and San Diego')
# figure 1
ax1.plot(time_values_merced, actual_covid_cases_merced, label='Actual Covid Cases')
ax1.plot(time_values_merced, predicted_covid_cases_merced, label='Predicted Covid Cases')
ax1.grid()
ax1.legend()
# figure 2
ax2.plot(time_values_sd, actual_covid_cases_sd, label='Actual Covid Cases')
ax2.plot(time_values_sd, predicted_covid_cases_sd, label='Predicted Covid Cases')
ax2.grid()
ax2.legend()
# labeling the plots
# figure 1
ax1.set_xlabel("Time")
ax1.set_ylabel("Number of Covid Cases in Merced")
plt.sca(ax1)
plt.xticks(np.arange(0, merced_len, 17), time_values_merced.to_numpy()[np.arange(0, merced_len, 17)])
# figure 2
ax2.set_xlabel("Time")
ax2.set_ylabel("Number of Covid Cases in San Diego")
plt.sca(ax2)
plt.xticks(np.arange(0, sd_len, 17), time_values_sd.to_numpy()[np.arange(0, sd_len, 17)])
plt.tight_layout(pad=2.0)
UserWarning: Boolean Series key will be reindexed to match DataFrame index. (raised twice by the boolean filters above)