# This is a short example using TensorFlow
# to carry out a simple linear regression at a low level.
# I copied the core of this code from a Google TensorFlow and machine learning specialisation course which I am studying.
# I have studied it closely and commented on almost every line.
# I have made a few changes, including the code to plot the prediction.
# I have also added plots with matplotlib, which really helped with understanding exactly how this works.
# Hopefully this can help you to learn as well!
# Please add comments below if you can offer any feedback.
# import required libraries
import tensorflow as tf
import matplotlib.pyplot as plt
# I found this snippet via Google: it sets all figures to the same size on Kaggle.
from pylab import rcParams
rcParams['figure.figsize'] = 10, 10
# Build the training data: a single input feature X (the values 0..9)
# and the targets Y generated from the ground-truth line Y = 2*X + 10
# that training should recover.
X = tf.range(10, dtype=tf.float32)
Y = 2 * X + 10

# Report the training tensors.
print('Tensors: X for input and Y for output have been created')
print("X:{}".format(X))
print("Y:{}".format(Y))
print('\n')

# Build the held-out test data on the disjoint range 10..19, generated
# from the same ground-truth line, so generalisation can be checked.
X_test = tf.range(10, 20, dtype=tf.float32)
Y_test = 2 * X_test + 10

# Report the test tensors.
print('Tensors: X_test for input and Y_test for output have been created')
print("X_test:{}".format(X_test))
print("Y_test:{}".format(Y_test))
print('\n')
def loss_mse(X, Y, w0, w1):
    """Return the mean-squared-error loss of the linear model on (X, Y).

    The model's prediction is Y_hat = w0*X + w1; the loss is the mean of
    the squared differences between Y_hat and the true outputs Y.
    """
    predictions = w0 * X + w1
    squared_errors = tf.square(predictions - Y)
    return tf.reduce_mean(squared_errors)
def compute_gradients(X, Y, w0, w1):
    """Return the gradients of the MSE loss with respect to [w0, w1].

    Records the loss computation on a GradientTape so TensorFlow's
    automatic differentiation can produce d(loss)/d(w0) and d(loss)/d(w1)
    for the model Y_hat = w0*X + w1 evaluated on the data (X, Y).
    """
    with tf.GradientTape() as tape:
        current_loss = loss_mse(X, Y, w0, w1)
    # The tape is non-persistent, so gradient() may be called exactly once.
    return tape.gradient(current_loss, [w0, w1])
# Hyperparameters for gradient descent.
STEPS = 100          # number of training iterations (any value >= 1)
LEARNING_RATE = .02  # step size applied to each gradient update
MSG = "STEP {step} - loss: {loss}, w0: {w0}, w1: {w1}"

# The Variable() constructor requires an initial value for the variable,
# which can be a Tensor of any type and shape. Both weights start at 0.0.
# NOTE: the original code created w0/w1 twice and called
# compute_gradients() once outside the loop with the result discarded;
# the training loop computes fresh gradients every step before using
# them, so the duplicates were dead code and have been removed.
w0 = tf.Variable(0.0)
w1 = tf.Variable(0.0)
# Set up the figure titles and axis labels once, before training: these
# calls are loop-invariant, so re-issuing them on every iteration (as the
# original code did) only wasted work. The rendered figures are identical.
plt.figure(1)
plt.title("Visualization of training process")
plt.xlabel("X Input")
plt.ylabel("Y - Output. Darker lines are later predictions")
plt.figure(2)
plt.title("Visualization of loss against steps taken")
plt.xlabel("Steps")
plt.ylabel("Loss - Mean Squared Error")

for step in range(0, STEPS + 1):
    # Calculate the gradients of the loss w.r.t. w0 and w1 for this step.
    dw0, dw1 = compute_gradients(X, Y, w0, w1)

    # Current model prediction (y = w0*x + w1) with this step's weights.
    GradientY = w0 * X + w1

    # Plot the evolving fit on figure 1; later steps are drawn darker
    # (greyscale runs from 0.8 at step 0 down towards 0.0).
    greyscale = 0.8 - ((step / STEPS) * 0.8)
    percGrey = str(greyscale)
    plt.figure(1)
    plt.plot(X, GradientY, c=percGrey)

    # Repeat on figure 3 (the final summary figure) to show that training
    # only ever saw the X data set, never the test data.
    plt.figure(3)
    plt.plot(X, GradientY, c=percGrey)

    # Plot the pre-update training loss for this step on figure 2.
    loss = loss_mse(X, Y, w0, w1)
    plt.figure(2)
    plt.scatter(step, loss, 5, c='grey')

    # Gradient-descent update: subtract each gradient scaled by the
    # learning rate from its weight.
    w0.assign_sub(dw0 * LEARNING_RATE)
    w1.assign_sub(dw1 * LEARNING_RATE)

    # Print the (post-update) loss every 100 steps to avoid spamming the
    # output window.
    if step % 100 == 0:
        loss = loss_mse(X, Y, w0, w1)
        print(MSG.format(step=step, loss=loss, w0=w0.numpy(), w1=w1.numpy()))
# Evaluate generalisation: compute the MSE on the held-out test data using
# the trained weights w0 and w1. The original code evaluated loss.numpy()
# as a bare expression, which displays in a notebook but is silently
# discarded in a plain script, so the value is now printed explicitly.
loss = loss_mse(X_test, Y_test, w0, w1)
print("Test loss (MSE): {}".format(loss.numpy()))
# Now we run the prediction! The inputs X_Prediction cover 0..19 — both
# the training interval [0, 10) and the test interval [10, 20) — and the
# outputs come from applying the trained weights w0 and w1.
X_Prediction = tf.range(20, dtype=tf.float32)
Y_Prediction = w0 * X_Prediction + w1

# Final summary figure: training points, test points and the fitted line
# (figure 3 also carries the per-step fit lines drawn during training).
plt.figure(3)
plt.xlabel("X - Feature")
plt.ylabel("Y - Output")
# Training data as circles, test data as crosses, prediction as a line.
plt.plot(X, Y, 'o', label='X data set')
plt.plot(X_test, Y_test, 'x', label='X test set')
plt.plot(X_Prediction, Y_Prediction, label='Prediction')
plt.legend()
plt.show()

```
Tensors: X for input and Y for output have been created
X:[0. 1. 2. 3. 4. 5. 6. 7. 8. 9.]
Y:[10. 12. 14. 16. 18. 20. 22. 24. 26. 28.]
Tensors: X_test for input and Y_test for output have been created
X_test:[10. 11. 12. 13. 14. 15. 16. 17. 18. 19.]
Y_test:[30. 32. 34. 36. 38. 40. 42. 44. 46. 48.]
STEP 0 - loss: 35.70719528198242, w0: 4.079999923706055, w1: 0.7599999904632568
STEP 100 - loss: 2.6017532348632812, w0: 2.4780430793762207, w1: 7.002389907836914
```