I'm not sure how useful this really is for a regression task, but it would be quite nice to see how well my algorithm has learnt the training set.
I found plotting a 2D problem quite simple, yet I'm having trouble plotting in 3D.
import numpy as np
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection

def gradient_descent(x, y, w, lr, m, n_iter):
    xTrans = x.transpose()
    for i in range(n_iter):
        prediction = np.dot(x, w)
        loss = prediction - y
        cost = np.sum(loss ** 2) / m
        print("Iteration %d | Cost: %f" % (i + 1, cost))
        gradient = np.dot(xTrans, loss) / m  # avg gradient (the factor of 2 from differentiating loss**2 is folded into lr)
        w -= lr * gradient  # update the weight vector
    return w
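(As a quick check that the function itself behaves, here is a toy run with made-up numbers, fitting y = 2*x on three points; I'd expect the learnt weight to come out near 2. It prints one cost line per iteration.)

toy_x = np.array([[1.0], [2.0], [3.0]])
toy_y = np.array([2.0, 4.0, 6.0])
print(gradient_descent(toy_x, toy_y, np.zeros(1), 0.01, 3, 200))  # ~[2.]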
it = np.ones(shape=(100, 3))  # the third column stays at 1 and acts as the bias input
x = np.arange(1, 200, 20)
d = np.random.uniform(-100, 100, 100)  # uniform noise for the targets
m, n = np.shape(it)
# initialise weights to 0
w = np.zeros(n)
# fill the first two columns of it with every (a, b) combination drawn from x
indx = 0
for a, b in itertools.product(x, x):
    it[indx][0] = a
    it[indx][1] = b
    indx += 1
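(Side note: I believe the loop above could be vectorised along these lines, with the ordering matching itertools.product, though I've kept the loop in my actual script.)

a, b = np.meshgrid(x, x, indexing='ij')  # first coordinate varies slowest, like product(x, x)
it[:, 0] = a.ravel()
it[:, 1] = b.ravel()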
# targets: a linear function of the inputs plus the uniform noise
y = .4 * it[:, 0] + 1.4 * it[:, 1] + d
n_iter = 1500  # number of iterations
lr = 0.00001  # learning rate / alpha
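(An aside from debugging, not a change I've made to the script: I assume the learning rate has to be this tiny only because the raw inputs run up to ~181. Standardising the two feature columns, as sketched below, should let a much larger rate such as 0.1 converge, although the learnt weights would then refer to the scaled inputs.)

feats = it[:, :2]
it[:, :2] = (feats - feats.mean(axis=0)) / feats.std(axis=0)  # leave the bias column of ones alone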
trained_w = gradient_descent(it, y, w, lr, m, n_iter)
# predictions of the fitted plane at the training inputs
result = trained_w[0] * it[:, 0] + trained_w[1] * it[:, 1] + trained_w[2]
print("Final weights: %f | %f | %f" % (trained_w[0], trained_w[1], trained_w[2]))
# scatter of data set + trained function hyperplane
fig = plt.figure()
plt3d = fig.add_subplot(projection='3d')  # gca(projection=...) and Axes.hold no longer exist in current matplotlib
plt3d.scatter(it[:, 0], it[:, 1], y)
x_surf, y_surf = np.meshgrid(it[:, 0], it[:, 1])
plt3d.plot_surface(x_surf, y_surf, result)
plt.show()
The resulting plot looks a little odd.
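I suspect the surface step is to blame: result is a flat 100-element vector, while x_surf and y_surf are 100x100 grids built from the unsorted data columns, so the Z values can't line up with the grid. My guess at what the surface evaluation should look like instead, evaluating the fitted plane on a small regular grid (I haven't confirmed this is the intended approach):

grid = np.linspace(it[:, 0].min(), it[:, 0].max(), 20)
x_surf, y_surf = np.meshgrid(grid, grid)
z_surf = trained_w[0] * x_surf + trained_w[1] * y_surf + trained_w[2]  # plane evaluated on the grid, 20x20 like x_surf
plt3d.plot_surface(x_surf, y_surf, z_surf, alpha=0.3)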