import matplotlib.pyplot as plt
import numpy as np
from mlspear import *
# Build a small feed-forward classifier: 2 inputs -> Tanh(8) -> PReLU(8) -> 1 output,
# printing the training error as it learns, then render the architecture.
layers = [Tanh(2, 8), PReLU(8, 8), Classification(8, 1)]
model = NeuralNetwork(layers, print_error = True)
model.draw_neural_network()
#Classification (Non Linear)
# Sample 10,000 2-D standard-normal points and keep two radial bands:
# an inner disk (r < 1, plotted green) and an annulus (2 < r < 3, plotted red).
# Vectorized with boolean masks instead of a per-point Python loop; the row
# order within each class is the same as the original append loop produced.
points = np.random.randn(10000, 2)
r = np.sqrt((points ** 2).sum(axis = 1))  # distance of each point from the origin
green = points[r < 1]
red = points[(r > 2) & (r < 3)]
plt.scatter(green[:, 0], green[:, 1], color = 'green')
plt.scatter(red[:, 0], red[:, 1], color = 'red')
plt.show()

# Stack both classes into one design matrix; green rows are labeled 1, red rows 0.
X = np.vstack((green, red))
Y = np.zeros((X.shape[0], 1))
Y[0:green.shape[0], 0] = 1
To train the neural network, use the train method, which takes the inputs X, the targets Y, the number of epochs, and the learning rate (plus optional keyword arguments such as batch_size).
# Full-batch gradient descent: 3000 epochs at learning rate 0.0001
# (batch_size = all rows, so every epoch uses the whole dataset).
model.train(X, Y, 3000, 0.0001, batch_size = X.shape[0])
Training Progress: 100%|██████████| 3000/3000 [00:23<00:00, 125.94it/s]
#Plot Decision boundary
# Score a 1000x1000 grid over [-3, 3]^2 in a single batched predict call
# (model.predict accepts an (n, 2) matrix, as the regression example shows),
# instead of one predict call per grid point, then keep the grid points whose
# predicted probability lies within 0.01 of the 0.5 decision threshold.
xs = np.linspace(-3, 3, 1000)
ys = np.linspace(-3, 3, 1000)
gx, gy = np.meshgrid(xs, ys, indexing = 'ij')  # 'ij' preserves the original x-outer/y-inner order
grid = np.column_stack((gx.ravel(), gy.ravel()))
predictions = model.predict(grid)
boundary = grid[np.abs(predictions.ravel() - 0.5) < 0.01]

plt.scatter(boundary[:, 0], boundary[:, 1], color = 'blue')
plt.scatter(green[:, 0], green[:, 1], color = 'green')
plt.scatter(red[:, 0], red[:, 1], color = 'red')
plt.show()
The train method allows other parameters to be passed in. For example, you can set the optimizer (optimizer), momentum type (mtype), and the momentum parameter (mu).
# Momentum is enabled by passing mtype ('nesterov' or 'conventional') together
# with a momentum coefficient mu in (0, 1); optimizer picks the update rule.
model.train(X, Y, 6, 0.0001,
            optimizer = 'rmsprop', mtype = 'nesterov',
            mu = 0.0000001, batch_size = 300)
Training Progress: 100%|██████████| 6/6 [00:38<00:00, 6.49s/it]
Again, you can plot the decision boundary and you will get a similar result.
#Plot Decision boundary
# Same boundary plot as before, but vectorized: score the whole 1000x1000 grid
# over [-3, 3]^2 with one batched model.predict call (predict accepts an (n, 2)
# matrix) rather than a million per-point calls, then keep points within 0.01
# of the 0.5 decision threshold.
xs = np.linspace(-3, 3, 1000)
ys = np.linspace(-3, 3, 1000)
gx, gy = np.meshgrid(xs, ys, indexing = 'ij')  # 'ij' matches the original x-outer/y-inner order
grid = np.column_stack((gx.ravel(), gy.ravel()))
predictions = model.predict(grid)
boundary = grid[np.abs(predictions.ravel() - 0.5) < 0.01]

plt.scatter(boundary[:, 0], boundary[:, 1], color = 'blue')
plt.scatter(green[:, 0], green[:, 1], color = 'green')
plt.scatter(red[:, 0], red[:, 1], color = 'red')
plt.show()
This package also allows users to create a linear regression model. Here is an example of how to do it. First, let us create two gaussian clouds.
#Regression
# Two gaussian clouds of 5000 points each; the second is shifted to center (4, 5).
yellow_1 = np.random.randn(5000, 2)
yellow_2 = np.random.randn(5000, 2) + np.array([4, 5])
plt.scatter(yellow_1[:, 0], yellow_1[:, 1], color = 'gold')
plt.scatter(yellow_2[:, 0], yellow_2[:, 1], color = 'gold')
plt.show()

# Regress the y-coordinate on the x-coordinate; the model expects (n, 1) column
# vectors, and reshape(-1, 1) avoids recomputing the column just for its length.
data = np.vstack((yellow_1, yellow_2))
X = data[:, 0].reshape(-1, 1)
Y = data[:, 1].reshape(-1, 1)
Next, create a linear regression model by calling the NeuralNetwork class with one regression layer.
# Linear regression as a one-layer network: a single Regression(1, 1) layer
# maps one input feature to one output, printing the error during training.
model = NeuralNetwork([Regression(1, 1)], print_error = True)
Train the model using vanilla gradient descent with 200 epochs at learning rate 0.00001 (Note: you can use any gradient descent method).
# Vanilla (full-batch) gradient descent: 200 epochs at learning rate 1e-5.
model.train(X, Y, 200, 0.00001, batch_size = X.shape[0])
Training Progress: 100%|██████████| 200/200 [00:00<00:00, 3001.65it/s]
Finally, draw the line of best fit to the dataset.
# Predict the fitted values for every X and draw them as the line of best fit;
# with a linear model every (x, y_hat) pair lies on the same line, so the plot
# order of the unsorted X values does not distort the drawn line.
Y_hat = model.predict(X)
plt.plot(X, Y_hat, color = 'blue')
plt.scatter(yellow_1[:, 0], yellow_1[:, 1], color = 'gold')
plt.scatter(yellow_2[:, 0], yellow_2[:, 1], color = 'gold')
plt.show()
We can create a logistic regression model that separates the two datasets. To do that, let's first create the same data set, but give the two gaussian clouds different colors.
# Logistic Regression
# The same pair of gaussian clouds as the regression example, now treated as
# two classes: orange around the origin, purple shifted to center (4, 5).
orange = np.random.randn(5000, 2)
purple = np.random.randn(5000, 2) + np.array([4, 5])
plt.scatter(orange[:, 0], orange[:, 1], color = 'orange')
plt.scatter(purple[:, 0], purple[:, 1], color = 'purple')
plt.show()

# Design matrix stacks orange rows first; those rows get label 1, purple rows 0.
X = np.vstack((orange, purple))
Y = np.zeros((X.shape[0], 1))
Y[:orange.shape[0], 0] = 1
Create a logistic regression model by calling NeuralNetwork with one Classification layer.
model = NeuralNetwork([Classification(2, 1)], print_error = True)
Train the model using batch gradient descent (or any gradient descent methods).
# Two epochs at learning rate 0.001, using the train method's default batch size.
model.train(X, Y, 2, 0.001)
Training Progress: 100%|██████████| 2/2 [00:01<00:00, 1.00it/s]
Finally, plot the decision boundary for the two gaussian clouds.
#Plot Decision boundary
# Batched version of the per-point loop: score every point of a 1000x1000 grid
# over [-4, 8]^2 with one model.predict call (predict accepts an (n, 2) matrix,
# as the regression example shows), then keep the grid points whose predicted
# probability lies within 0.001 of the 0.5 decision threshold.
xs = np.linspace(-4, 8, 1000)
ys = np.linspace(-4, 8, 1000)
gx, gy = np.meshgrid(xs, ys, indexing = 'ij')  # 'ij' matches the original x-outer/y-inner order
grid = np.column_stack((gx.ravel(), gy.ravel()))
predictions = model.predict(grid)
boundary = grid[np.abs(predictions.ravel() - 0.5) < 0.001]

plt.scatter(boundary[:, 0], boundary[:, 1], color = 'blue')
plt.scatter(orange[:, 0], orange[:, 1], color = 'orange')
plt.scatter(purple[:, 0], purple[:, 1], color = 'purple')
plt.show()