Neural Network Regression Demo

This is demo code for neural network regression using PyTorch.

# %% [markdown]
# The code is adapted from [this Medium post](https://medium.com/@benjamin.phillips22/simple-regression-with-neural-networks-in-pytorch-313f06910379).

# %%
import torch
import torch.nn.functional as F

import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
# %matplotlib inline


device = 'cuda' if torch.cuda.is_available() else 'cpu'
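
# Not in the original demo: seed the RNG so the noisy data generated below
# is reproducible across runs (remove this line for varied data each run).
torch.manual_seed(0)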

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size())                 # noisy y data (tensor), shape=(100, 1)
x = x.to(device, dtype=torch.float32)
y = y.to(device, dtype=torch.float32)

train_size = 80
train_input = x[:train_size]
train_targets = y[:train_size]

# view data
plt.figure(figsize=(10, 4))
plt.scatter(x.cpu().numpy(), y.cpu().numpy(), color="orange")
plt.title('Noisy quadratic data')
plt.xlabel('Input')
plt.ylabel('Target')
plt.show()

# this is one way to define a network


class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super().__init__()
        self.l1 = torch.nn.Linear(n_feature, n_hidden)   # hidden layer
        self.l2 = torch.nn.Linear(n_hidden, n_output)   # output layer

    def forward(self, x):
        x = F.relu(self.l1(x))      # activation function for hidden layer
        x = self.l2(x)             # linear output
        return x
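

# The class above is one way to define the network; an equivalent model can
# be built with torch.nn.Sequential. This is a sketch for comparison only;
# the training below uses the Net class.
seq_net = torch.nn.Sequential(
    torch.nn.Linear(1, 10),   # hidden layer (n_feature=1 -> n_hidden=10)
    torch.nn.ReLU(),          # same activation as Net.forward
    torch.nn.Linear(10, 1),   # output layer (n_hidden=10 -> n_output=1)
)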


net = Net(n_feature=1, n_hidden=10, n_output=1)     # define the network
# print(net)  # net architecture
net.to(device, dtype=torch.float32)
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)

my_images = []
fig, ax = plt.subplots(figsize=(12, 7))

# train the network
for t in range(200):

    prediction = net(train_input)     # input x and predict based on x

    loss = F.mse_loss(prediction, train_targets)     # must be (1. nn output, 2. target)

    optimizer.zero_grad()   # clear gradients for next train
    loss.backward()         # backpropagation, compute gradients
    optimizer.step()        # apply gradients
    print("iteration %3i. loss: %8.3g" % (t, loss))

    # plot and show learning process
    ax.cla()    # clear the axes before drawing the next frame
    ax.set_title('Regression Analysis', fontsize=35)
    ax.set_xlabel('Input', fontsize=24)
    ax.set_ylabel('Target', fontsize=24)
    ax.set_xlim(-1.05, 1.5)
    ax.set_ylim(-0.25, 1.25)
    ax.scatter(x.cpu().numpy(), y.cpu().numpy(), color="orange")
    ax.scatter(train_input.cpu().numpy(),
               train_targets.cpu().numpy(), color='red')

    with torch.no_grad():
        prediction = net(x)     # input x and predict based on x

    ax.plot(x.cpu().numpy(), prediction.cpu().numpy(), 'g-', lw=3)
    ax.text(1.0, 0.1, 'Step = %d' % t, fontdict={'size': 24, 'color': 'red'})
    ax.text(1.0, 0, 'Loss = %.4f' % loss.item(),
            fontdict={'size': 24, 'color': 'red'})

    # For live demo
    plt.pause(0.001)
    fig.canvas.draw()       # draw the canvas, cache the renderer
#     # to record frames for a gif, also add `import numpy as np` at the top
#     image = np.frombuffer(fig.canvas.buffer_rgba(), dtype='uint8')
#     image = image.reshape(fig.canvas.get_width_height()[::-1] + (4,))
#     image = image[:, :, :3]  # convert RGBA to RGB by dropping the alpha channel

#     my_images.append(image)


# # save the recorded frames as a gif (requires `import imageio`)
# imageio.mimsave('./curve_1.gif', my_images, fps=10)
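
# %%
# The first 80 points were used for training; the remaining 20 are held out.
# A minimal evaluation sketch on that held-out slice (reuses the variables
# defined above; not part of the original demo):
test_input = x[train_size:]
test_targets = y[train_size:]
with torch.no_grad():
    test_loss = F.mse_loss(net(test_input), test_targets)
print("held-out MSE: %.4f" % test_loss.item())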