# Imports: numpy/torch for computation, matplotlib + IPython display for plotting.
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from IPython import display
# Render matplotlib figures as SVG in the notebook for crisp plots.
# NOTE(review): display.set_matplotlib_formats is deprecated in recent IPython
# (>= 7.23) in favor of matplotlib_inline.backend_inline.set_matplotlib_formats
# — confirm the IPython version in use before changing it.
display.set_matplotlib_formats('svg')
| | |
| | |
| |
|
# Build a toy linear dataset: y equals x plus Gaussian noise (true slope = 1).
N = 30
x = torch.randn(N, 1)
noise = torch.randn(N, 1) / 2
y = x + noise

# Quick scatter plot of the raw data.
plt.plot(x, y, 's')
plt.show()
| | |
| | |
# A tiny regression network: 1 input -> 1 hidden unit (ReLU) -> 1 output.
ANNreg = nn.Sequential(
    nn.Linear(1, 1),  # input layer
    nn.ReLU(),        # nonlinearity
    nn.Linear(1, 1),  # output layer
)
ANNreg  # echo the architecture in the notebook
| | |
| | |
# --- Training setup -------------------------------------------------------
learning_rate = 0.05

# Mean-squared-error loss: the standard choice for regression.
loss_fn = nn.MSELoss()

# Plain stochastic gradient descent over the model's parameters.
optimizer = torch.optim.SGD(ANNreg.parameters(), lr=learning_rate)

# Pre-allocate storage for the per-epoch training loss.
num_epochs = 500
losses = torch.zeros(num_epochs)
| |
|
| | |
# Training loop: full-batch gradient descent for num_epochs iterations.
for epochi in range(num_epochs):

    # Forward pass: model predictions for all samples.
    yhat = ANNreg(x)

    # Compute the training loss.
    loss = loss_fn(yhat, y)
    # Store a plain float via .item(): assigning the raw loss tensor would
    # splice it into `losses` with its autograd graph attached, retaining
    # every epoch's graph in memory and making `losses` require grad.
    losses[epochi] = loss.item()

    # Backpropagate and update the parameters.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
| | |
| | |
| |
|
| | |
| | |
# Evaluate the trained model on the training data (no gradients needed).
with torch.no_grad():
    predictions = ANNreg(x)

# Final mean-squared error. NOTE: nn.MSELoss already returns the *mean
# squared* error, so no extra .pow(2).mean() is needed — the original
# squared the (already-squared) loss a second time, misreporting it.
test_loss = loss_fn(predictions, y)

# Loss curve, with the final loss marked at the last epoch.
plt.plot(losses.detach(), 'o', markerfacecolor='w', linewidth=.1)
plt.plot(num_epochs, test_loss.detach(), 'ro')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Final loss: %.3f' % test_loss.item())
plt.show()

test_loss.item()
| | |
| | |
# Scatter real data vs. model predictions, with their correlation in the title.
plt.plot(x, y, 'bo', label='Real data')
plt.plot(x, predictions.detach(), 'rs', label='Predictions')
# Fix: title previously read 'preiction-data' (typo in a user-facing string).
# np.corrcoef expects row vectors, hence the transposes.
plt.title(f'prediction-data r = {np.corrcoef(y.T, predictions.detach().T)[0,1]:.2f}')
plt.legend()
plt.show()
| | |
def Model(x, y, num_epochs, learning_rate):
    """Build and train a 1-in/1-out ReLU regression network with SGD.

    Parameters
    ----------
    x : torch.Tensor, shape (N, 1) — input samples
    y : torch.Tensor, shape (N, 1) — regression targets
    num_epochs : int — number of full-batch training iterations
    learning_rate : float — SGD step size

    Returns
    -------
    final_predictions : torch.Tensor, shape (N, 1) — outputs for x after training
    losses : torch.Tensor, shape (num_epochs,) — per-epoch training MSE
    """
    # Fresh model: 1 -> 1 hidden (ReLU) -> 1.
    ANNreg = nn.Sequential(
        nn.Linear(1, 1),
        nn.ReLU(),
        nn.Linear(1, 1)
    )

    loss_fn = nn.MSELoss()
    optimizer = torch.optim.SGD(ANNreg.parameters(), lr=learning_rate)

    losses = torch.zeros(num_epochs)

    for epoch in range(num_epochs):
        # Forward pass.
        yHat = ANNreg(x)

        loss = loss_fn(yHat, y)
        # Store a detached float: assigning the raw loss tensor would retain
        # every epoch's autograd graph and make `losses` require grad.
        losses[epoch] = loss.item()

        # Backprop and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Final forward pass; gradients are not needed for reporting.
    with torch.no_grad():
        final_predictions = ANNreg(x)

    return final_predictions, losses
| |
|
# Fresh dataset (true slope 1, noisy), then train via the Model() helper.
sample_count = 30
x = torch.randn(sample_count, 1)
y = x + torch.randn(sample_count, 1) / 2

final_predictions, losses = Model(x, y, num_epochs=500, learning_rate=.05)
final_predictions  # echo the predictions in the notebook
| |
|
| | |
| | |
| |
|
def buildAndTrainTheModel(x, y):
    """Build a 1->1->1 ReLU network and train it for 500 epochs of SGD (lr=.05).

    Parameters
    ----------
    x : torch.Tensor, shape (N, 1) — input samples
    y : torch.Tensor, shape (N, 1) — regression targets

    Returns
    -------
    predictions : torch.Tensor, shape (N, 1) — outputs for x after training
    losses : torch.Tensor, shape (500,) — per-epoch training MSE
    """
    ANNreg = nn.Sequential(
        nn.Linear(1, 1),
        nn.ReLU(),
        nn.Linear(1, 1)
    )

    loss_fn = nn.MSELoss()
    optimizer = torch.optim.SGD(ANNreg.parameters(), lr=.05)

    num_epochs = 500
    losses = torch.zeros(num_epochs)

    for epochi in range(num_epochs):
        # Forward pass.
        yHat = ANNreg(x)

        loss = loss_fn(yHat, y)
        # Record a detached float; storing the loss tensor itself would keep
        # every epoch's autograd graph alive.
        losses[epochi] = loss.item()

        # Backprop and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Final forward pass without gradient tracking.
    with torch.no_grad():
        predictions = ANNreg(x)

    return predictions, losses
| |
|
| | |
| | |
| |
|
def createTheData(m, N=30):
    """Create a noisy linear dataset y = m*x + noise.

    Parameters
    ----------
    m : float — slope of the underlying linear relationship
    N : int, optional — number of samples (default 30, the original
        hard-coded value, so existing callers are unaffected)

    Returns
    -------
    x, y : torch.Tensor, each of shape (N, 1)
    """
    x = torch.randn(N, 1)
    # Noise std is 0.5, so larger |m| gives a higher effective signal-to-noise.
    y = m * x + torch.randn(N, 1) / 2
    return x, y
| | |
| | |
| |
|
| | |
# Generate data with slope .8, train a model on it, and visualize the results.
x, y = createTheData(.8)
yhat, losses = buildAndTrainTheModel(x, y)

fig, ax = plt.subplots(1, 2, figsize=(12, 4))

# Left panel: training loss over epochs.
ax[0].plot(losses.detach(), 'o', markerfacecolor='w', linewidth=.1)
ax[0].set_xlabel('Epoch')
ax[0].set_title('loss')

# Right panel: real data vs. model predictions.
ax[1].plot(x, y, 'bo', label='Real data')
ax[1].plot(x, yhat.detach(), 'rs', label='Predictions')
ax[1].set_xlabel('x')
ax[1].set_ylabel('y')
ax[1].set_title(f'prediction-data corr = {np.corrcoef(y.T, yhat.detach().T)[0,1]:.2f}')
ax[1].legend()
plt.show()
| |
|
| | |
| | |
| |
|
| | |
| |
|
| | |
# Parametric experiment: how does the data's true slope affect performance?
slopes = np.linspace(-2, 2, 21)
numExps = 50  # repetitions per slope (each with fresh data and a fresh model)

# results[:, :, 0] = final training loss; results[:, :, 1] = data-prediction r.
results = np.zeros((len(slopes), numExps, 2))

for slopi, slope in enumerate(slopes):
    for expi in range(numExps):
        # New dataset and a freshly trained model for every run.
        x, y = createTheData(slope)
        yhat, losses = buildAndTrainTheModel(x, y)

        # Record the final training loss and the real-vs-predicted correlation.
        results[slopi, expi, 0] = losses[-1]
        results[slopi, expi, 1] = np.corrcoef(y.T, yhat.detach().T)[0, 1]

# corrcoef yields NaN when the predictions are constant (e.g. a dead ReLU);
# map those runs to zero correlation so the averages below stay defined.
results[np.isnan(results)] = 0
| | |
| | |
| |
|
# Summarize the experiment: mean loss and mean correlation vs. true slope.
fig, ax = plt.subplots(1, 2, figsize=(12, 4))

mean_loss = np.mean(results[:, :, 0], axis=1)
ax[0].plot(slopes, mean_loss, 'ko-', markerfacecolor='w', markersize=10)
ax[0].set_xlabel('Slope')
ax[0].set_title('Loss')

mean_corr = np.mean(results[:, :, 1], axis=1)
ax[1].plot(slopes, mean_corr, 'ms-', markerfacecolor='w', markersize=10)
ax[1].set_xlabel('Slope')
ax[1].set_ylabel('Real-predicted Correlation')
ax[1].set_title('Model performance')

plt.show()
| | |
| |
|