import os

import numpy as np
import torch
from tqdm import tqdm
| |
|
| |
|
class Flatten(torch.nn.Module):
    """Collapse every dimension after the batch dimension into one.

    Maps an input of shape ``(batch, *dims)`` to ``(batch, prod(dims))``
    via ``Tensor.view`` (so the input must be contiguous, as ``view``
    requires).
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
| |
|
| |
|
def get_model(d_in=4000, h=100, d_out=1):
    """Build a small fully-connected network with sigmoid output.

    Args:
        d_in: flattened input feature size. The default matches the
            example data in this file (1000 * 4 = 4000).
        h: hidden-layer width.
        d_out: number of output units.

    Returns:
        A ``torch.nn.Sequential`` that flattens its input to
        ``(batch, d_in)`` and emits values in ``[0, 1]`` of shape
        ``(batch, d_out)``.
    """
    model = torch.nn.Sequential(
        # Built-in equivalent of this file's hand-rolled Flatten module
        # (default start_dim=1 keeps the batch dimension).
        torch.nn.Flatten(),
        torch.nn.Linear(d_in, h),
        torch.nn.ReLU(),
        torch.nn.Linear(h, d_out),
        torch.nn.Sigmoid(),
    )
    return model
| |
|
| | simple_model = get_model() |
| |
|
| |
|
def generate_exmaple_model():
    """Train a small example model on random data and save it to disk.

    Writes two files under ``model_files/`` (creating the directory if
    needed):
      * ``full_model.pth``   -- the pickled model object.
      * ``only_weights.pth`` -- the model's ``state_dict`` only.

    NOTE(review): the function name keeps the original "exmaple" typo so
    existing callers do not break.
    """
    # Seed both RNGs *before* building the model so weight init and the
    # synthetic data are reproducible. (np.random.seed alone does not
    # affect torch's RNG, which is what actually generates the data.)
    np.random.seed(0)
    torch.manual_seed(0)

    model = get_model()
    loss_func = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-1)

    minibatch_size = 10
    x = torch.Tensor(50, 1000, 4).uniform_(0, 1)
    # Targets are (50, 1) so they align with the model's (batch, 1)
    # output; a 1-D (50,) target would silently broadcast inside MSELoss
    # to a (batch, batch) comparison.
    y = torch.Tensor(50, 1).uniform_(0, 1)

    num_samples = x.size(0)
    # Integer ceiling division instead of np.ceil(...).astype(int).
    num_batches = (num_samples + minibatch_size - 1) // minibatch_size

    for epoch in tqdm(range(10)):
        for mbi in tqdm(range(num_batches)):
            lo = mbi * minibatch_size
            hi = min(lo + minibatch_size, num_samples)
            minibatch = x[lo:hi]
            target = y[lo:hi]

            model.zero_grad()
            # torch.autograd.Variable is deprecated; tensors carry
            # autograd directly.
            out = model(minibatch)
            loss = loss_func(out, target)
            loss.backward()
            optimizer.step()

    # Ensure the output directory exists before saving.
    os.makedirs("model_files", exist_ok=True)
    torch.save(model, "model_files/full_model.pth")
    torch.save(model.state_dict(), "model_files/only_weights.pth")
| |
|
| | |
def get_model_w_weights():
    """Rebuild the example architecture and load its trained weights.

    Reads the state dict saved at ``model_files/only_weights.pth`` and
    applies it to a freshly constructed model.
    """
    weights = torch.load("model_files/only_weights.pth")
    net = get_model()
    net.load_state_dict(weights)
    return net
| |
|
| | def test_same_weights(dict1, dict2): |
| | for k in dict1: |
| | assert np.all(dict1[k].numpy() == dict2[k].numpy()) |
| |
|
| | |
| |
|