Upload folder using huggingface_hub
- MultiplicationNet.py +1 -1
- model.pth +2 -2
- train.py +3 -3
MultiplicationNet.py
CHANGED
@@ -3,7 +3,7 @@ import torch.nn as nn
 class MultiplicationNet(nn.Module):
     def __init__(self):
         super(MultiplicationNet, self).__init__()
-        layer_sizes = [2,
+        layer_sizes = [2, 256, 512, 1024, 1]
         layers = []
         for i in range(len(layer_sizes) - 1):
             layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
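For readers skimming the diff, here is a minimal sketch of how the updated module might look in full. Only the layer widths and the Linear-building loop appear in the visible hunk; the ReLU activations, the nn.Sequential wrapper, and the forward method are assumptions.

```python
import torch.nn as nn

class MultiplicationNet(nn.Module):
    def __init__(self):
        super(MultiplicationNet, self).__init__()
        # Widths from the updated commit: 2 inputs -> 256 -> 512 -> 1024 -> 1 output.
        layer_sizes = [2, 256, 512, 1024, 1]
        layers = []
        for i in range(len(layer_sizes) - 1):
            layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
            if i < len(layer_sizes) - 2:
                # Assumed nonlinearity between hidden layers (not shown in the hunk).
                layers.append(nn.ReLU())
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
```

These widths give 2*256+256 + 256*512+512 + 512*1024+1024 + 1024+1 = 658,689 parameters, about 2.63 MB at float32, which lines up with the new model.pth size of 2639793 bytes below.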
model.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:cefcd52977cd055794c10dba186f321ad2b5aacee73975f99371d3843c0a524c
+size 2639793
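model.pth is tracked with Git LFS, so the file committed to the repository is only a pointer; the sha256 oid and byte size identify the actual checkpoint blob. A hedged sketch of pulling and loading the new weights, assuming the file stores a state_dict; the repo_id below is a placeholder, since the commit page does not show it:

```python
import torch
from huggingface_hub import hf_hub_download

from MultiplicationNet import MultiplicationNet

# hf_hub_download resolves the LFS pointer and fetches the 2639793-byte blob.
path = hf_hub_download(repo_id="<user>/<repo>", filename="model.pth")  # placeholder repo_id
model = MultiplicationNet()
model.load_state_dict(torch.load(path, map_location="cpu"))  # assumes a state_dict checkpoint
model.eval()
```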
train.py
CHANGED
@@ -12,8 +12,8 @@ def generate_data(num_samples, min_val=0, max_val=100):
     return np.hstack([x1, x2]), y

 def train():
-    num_samples =
-    num_epochs =
+    num_samples = 20000
+    num_epochs = 20000
     learning_rate = 0.01

     x, y = generate_data(num_samples)
@@ -23,7 +23,7 @@ def train():
     model = MultiplicationNet().to(device)
     criterion = nn.MSELoss().to(device)
     optimizer = optim.Adam(model.parameters(), lr=learning_rate)
-    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.
+    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.93)

     for epoch in range(num_epochs):
         outputs = model(x_train)
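For context on the new schedule: StepLR multiplies the learning rate by gamma=0.93 every 100 scheduler steps. Assuming scheduler.step() is called once per epoch (the call itself falls outside the visible hunk), the rate follows 0.01 * 0.93**(epoch // 100) over the 20000 epochs. A quick check:

```python
# Decay implied by StepLR(step_size=100, gamma=0.93), assuming one
# scheduler.step() per epoch; printed values are approximate.
for epoch in (0, 100, 1000, 10000, 19999):
    print(epoch, 0.01 * 0.93 ** (epoch // 100))
# 0     -> 0.01
# 100   -> 0.0093
# 1000  -> ~4.8e-3
# 10000 -> ~7.0e-6
# 19999 -> ~5.3e-9
```

So under that assumption the learning rate is annealed to effectively zero by the end of training.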