Upload folder using huggingface_hub
Browse files- MultiplicationNet.py +15 -0
- model.pth +3 -0
- run.py +13 -0
- train.py +39 -0
MultiplicationNet.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
|
| 3 |
+
class MultiplicationNet(nn.Module):
    """Fully connected MLP that learns to multiply two numbers.

    Architecture: 2 -> 64 -> 128 -> 256 -> 1, with a ReLU between every
    pair of Linear layers and no activation after the final layer, so the
    output is an unbounded regression value.
    """

    def __init__(self):
        super().__init__()
        sizes = [2, 64, 128, 256, 1]
        modules = []
        for n_in, n_out in zip(sizes, sizes[1:]):
            modules.extend((nn.Linear(n_in, n_out), nn.ReLU()))
        # Drop the trailing ReLU appended after the last Linear layer.
        del modules[-1]
        self.model = nn.Sequential(*modules)

    def forward(self, x):
        """Map a (batch, 2) tensor of factor pairs to (batch, 1) product estimates."""
        return self.model(x)
|
model.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:75c98e3b3f34bf836379b5c428cdba0f48794be3ea418b7b9418b9754ec50231
|
| 3 |
+
size 172209
|
run.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch

# NOTE(review): weights_only=False unpickles arbitrary Python objects — only
# load model.pth from a trusted source (here: the checkpoint train.py writes).
model = torch.load("model.pth", weights_only=False)
# Switch to inference mode. A no-op for this pure Linear/ReLU net, but correct
# hygiene in case the architecture ever gains dropout/batch-norm layers.
model.eval()


def run(test):
    """Return the network's product estimate for one factor pair.

    Parameters
    ----------
    test : sequence of two numbers, e.g. ``[3, 4]``.

    Returns
    -------
    A NumPy scalar with the model's predicted product.
    """
    with torch.no_grad():
        test_data = torch.tensor([test], dtype=torch.float)
        predictions = model(test_data)
        return predictions.squeeze().numpy()


if __name__ == '__main__':
    x, y = map(int, input().split())
    print(run([x, y]))
|
train.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.optim as optim
|
| 4 |
+
import numpy as np
|
| 5 |
+
from MultiplicationNet import MultiplicationNet
|
| 6 |
+
|
| 7 |
+
def generate_data(num_samples, min_val=0, max_val=100):
    """Build a random multiplication dataset.

    Draws integer factors uniformly from ``[min_val, max_val)`` and returns
    a ``(num_samples, 2)`` feature array of factor pairs together with a
    ``(num_samples, 1)`` target array holding their products.
    """
    left = np.random.randint(min_val, max_val, size=(num_samples, 1))
    right = np.random.randint(min_val, max_val, size=(num_samples, 1))
    features = np.column_stack([left, right])
    targets = left * right
    return features, targets
|
| 12 |
+
|
| 13 |
+
def train(num_samples=10000, num_epochs=5000, learning_rate=0.01):
    """Train MultiplicationNet on random integer products and save it.

    Parameters (all keyword-compatible with the original hard-coded values)
    ----------
    num_samples : number of random (x1, x2) training pairs to generate.
    num_epochs : full-batch gradient steps to run.
    learning_rate : initial Adam learning rate (decayed 5% every 100 epochs).

    Side effects: prints the loss every epoch and writes the trained model
    to ``model.pth``.
    """
    x, y = generate_data(num_samples)
    x_train = torch.tensor(x, dtype=torch.float)
    y_train = torch.tensor(y, dtype=torch.float)

    model = MultiplicationNet()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    # Multiplicative LR decay keeps late-stage full-batch steps from oscillating.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.95)

    for epoch in range(num_epochs):
        outputs = model(x_train)  # full-batch forward pass
        loss = criterion(outputs, y_train)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()
        print(f"Epoch {epoch}, loss = {loss.item()}")

    # Save the whole module object (not just the state_dict) because run.py
    # reloads it with torch.load(..., weights_only=False).
    torch.save(model, "model.pth")

if __name__ == '__main__':
    train()
|