Upload folder using huggingface_hub
Browse files
device.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch

# Single source of truth for the compute device; other modules do
# `from device import device`.  Chosen once at import time.
device: str

if torch.cuda.is_available():
    device = 'cuda'
# Fix: the documented, stable API for Apple-silicon detection is
# torch.backends.mps.is_available(); the original torch.mps.is_available()
# raises AttributeError on torch builds where the torch.mps module does not
# expose is_available (older releases / non-mac builds).
elif torch.backends.mps.is_available():
    device = 'mps'
else:
    device = 'cpu'
|
model.pth
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
size 172209
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:287330a63e005470ced2f2678a429438d4194f83c524e22b8c986c39c800a257
|
| 3 |
size 172209
|
run.py
CHANGED
|
@@ -1,12 +1,13 @@
|
|
| 1 |
import torch
|
|
|
|
| 2 |
|
| 3 |
-
model = torch.load("model.pth", weights_only=False)
|
| 4 |
|
| 5 |
def run(test):
|
| 6 |
with torch.no_grad():
|
| 7 |
-
test_data = torch.tensor([test], dtype=torch.float)
|
| 8 |
predictions = model(test_data)
|
| 9 |
-
return predictions.squeeze().numpy()
|
| 10 |
|
| 11 |
if __name__ == '__main__':
|
| 12 |
x, y = map(int, input().split())
|
|
|
|
| 1 |
import torch
|
| 2 |
+
from device import device
|
| 3 |
|
| 4 |
+
model = torch.load("model.pth", weights_only=False).to(device)
|
| 5 |
|
| 6 |
def run(test):
    """Run the loaded model on a single sample.

    Wraps *test* in a batch-of-one float tensor on the module-level
    ``device``, evaluates the module-level ``model`` with gradients
    disabled, and returns the squeezed result as a NumPy array on CPU.
    """
    with torch.no_grad():
        batch = torch.tensor([test], dtype=torch.float).to(device)
        output = model(batch)
        return output.squeeze().cpu().numpy()
|
| 11 |
|
| 12 |
if __name__ == '__main__':
|
| 13 |
x, y = map(int, input().split())
|
train.py
CHANGED
|
@@ -3,6 +3,7 @@ import torch.nn as nn
|
|
| 3 |
import torch.optim as optim
|
| 4 |
import numpy as np
|
| 5 |
from MultiplicationNet import MultiplicationNet
|
|
|
|
| 6 |
|
| 7 |
def generate_data(num_samples, min_val=0, max_val=100):
|
| 8 |
x1 = np.random.randint(min_val, max_val, size=(num_samples, 1))
|
|
@@ -16,11 +17,11 @@ def train():
|
|
| 16 |
learning_rate = 0.01
|
| 17 |
|
| 18 |
x, y = generate_data(num_samples)
|
| 19 |
-
x_train = torch.tensor(x, dtype=torch.float)
|
| 20 |
-
y_train = torch.tensor(y, dtype=torch.float)
|
| 21 |
|
| 22 |
-
model = MultiplicationNet()
|
| 23 |
-
criterion = nn.MSELoss()
|
| 24 |
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
|
| 25 |
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.95)
|
| 26 |
|
|
|
|
| 3 |
import torch.optim as optim
|
| 4 |
import numpy as np
|
| 5 |
from MultiplicationNet import MultiplicationNet
|
| 6 |
+
from device import device
|
| 7 |
|
| 8 |
def generate_data(num_samples, min_val=0, max_val=100):
|
| 9 |
x1 = np.random.randint(min_val, max_val, size=(num_samples, 1))
|
|
|
|
| 17 |
learning_rate = 0.01
|
| 18 |
|
| 19 |
x, y = generate_data(num_samples)
|
| 20 |
+
x_train = torch.tensor(x, dtype=torch.float).to(device)
|
| 21 |
+
y_train = torch.tensor(y, dtype=torch.float).to(device)
|
| 22 |
|
| 23 |
+
model = MultiplicationNet().to(device)
|
| 24 |
+
criterion = nn.MSELoss().to(device)
|
| 25 |
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
|
| 26 |
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.95)
|
| 27 |
|