xcx0902 committed on
Commit
9b401fc
·
verified ·
1 Parent(s): 2eebda5

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. MultiplicationNet.py +1 -1
  2. model.pth +2 -2
  3. train.py +3 -3
MultiplicationNet.py CHANGED
@@ -3,7 +3,7 @@ import torch.nn as nn
3
  class MultiplicationNet(nn.Module):
4
  def __init__(self):
5
  super(MultiplicationNet, self).__init__()
6
- layer_sizes = [2, 64, 128, 256, 1]
7
  layers = []
8
  for i in range(len(layer_sizes) - 1):
9
  layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
 
3
  class MultiplicationNet(nn.Module):
4
  def __init__(self):
5
  super(MultiplicationNet, self).__init__()
6
+ layer_sizes = [2, 256, 512, 1024, 1]
7
  layers = []
8
  for i in range(len(layer_sizes) - 1):
9
  layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
model.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:287330a63e005470ced2f2678a429438d4194f83c524e22b8c986c39c800a257
3
- size 172209
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cefcd52977cd055794c10dba186f321ad2b5aacee73975f99371d3843c0a524c
3
+ size 2639793
train.py CHANGED
@@ -12,8 +12,8 @@ def generate_data(num_samples, min_val=0, max_val=100):
12
  return np.hstack([x1, x2]), y
13
 
14
  def train():
15
- num_samples = 10000
16
- num_epochs = 5000
17
  learning_rate = 0.01
18
 
19
  x, y = generate_data(num_samples)
@@ -23,7 +23,7 @@ def train():
23
  model = MultiplicationNet().to(device)
24
  criterion = nn.MSELoss().to(device)
25
  optimizer = optim.Adam(model.parameters(), lr=learning_rate)
26
- scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.95)
27
 
28
  for epoch in range(num_epochs):
29
  outputs = model(x_train)
 
12
  return np.hstack([x1, x2]), y
13
 
14
  def train():
15
+ num_samples = 20000
16
+ num_epochs = 20000
17
  learning_rate = 0.01
18
 
19
  x, y = generate_data(num_samples)
 
23
  model = MultiplicationNet().to(device)
24
  criterion = nn.MSELoss().to(device)
25
  optimizer = optim.Adam(model.parameters(), lr=learning_rate)
26
+ scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.93)
27
 
28
  for epoch in range(num_epochs):
29
  outputs = model(x_train)