flpelerin committed on
Commit
f10ccaa
·
verified ·
1 Parent(s): dc981e2

Update train.py

Browse files
Files changed (1) hide show
  1. train.py +1 -0
train.py CHANGED
@@ -158,6 +158,7 @@ wandb.init(
158
  # ============================
159
  optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
160
  h_states = None
 
161
 
162
  for epoch in range(num_epochs):
163
  print(f"Starting Epoch {epoch + 1}/{num_epochs}")
 
158
  # ============================
159
  optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
160
  h_states = None
161
+ step = 0
162
 
163
  for epoch in range(num_epochs):
164
  print(f"Starting Epoch {epoch + 1}/{num_epochs}")