hkayabilisim committed on
Commit
c97c52c
·
1 Parent(s): e00a48f

added model testing

Browse files
agent/backend/utils.py CHANGED
@@ -2,6 +2,7 @@ import torch
2
  from tqdm import tqdm
3
  from torch.utils.data import Dataset, DataLoader
4
  from functools import partial
 
5
 
6
  from .data import ExplorationDataset
7
  from .models import Perceptron
@@ -11,7 +12,8 @@ from .loss import loss_mape
11
 
12
  def train(df, model_name, input_cols, output_cols, trn_ratio,
13
  batch_size_trn, batch_size_val, optimizer_name, learning_rate,
14
- max_epoch, loss_name):
 
15
  if model_name == "Perceptron":
16
  model = Perceptron(in_features=len(input_cols), out_features=len(output_cols))
17
  if loss_name == "mape":
@@ -20,7 +22,8 @@ def train(df, model_name, input_cols, output_cols, trn_ratio,
20
 
21
  trn_size = int(len(ds)*trn_ratio)
22
  val_size = len(ds) - trn_size
23
- ds_trn, ds_val = torch.utils.data.random_split(ds, [trn_size, val_size])
 
24
  dl_trn = DataLoader(ds_trn, batch_size=batch_size_trn, shuffle=True)
25
  dl_val = DataLoader(ds_val, batch_size=batch_size_val, shuffle=True)
26
 
@@ -40,6 +43,7 @@ def train(df, model_name, input_cols, output_cols, trn_ratio,
40
  print(f'Learning rate: {learning_rate}')
41
  print(f'Optimizer {optimizer_name}')
42
  print(f'Max epoch: {max_epoch}')
 
43
 
44
  x, y = ds[0]
45
  in_features = x.shape[0]
@@ -61,11 +65,36 @@ def train(df, model_name, input_cols, output_cols, trn_ratio,
61
  trn_loss = evaluate(model, dl_trn, loss_fn)
62
  val_loss = evaluate(model, dl_val, loss_fn)
63
  #epochbar.set_postfix(epoch=ep+1,loss=loss.item(),val_loss=val_loss)
64
- yield ep, trn_loss, val_loss, None
65
 
66
  return ep, trn_loss, val_loss, model
67
 
68
- def predict(model, dataloader):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  with torch.no_grad():
70
  predictions = torch.empty(0, model.out_features)
71
  targets = torch.empty(predictions.shape)
 
2
  from tqdm import tqdm
3
  from torch.utils.data import Dataset, DataLoader
4
  from functools import partial
5
+ import pandas as pd
6
 
7
  from .data import ExplorationDataset
8
  from .models import Perceptron
 
12
 
13
  def train(df, model_name, input_cols, output_cols, trn_ratio,
14
  batch_size_trn, batch_size_val, optimizer_name, learning_rate,
15
+ max_epoch, loss_name, seed):
16
+ torch.manual_seed(seed)
17
  if model_name == "Perceptron":
18
  model = Perceptron(in_features=len(input_cols), out_features=len(output_cols))
19
  if loss_name == "mape":
 
22
 
23
  trn_size = int(len(ds)*trn_ratio)
24
  val_size = len(ds) - trn_size
25
+ generator = torch.Generator().manual_seed(seed)
26
+ ds_trn, ds_val = torch.utils.data.random_split(ds, [trn_size, val_size], generator=generator)
27
  dl_trn = DataLoader(ds_trn, batch_size=batch_size_trn, shuffle=True)
28
  dl_val = DataLoader(ds_val, batch_size=batch_size_val, shuffle=True)
29
 
 
43
  print(f'Learning rate: {learning_rate}')
44
  print(f'Optimizer {optimizer_name}')
45
  print(f'Max epoch: {max_epoch}')
46
+ print(f'random seed',seed)
47
 
48
  x, y = ds[0]
49
  in_features = x.shape[0]
 
65
  trn_loss = evaluate(model, dl_trn, loss_fn)
66
  val_loss = evaluate(model, dl_val, loss_fn)
67
  #epochbar.set_postfix(epoch=ep+1,loss=loss.item(),val_loss=val_loss)
68
+ yield ep, trn_loss, val_loss, model
69
 
70
  return ep, trn_loss, val_loss, model
71
 
72
def predict(model, df, input_cols, output_cols, trn_ratio,
            batch_size_trn, batch_size_val, seed):
    """Run the trained model over the training/validation split and collect results.

    Rebuilds the same train/validation split used by ``train`` (same ``seed``
    and ``trn_ratio``), runs the model over both subsets, and returns the
    predictions paired with their targets, one DataFrame pair per output column.

    Parameters
    ----------
    model : torch.nn.Module
        Trained model; must expose ``out_features`` (as ``Perceptron`` does).
    df : pandas.DataFrame
        Source data passed to ``ExplorationDataset``.
    input_cols, output_cols : list[str]
        Feature and target column names.
    trn_ratio : float
        Fraction of the dataset used for training (must match the value used
        in ``train`` for the split to be reproduced).
    batch_size_trn, batch_size_val : int
        Batch sizes for the two DataLoaders.
    seed : int
        Seed for the split generator; with the same seed/ratio the split is
        identical to the one produced during training.

    Returns
    -------
    dict
        ``{col_name: {'training': DataFrame, 'validation': DataFrame}}`` where
        each DataFrame has columns ``prediction`` and ``target``.
    """
    torch.manual_seed(seed)
    ds = ExplorationDataset(df, input_cols=input_cols, output_cols=output_cols)
    trn_size = int(len(ds)*trn_ratio)
    val_size = len(ds) - trn_size
    # Seeded generator reproduces the exact split made in train().
    generator = torch.Generator().manual_seed(seed)
    ds_trn, ds_val = torch.utils.data.random_split(ds, [trn_size, val_size], generator=generator)
    # Fix: no shuffling at inference time — shuffle=True only randomized the
    # row order of the returned DataFrames for no benefit; shuffle=False keeps
    # rows in split order, making results reproducible and easier to inspect.
    dl_trn = DataLoader(ds_trn, batch_size=batch_size_trn, shuffle=False)
    dl_val = DataLoader(ds_val, batch_size=batch_size_val, shuffle=False)

    # Fix: switch to eval mode before inference (no-op for a plain Perceptron,
    # but required for correctness if the model ever gains dropout/batchnorm).
    model.eval()
    trn_pred, trn_target = predict_dataloader(model, dl_trn)
    val_pred, val_target = predict_dataloader(model, dl_val)

    results = {}
    for col, col_name in enumerate(output_cols):
        # Pair the prediction and target columns side by side; naming the
        # columns at construction time replaces the old rename() step.
        trn_df = pd.DataFrame(torch.cat([trn_pred[:, [col]], trn_target[:, [col]]], dim=1).numpy(),
                              columns=['prediction', 'target'])
        val_df = pd.DataFrame(torch.cat([val_pred[:, [col]], val_target[:, [col]]], dim=1).numpy(),
                              columns=['prediction', 'target'])
        results[col_name] = {'training': trn_df, 'validation': val_df}
    return results
94
+
95
+
96
+
97
+ def predict_dataloader(model, dataloader):
98
  with torch.no_grad():
99
  predictions = torch.empty(0, model.out_features)
100
  targets = torch.empty(predictions.shape)
agent/dashboard/__init__.py CHANGED
@@ -1,5 +1,7 @@
1
  import solara
2
 
 
 
3
  @solara.component
4
  def Page():
5
  with solara.VBox() as main:
 
1
  import solara
2
 
3
+ route_order = ["data","training","testing"]
4
+
5
  @solara.component
6
  def Page():
7
  with solara.VBox() as main:
agent/dashboard/training.py CHANGED
@@ -21,6 +21,7 @@ local_state = solara.reactive(
21
  'loss_plot_data': solara.reactive({'epoch': [], 'trn_loss': [], 'val_loss': []}),
22
  'render_count': solara.reactive(0),
23
  'model': solara.reactive(None),
 
24
  }
25
  )
26
 
@@ -79,13 +80,14 @@ def ExecutePanel(df):
79
  optimizer_name = local_state.value['optimizer_name'].value
80
  max_epoch = local_state.value['max_epoch'].value
81
  loss_name = local_state.value['loss_name'].value
 
82
 
83
  epoch_list = []
84
  trn_loss_list = []
85
  val_loss_list = []
86
  for epoch, trn_loss, val_loss, model in train(dff, "Perceptron", input_cols, output_cols, trn_ratio,
87
  batch_size_trn, batch_size_val, optimizer_name, learning_rate,
88
- max_epoch, loss_name):
89
  epoch_list.append(epoch)
90
  trn_loss_list.append(trn_loss)
91
  val_loss_list.append(val_loss)
@@ -94,7 +96,7 @@ def ExecutePanel(df):
94
  'trn_loss': trn_loss_list,
95
  'val_loss': val_loss_list})
96
  force_render()
97
- local_state.value['model'].set(model)
98
  solara.Button(label='Train', on_click=trigger_training)
99
  LossPlot(local_state.value['loss_plot_data'].value, local_state.value['render_count'].value)
100
 
@@ -148,6 +150,8 @@ def ParameterSelection(df):
148
  value=local_state.value['learning_rate_log10'].value,
149
  min=-4, max=1, step=0.01,
150
  on_value=local_state.value['learning_rate_log10'].set)
 
 
151
 
152
 
153
 
 
21
  'loss_plot_data': solara.reactive({'epoch': [], 'trn_loss': [], 'val_loss': []}),
22
  'render_count': solara.reactive(0),
23
  'model': solara.reactive(None),
24
+ 'seed': solara.reactive(42),
25
  }
26
  )
27
 
 
80
  optimizer_name = local_state.value['optimizer_name'].value
81
  max_epoch = local_state.value['max_epoch'].value
82
  loss_name = local_state.value['loss_name'].value
83
+ seed = local_state.value['seed'].value
84
 
85
  epoch_list = []
86
  trn_loss_list = []
87
  val_loss_list = []
88
  for epoch, trn_loss, val_loss, model in train(dff, "Perceptron", input_cols, output_cols, trn_ratio,
89
  batch_size_trn, batch_size_val, optimizer_name, learning_rate,
90
+ max_epoch, loss_name, seed):
91
  epoch_list.append(epoch)
92
  trn_loss_list.append(trn_loss)
93
  val_loss_list.append(val_loss)
 
96
  'trn_loss': trn_loss_list,
97
  'val_loss': val_loss_list})
98
  force_render()
99
+ local_state.value['model'].set(model)
100
  solara.Button(label='Train', on_click=trigger_training)
101
  LossPlot(local_state.value['loss_plot_data'].value, local_state.value['render_count'].value)
102
 
 
150
  value=local_state.value['learning_rate_log10'].value,
151
  min=-4, max=1, step=0.01,
152
  on_value=local_state.value['learning_rate_log10'].set)
153
+ solara.InputInt(label='random seed', value=local_state.value['seed'].value,
154
+ on_value=local_state.value['seed'].set)
155
 
156
 
157