Bhuvanesh24 commited on
Commit
ab7bcc1
·
1 Parent(s): ca4f464

Initial Commit

Browse files
Files changed (6) hide show
  1. app.py +70 -0
  2. requirements.txt +5 -0
  3. res_1.pt +3 -0
  4. scaler_x.pkl +3 -0
  5. scaler_y.pkl +3 -0
  6. src/model.py +63 -0
app.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import gradio as gr
import pickle
import numpy as np
from sklearn.preprocessing import StandardScaler  # keeps sklearn importable for unpickling the scalers

# Select compute device: GPU when available, otherwise CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Artifact paths.
# FIX: this commit ships res_1.pt, scaler_x.pkl and scaler_y.pkl at the
# repository root (see the commit's file list) — the original
# 'model/res_1.pt' / 'data/scaler_*.pkl' paths would raise FileNotFoundError
# on startup.
model_path = 'res_1.pt'
scaler_x_path = 'scaler_x.pkl'
scaler_y_path = 'scaler_y.pkl'

# Load the full pickled model.
# weights_only=False is required on torch >= 2.6 (where the default flipped
# to True), because res_1.pt stores a whole nn.Module, not a state_dict.
# NOTE(review): unpickling also needs the model class importable under the
# module path recorded at save time (src/model.py) — confirm on deploy.
model = torch.load(model_path, map_location=device, weights_only=False)
model.eval()  # inference mode: freezes dropout / batch-norm behaviour

# Load the scalers fitted at training time (inputs and targets).
with open(scaler_x_path, 'rb') as f:
    scaler_x = pickle.load(f)

with open(scaler_y_path, 'rb') as f:
    scaler_y = pickle.load(f)
23
+
24
# Prediction entry point used by the Gradio interface.
def predict(inputs):
    """
    Run the model on a batch of readings and return rescaled predictions.

    Each row of *inputs* holds the four features
    ['Current Level', 'Current Storage', 'Inflow', 'Outflow'].
    Returns {"predictions": [...]} on success or {"error": message} on failure.
    """
    try:
        # Coerce whatever nesting Gradio hands us into an (n_rows, 4) matrix.
        features = np.array(inputs).reshape(-1, 4)

        # Normalise with the scaler fitted at training time.
        features_scaled = scaler_x.transform(features)

        # Forward pass on the configured device with gradients disabled.
        batch = torch.tensor(features_scaled, dtype=torch.float32).to(device)
        with torch.no_grad():
            raw_preds = model(batch).cpu().numpy()

        # Undo the target scaling so values are in the original units.
        preds = scaler_y.inverse_transform(raw_preds)
        return {"predictions": preds.flatten().tolist()}

    except Exception as e:
        # Surface the failure to the UI instead of crashing the app.
        return {"error": str(e)}
53
+
54
# Gradio front-end: a 5x4 feature grid in, JSON predictions out.
interface = gr.Interface(
    fn=predict,
    inputs=gr.Dataframe(
        headers=["Current Level", "Current Storage", "Inflow", "Outflow"],
        type="numpy",
        row_count=5,
        col_count=4,
        label="Input Features (5 rows with 4 columns)",
    ),
    outputs=gr.JSON(label="Predicted Values"),
)

# Launch only when executed as a script.
if __name__ == "__main__":
    # show_error surfaces server-side exceptions in the browser.
    interface.launch(show_error=True)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ torch
2
+ gradio
3
+ numpy
4
+ scikit-learn
5
+ pickle-mixin
res_1.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ab90d8cc484a1d53fbd3a934e3a3ad6ec78e5b7ca6bd9557137f840ba6c6af1
3
+ size 693794
scaler_x.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b70dffc98fb68094861bee67dc96ea19f6954a9a564507aeef9af9f0b5b60beb
3
+ size 545
scaler_y.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50fcea07aa11ebb9bfb18a6beb9ba438f4d9c3f5f829b5615e46ec966e04718e
3
+ size 497
src/model.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.nn as nn
2
+
3
+
4
+
5
class LSTM(nn.Module):
    """Three stacked LSTM layers followed by a small MLP head.

    Args:
        input_size: number of input features per timestep.
        lstm_layer_sizes: hidden sizes of the three LSTM layers (needs >= 3 entries).
        linear_layer_size: hidden sizes forwarded to the Linear head.
        output_size: dimensionality of the final prediction.
    """

    # FIX: the constructor was named `_init_` (single underscores), so Python
    # never invoked it and LSTM(...) failed / produced an uninitialised module.
    # Same fix applied to the super() call.
    def __init__(self, input_size, lstm_layer_sizes, linear_layer_size, output_size):
        super(LSTM, self).__init__()

        self.input_size = input_size
        self.linear_layer_size = linear_layer_size

        # batch_first=True: tensors are (batch, seq, features).
        self.lstm_layer_1 = nn.LSTM(input_size, lstm_layer_sizes[0], batch_first=True)
        self.lstm_layer_2 = nn.LSTM(lstm_layer_sizes[0], lstm_layer_sizes[1], batch_first=True)
        self.lstm_layer_3 = nn.LSTM(lstm_layer_sizes[1], lstm_layer_sizes[2], batch_first=True)

        # MLP head is the `Linear` class defined below in this module (not nn.Linear).
        self.fc = Linear(lstm_layer_sizes[2], self.linear_layer_size, output_size)

        self.apply(self.initialize_weights)

    def forward(self, x):
        """Run the sequence through the LSTM stack and project the final hidden state."""
        out, (hn_1, cn_1) = self.lstm_layer_1(x)
        out, (hn_2, cn_2) = self.lstm_layer_2(out)
        out, (hn_3, cn_3) = self.lstm_layer_3(out)

        # hn_3[-1] is the last layer's final hidden state: a (batch, hidden)
        # summary of the whole sequence.
        out = hn_3[-1]
        out = self.fc(out)
        return out

    def initialize_weights(self, layer):
        """Xavier-uniform weights and zero biases for Linear and LSTM submodules."""
        if isinstance(layer, nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
            nn.init.zeros_(layer.bias)
        elif isinstance(layer, nn.LSTM):
            for name, param in layer.named_parameters():
                if 'weight' in name:
                    nn.init.xavier_uniform_(param.data)
                elif 'bias' in name:
                    nn.init.zeros_(param.data)
40
+
41
class Linear(nn.Module):
    """Two-hidden-layer MLP head: input -> hidden[0] -> hidden[1] -> output.

    Args:
        input_size: width of the incoming features.
        hidden_sizes: sequence with at least two hidden-layer widths.
        output_size: width of the final (linear) layer.
    """

    # FIX: the constructor was named `_init_` (single underscores), so Python
    # never invoked it and Linear(...) failed with a TypeError. Same fix for
    # the super() call.
    def __init__(self, input_size, hidden_sizes, output_size):
        super(Linear, self).__init__()

        self.relu = nn.ReLU()
        # NOTE(review): sigmoid/tanh are constructed but never used in
        # forward(); kept so previously pickled instances still load cleanly.
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()
        self.input = nn.Linear(input_size, hidden_sizes[0])
        self.fc = nn.Linear(hidden_sizes[0], hidden_sizes[1])
        self.output = nn.Linear(hidden_sizes[1], output_size)

        self.apply(self.initialize_weights)

    def forward(self, x):
        """ReLU on both hidden layers; raw linear output (regression head)."""
        out = self.relu(self.input(x))
        out = self.relu(self.fc(out))
        out = self.output(out)
        return out

    def initialize_weights(self, layer):
        """Xavier-uniform weights and zero biases for every nn.Linear."""
        if isinstance(layer, nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
            nn.init.zeros_(layer.bias)