Bhuvanesh24 committed
Commit e69ee07 · 1 Parent(s): 2a2ab61

Initial Commit
app.py ADDED
@@ -0,0 +1,46 @@
+ import torch
+ import gradio as gr
+ import numpy as np
+ from src.model import LSTM  # imported so torch.load can unpickle the saved model class
+
+ # Load the model
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ model_path = "./water_forecast_8.pt"
+ model = torch.load(model_path, map_location=device)
+ model.eval()
+
+ # Define the prediction function
+ def predict_water_usage(state_idx, target_year, structured_data):
+     if len(structured_data) < 3:
+         return {"error": "Structured data must include 3 years of data for the specified state."}
+
+     # Convert structured data for model input, applying the log transform used in training
+     data_values = [list(values) for values in structured_data.values()]
+     inputs = [[np.log(value + 1) for value in sublist] for sublist in data_values]
+
+     # Ensure the data has the right shape for the model
+     if len(inputs) != 3:
+         return {"error": "Structured data should have exactly 3 years of data."}
+
+     # Run inference without tracking gradients, then invert the log transform
+     inputs = torch.tensor(inputs, dtype=torch.float32).unsqueeze(0).to(device)
+     with torch.no_grad():
+         predictions = model(inputs).cpu().numpy()
+     output = [(np.exp(prediction) - 1).tolist() for prediction in predictions]
+     return output
+
+ # Configure Gradio interface
+ inputs = [
+     gr.Number(label="State Index"),   # Numeric input for state index
+     gr.Number(label="Target Year"),   # Numeric input for target year
+     gr.JSON(label="Structured Data")  # JSON input for structured data
+ ]
+
+ outputs = gr.JSON(label="Prediction")
+
+ # Set up the Gradio Interface
+ interface = gr.Interface(fn=predict_water_usage, inputs=inputs, outputs=outputs)
+
+ # Launch Gradio
+ if __name__ == "__main__":
+     interface.launch(show_error=True)
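
Note: a minimal smoke test for predict_water_usage, assuming the payload schema implied by src/data.py — one entry per year, each listing the eight features (population, urban/rural split, and the five LUC categories). The year keys and all values below are hypothetical.

    # Hypothetical payload: 3 years x 8 features, in src/data.py's feature order.
    sample_data = {
        "2018": [1200000, 500000, 700000, 3200, 800, 150, 400, 2100],
        "2019": [1230000, 520000, 710000, 3150, 790, 160, 410, 2120],
        "2020": [1260000, 545000, 715000, 3100, 780, 170, 420, 2140],
    }
    print(predict_water_usage(state_idx=8, target_year=2021, structured_data=sample_data))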
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ torch
+ fastapi
+ pydantic
+ numpy
+ pandas
+ scikit-learn
+ uvicorn
+ gradio
src/__pycache__/model.cpython-311.pyc ADDED
Binary file (5.56 kB)
src/data.py ADDED
@@ -0,0 +1,99 @@
+ import os
+ import pandas as pd
+ import numpy as np
+ import torch
+ from torch.utils.data import Dataset, DataLoader
+ from sklearn.preprocessing import StandardScaler
+
+ class WaterDataset(Dataset):
+     def __init__(self, sequence_length=5, transform=None):
+         """
+         Initializes the dataset by loading LUC, population, and usage data, merging them
+         based on year and state, and creating sequences of data for training.
+
+         Args:
+             sequence_length (int): The length of each data sequence for time series forecasting.
+             transform (callable, optional): Optional transform to be applied on a sample.
+         """
+         self.sequence_length = sequence_length
+         self.luc = pd.read_csv('data/luc.csv')
+         self.population = pd.read_csv('data/population.csv')
+         self.usage = pd.read_csv('data/usage.csv')
+         self.transform = transform
+
+         self.years = sorted(set(self.usage['Year']))
+         self.states = sorted(set(self.usage['State']))
+         self.all_years = sorted(set(self.population['Year']))
+
+         self.df = self.merge_data()
+         self.x, self.y = self.create_sequence()
+
+         self.scaler = StandardScaler()
+         self.x = self.scaler.fit_transform(self.x.reshape(-1, self.x.shape[-1])).reshape(self.x.shape)
+
+     def merge_data(self):
+         """
+         Merges land use classification (LUC) and population data based on year and state.
+
+         Returns:
+             pd.DataFrame: A DataFrame with merged data on population, urban/rural breakdown,
+             and LUC attributes for each year and state.
+         """
+         merged_data = []
+
+         for year, state in [(y, s) for y in self.all_years for s in self.states]:
+             population_data = self.population[(self.population['Year'] == year)]
+             luc_data = self.luc[(self.luc['Year'] == year) & (self.luc['State'] == state)]
+
+             if not population_data.empty and not luc_data.empty:
+                 combined_data = {
+                     'year': year,
+                     'state': state,
+                     'population': population_data['Population'].values[0],
+                     'urban_population': population_data['Urban Population'].values[0],
+                     'rural_population': population_data['Rural Population'].values[0],
+                     'forest': luc_data['Forest'].values[0],
+                     'barren': luc_data['Barren'].values[0],
+                     'others': luc_data['Others'].values[0],
+                     'fallow': luc_data['Fallow'].values[0],
+                     'cropped': luc_data['Cropped'].values[0]
+                 }
+                 merged_data.append(combined_data)
+
+         return pd.DataFrame(merged_data)
+
+     def create_sequence(self):
+         """
+         Creates sequences of input data and their corresponding labels for training.
+
+         Returns:
+             tuple: Two numpy arrays, one for data sequences and one for label sequences.
+         """
+         data_sequences, label_sequences = [], []
+         missing_sequences = {state: [] for state in self.states}
+
+         for state in self.states:
+             state_data = self.df[self.df['state'] == state].sort_values('year')
+             usage_state_data = self.usage[self.usage['State'] == state]
+
+             for i in range(len(state_data) - self.sequence_length):
+                 sequence = state_data.iloc[i:i + self.sequence_length]
+                 year = sequence['year'].values[-1] + 1
+
+                 usage_label = usage_state_data[usage_state_data['Year'] == year]
+
+                 if len(sequence) == self.sequence_length and not usage_label.empty:
+                     data_sequences.append(sequence[['population', 'urban_population', 'rural_population',
+                                                     'forest', 'barren', 'others', 'fallow', 'cropped']].values.astype(np.float32))
+                     label_sequences.append(usage_label[['Domestic', 'Industrial', 'Irrigation']].values[0].astype(np.float32))
+                 else:
+                     missing_sequences[state].append(year)
+
+         return np.array(data_sequences), np.array(label_sequences)
+
+     def __len__(self):
+         return len(self.x)
+
+     def __getitem__(self, index):
+         return (torch.tensor(self.x[index], dtype=torch.float32),
+                 torch.tensor(self.y[index], dtype=torch.float32))
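
Note: a short usage sketch for WaterDataset; the batch size and shuffle flag are illustrative, not taken from this commit.

    from torch.utils.data import DataLoader
    from src.data import WaterDataset

    dataset = WaterDataset(sequence_length=5)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)  # illustrative settings
    x, y = next(iter(loader))
    print(x.shape, y.shape)  # (16, 5, 8) feature sequences, (16, 3) usage labels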
src/model.py ADDED
@@ -0,0 +1,122 @@
+ import torch
+ import torch.nn as nn
+ import math
+ #from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ class LSTM(nn.Module):
+     def __init__(self, input_size, lstm_layer_sizes, linear_layer_size, output_size):
+         super(LSTM, self).__init__()
+
+         self.input_size = input_size
+         self.linear_layer_size = linear_layer_size
+
+         self.lstm_layer_1 = nn.LSTM(input_size, lstm_layer_sizes[0], batch_first=True)
+         self.lstm_layer_2 = nn.LSTM(lstm_layer_sizes[0], lstm_layer_sizes[1], batch_first=True)
+         self.lstm_layer_3 = nn.LSTM(lstm_layer_sizes[1], lstm_layer_sizes[2], batch_first=True)
+
+         self.fc = Linear(lstm_layer_sizes[2], self.linear_layer_size, output_size)
+
+         self.apply(self.initialize_weights)
+
+     def forward(self, x):
+
+         out, (hn_1, cn_1) = self.lstm_layer_1(x)
+         out, (hn_2, cn_2) = self.lstm_layer_2(out)
+         out, (hn_3, cn_3) = self.lstm_layer_3(out)
+
+         out = hn_3[-1]
+         out = self.fc(out)
+         return out
+
+     def initialize_weights(self, layer):
+         if isinstance(layer, nn.Linear):
+             nn.init.xavier_uniform_(layer.weight)
+             nn.init.zeros_(layer.bias)
+         elif isinstance(layer, nn.LSTM):
+             for name, param in layer.named_parameters():
+                 if 'weight' in name:
+                     nn.init.xavier_uniform_(param.data)
+                 elif 'bias' in name:
+                     nn.init.zeros_(param.data)
+
+ class Linear(nn.Module):
+     def __init__(self, input_size, hidden_sizes, output_size):
+         super(Linear, self).__init__()
+
+         self.relu = nn.ReLU()
+         self.sigmoid = nn.Sigmoid()
+         self.tanh = nn.Tanh()
+         self.input = nn.Linear(input_size, hidden_sizes[0])
+         self.fc = nn.Linear(hidden_sizes[0], hidden_sizes[1])
+         self.output = nn.Linear(hidden_sizes[1], output_size)
+
+         self.apply(self.initialize_weights)
+
+     def forward(self, x):
+         out = self.relu(self.input(x))
+         out = self.relu(self.fc(out))
+         out = self.relu(self.output(out))
+         return out
+
+     def initialize_weights(self, layer):
+         if isinstance(layer, nn.Linear):
+             nn.init.xavier_uniform_(layer.weight)
+             nn.init.zeros_(layer.bias)
+
+ class LUCLSTM(nn.Module):
+     def __init__(self, input_size, lstm_layer_sizes, output_size):
+         super(LUCLSTM, self).__init__()
+
+         self.input_size = input_size
+
+         self.lstm_layer_1 = nn.LSTM(input_size, lstm_layer_sizes[0], batch_first=True)
+         self.lstm_layer_2 = nn.LSTM(lstm_layer_sizes[0], lstm_layer_sizes[1], batch_first=True)
+         self.lstm_layer_3 = nn.LSTM(lstm_layer_sizes[1], lstm_layer_sizes[2], batch_first=True)
+
+         self.fc = nn.Linear(lstm_layer_sizes[2], 64)
+         self.fc2 = nn.Linear(64, output_size)
+         self.tanh = nn.Tanh()
+         self.relu = nn.ReLU()
+
+         self.apply(self.initialize_weights)
+
+     def forward(self, x):
+
+         out, (hn_1, cn_1) = self.lstm_layer_1(x)
+         out, (hn_2, cn_2) = self.lstm_layer_2(out)
+         out, (hn_3, cn_3) = self.lstm_layer_3(out)
+
+         out = hn_3[-1]
+         out = self.tanh(self.fc(out))
+         out = self.fc2(out)
+         return out
+
+     def initialize_weights(self, layer):
+         if isinstance(layer, nn.Linear):
+             nn.init.xavier_uniform_(layer.weight)
+             nn.init.zeros_(layer.bias)
+         elif isinstance(layer, nn.LSTM):
+             for name, param in layer.named_parameters():
+                 if 'weight' in name:
+                     nn.init.xavier_uniform_(param.data)
+                 elif 'bias' in name:
+                     nn.init.zeros_(param.data)
+
+
+ class PositionalEncoding(nn.Module):
+     def __init__(self, dim, max_len=300):
+         super(PositionalEncoding, self).__init__()
+         pe = torch.zeros(max_len, dim)
+         position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+         div_term = torch.exp(torch.arange(0, dim, 2).float() * (-math.log(10000.0) / dim))
+         pe[:, 0::2] = torch.sin(position * div_term)
+         pe[:, 1::2] = torch.cos(position * div_term)
+         pe = pe.unsqueeze(0).transpose(0, 1)
+         self.register_buffer('pe', pe)
+
+     def forward(self, x):
+         return x + self.pe[:x.size(0), :]
+
+ class Transformer(nn.Module):
+     def __init__(self):
+         super(Transformer, self).__init__()
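
Note: a sketch of instantiating LSTM and running a dummy forward pass; the layer sizes are placeholders, since the sizes used to train water_forecast_8.pt are not recorded in this commit.

    import torch
    from src.model import LSTM

    # Placeholder sizes: 8 input features, three stacked LSTM layers,
    # a two-layer linear head, and 3 outputs (Domestic, Industrial, Irrigation).
    model = LSTM(input_size=8, lstm_layer_sizes=[64, 32, 16],
                 linear_layer_size=[16, 8], output_size=3)
    dummy = torch.randn(1, 3, 8)  # (batch, years, features)
    print(model(dummy).shape)     # torch.Size([1, 3])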
water_forecast_8.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69c9221d970709875286d5e8d37a396ff62c4972c4d3b3981ccf62e46ef2c04f
+ size 258545