vinay0123 commited on
Commit
36e777c
·
verified ·
1 Parent(s): 4266a48

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +380 -0
app.py ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import io
import os
import random
import textwrap
import time
import uuid
from collections import Counter
from datetime import datetime

import pandas as pd
import requests
import spacy
import torch
import torch.nn as nn
import torch.optim as optim
from flask import Flask, request, jsonify, send_file, after_this_request
from flask_cors import CORS
from googletrans import Translator
from gtts import gTTS
from sklearn.model_selection import train_test_split
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
22
+
23
+
24
# ---- Chat dataset ----------------------------------------------------------
# Instruction/response pairs pulled straight from a Google Drive CSV export.
df = pd.read_csv("https://drive.google.com/uc?id=1RCZShB5ohy1HdU-mogcP16TbeVv9txpY")

# Drop rows missing either side of the pair, then force both columns to str
# so the tokenizer never sees NaN/float remnants.
df = df.dropna(subset=["instruction", "response"])
df["instruction"] = df["instruction"].astype(str)
df["response"] = df["response"].astype(str)
31
+ # Tokenizer (Scratch)
32
# Tokenizer (Scratch)
class ScratchTokenizer:
    """Minimal whitespace tokenizer with a from-scratch vocabulary.

    Reserved ids: 0=<PAD>, 1=<SOS>, 2=<EOS>, 3=<UNK>.
    """

    def __init__(self):
        specials = ["<PAD>", "<SOS>", "<EOS>", "<UNK>"]
        self.word2idx = {tok: i for i, tok in enumerate(specials)}
        self.idx2word = {i: tok for i, tok in enumerate(specials)}
        self.vocab_size = len(specials)

    def build_vocab(self, texts):
        """Assign the next free id to every unseen whitespace token."""
        for text in texts:
            for word in text.split():
                if word in self.word2idx:
                    continue
                idx = self.vocab_size
                self.word2idx[word] = idx
                self.idx2word[idx] = word
                self.vocab_size = idx + 1

    def encode(self, text, max_len=200):
        """Return exactly *max_len* ids: <SOS> + tokens + <EOS>, zero-padded.

        Tokens beyond max_len - 2 are truncated; unknown words map to <UNK>.
        """
        ids = [self.word2idx.get(word, 3) for word in text.split()]
        ids = [1] + ids[: max_len - 2] + [2]
        return ids + [0] * (max_len - len(ids))

    def decode(self, tokens):
        """Map ids back to words, skipping <PAD> (id 0) entirely."""
        words = [self.idx2word.get(idx, "<UNK>") for idx in tokens if idx > 0]
        return " ".join(words)
53
+
54
# 80/20 split of the chat pairs; the fixed seed keeps the split reproducible.
train_data, test_data = train_test_split(df, test_size=0.2, random_state=42)

# Build the vocabulary from the TRAINING portion only, using both sides
# of each pair so the decoder can emit answer words too.
tokenizer = ScratchTokenizer()
tokenizer.build_vocab(train_data["instruction"].tolist() + train_data["response"].tolist())
60
+
61
# Dataset Class
class TextDataset(Dataset):
    """Chat pairs as (src, tgt) LongTensor pairs for the GPT-style model.

    Each item encodes the instruction (source) and response (target) with
    the shared tokenizer, producing fixed-length id sequences of max_len.
    """

    def __init__(self, data, tokenizer, max_len=200):
        self.data = data            # DataFrame with 'instruction'/'response'
        self.tokenizer = tokenizer  # object exposing encode(text, max_len)
        self.max_len = max_len

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data.iloc[idx]
        # Bug fix: honour self.max_len instead of silently relying on the
        # tokenizer's own default (they happen to agree at 200 today, so
        # existing callers see identical behavior).
        src = torch.tensor(
            self.tokenizer.encode(row["instruction"], max_len=self.max_len),
            dtype=torch.long,
        )
        tgt = torch.tensor(
            self.tokenizer.encode(row["response"], max_len=self.max_len),
            dtype=torch.long,
        )
        return src, tgt
77
+
78
# Wrap both splits; small batches keep memory modest on CPU-only hosts.
train_dataset = TextDataset(train_data, tokenizer)
test_dataset = TextDataset(test_data, tokenizer)
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=8)
83
+
84
# Improved GPT-Style Transformer Model
class GPTModel(nn.Module):
    """Decoder-only transformer for instruction -> response generation.

    The embedded source sequence is fed to the decoder as "memory", so no
    encoder stack is instantiated at all.
    """

    def __init__(self, vocab_size, embed_size=256, num_heads=8, num_layers=6, max_len=200):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Learned absolute position embeddings, shared by source and target.
        self.pos_embedding = nn.Parameter(torch.randn(1, max_len, embed_size))
        decoder_layer = nn.TransformerDecoderLayer(d_model=embed_size, nhead=num_heads)
        self.transformer = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)
        self.fc_out = nn.Linear(embed_size, vocab_size)

    def forward(self, src, tgt):
        """src/tgt: (batch, seq) token ids -> (batch, tgt_seq, vocab) logits."""
        src_emb = self.embedding(src) + self.pos_embedding[:, : src.size(1), :]
        tgt_emb = self.embedding(tgt) + self.pos_embedding[:, : tgt.size(1), :]

        # Causal mask: each target position may not attend to the future.
        causal = nn.Transformer.generate_square_subsequent_mask(tgt.size(1)).to(tgt.device)
        # nn.TransformerDecoder expects (seq, batch, dim) tensors.
        out = self.transformer(
            tgt_emb.permute(1, 0, 2), src_emb.permute(1, 0, 2), tgt_mask=causal
        )
        return self.fc_out(out.permute(1, 0, 2))
105
+
106
# Instantiate the chat model on GPU when one is available; checkpoints are
# later loaded onto this same `device`.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = GPTModel(tokenizer.vocab_size).to(device)
optimizer = optim.AdamW(model.parameters(), lr=2e-4)
criterion = nn.CrossEntropyLoss(label_smoothing=0.1)
111
+
112
+
113
def load_model(model, path="gpt_model.pth"):
    """Restore weights into *model* from *path* if the checkpoint exists.

    When the file is missing, a console warning is printed and the model
    keeps its random initialization.
    """
    if not os.path.exists(path):
        print("Model file not found!")
        return
    model.load_state_dict(torch.load(path, map_location=device))
    model.eval()
    print("Model loaded successfully.")


load_model(model)
122
+
123
# Generate Response
def generate_response(model, query, max_length=200):
    """Greedily decode up to *max_length* tokens answering *query*.

    Decoding starts from <SOS> (id 1) and stops early on <EOS> (id 2);
    the returned string still contains those marker tokens.
    """
    model.eval()
    with torch.no_grad():  # inference only — no autograd bookkeeping
        src = torch.tensor(tokenizer.encode(query)).unsqueeze(0).to(device)
        generated = torch.tensor([[1]]).to(device)  # seed with <SOS>

        for _ in range(max_length):
            logits = model(src, generated)
            nxt = logits[:, -1, :].argmax(dim=-1, keepdim=True)
            generated = torch.cat([generated, nxt], dim=1)
            if nxt.item() == 2:  # <EOS> reached
                break

    return tokenizer.decode(generated.squeeze(0).tolist())
138
+
139
+
140
# ---- Hyper-parameters for the EN -> TE translation transformer ----
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_LEN = 350              # decoding cap for translate()
BATCH_SIZE = 8
EMB_SIZE = 128
NHEAD = 8
FFN_HID_DIM = 256
NUM_ENCODER_LAYERS = 4
NUM_DECODER_LAYERS = 4
NUM_EPOCHS = 18            # kept for reference; the training loop is commented out
MIN_FREQ = 2               # words rarer than this map to <unk>
150
+
151
# ==== Tokenizers ====
# English side rides on spaCy's rule-based tokenizer, lower-cased.
spacy_eng = spacy.load("en_core_web_sm")


def tokenize_en(text):
    """Return lower-cased spaCy tokens for an English sentence."""
    return [token.text.lower() for token in spacy_eng.tokenizer(text)]
155
+
156
def tokenize_te(text):
    """Split Telugu text on single spaces after trimming the ends.

    NOTE(review): split(" ") keeps empty tokens for runs of spaces —
    presumably this matches how the training vocabulary was built, so the
    behavior is deliberately left unchanged.
    """
    stripped = text.strip()
    return stripped.split(" ")
158
+
159
# ==== Vocab Builder ====
def build_vocab(sentences, tokenizer, min_freq):
    """Map every token appearing >= *min_freq* times to a dense integer id.

    Ids 0-3 are reserved for <pad>/<sos>/<eos>/<unk>; remaining ids follow
    the first-seen order of qualifying tokens.
    """
    counts = Counter()
    for sentence in sentences:
        counts.update(tokenizer(sentence))

    vocab = {"<pad>": 0, "<sos>": 1, "<eos>": 2, "<unk>": 3}
    for token, freq in counts.items():
        if freq >= min_freq:
            vocab[token] = len(vocab)
    return vocab
169
+
170
# ==== Dataset ====
class TranslationDataset(Dataset):
    """Parallel EN ('response') / TE ('translated_response') sentence pairs."""

    def __init__(self, df, en_vocab, te_vocab):
        self.data = df
        self.en_vocab = en_vocab
        self.te_vocab = te_vocab

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data.iloc[idx]

        # Wrap both sides in <sos>/<eos>; OOV tokens fall back to <unk>.
        en_tokens = ["<sos>"] + tokenize_en(row["response"]) + ["<eos>"]
        te_tokens = ["<sos>"] + tokenize_te(row["translated_response"]) + ["<eos>"]

        unk_en = self.en_vocab["<unk>"]
        unk_te = self.te_vocab["<unk>"]
        en_ids = [self.en_vocab.get(tok, unk_en) for tok in en_tokens]
        te_ids = [self.te_vocab.get(tok, unk_te) for tok in te_tokens]

        return torch.tensor(en_ids), torch.tensor(te_ids)
191
+
192
# ==== Collate Function ====
def collate_fn(batch):
    """Pad a list of (src, tgt) id tensors into batch-first matrices."""
    srcs, tgts = zip(*batch)
    padded_src = pad_sequence(srcs, padding_value=en_vocab["<pad>"], batch_first=True)
    padded_tgt = pad_sequence(tgts, padding_value=te_vocab["<pad>"], batch_first=True)
    return padded_src, padded_tgt
198
+
199
# ==== Transformer Model ====
class Seq2SeqTransformer(nn.Module):
    """Vanilla nn.Transformer wrapper for EN -> TE translation.

    NOTE(review): no padding masks are supplied, and a causal mask is
    applied to the *source* as well as the target — unusual, but kept
    exactly as-is to stay compatible with the saved trained weights.
    """

    def __init__(self, num_encoder_layers, num_decoder_layers,
                 emb_size, src_vocab_size, tgt_vocab_size,
                 nhead, dim_feedforward=512, dropout=0.1):
        super().__init__()
        self.transformer = nn.Transformer(
            d_model=emb_size,
            nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
        )
        self.src_tok_emb = nn.Embedding(src_vocab_size, emb_size)
        self.tgt_tok_emb = nn.Embedding(tgt_vocab_size, emb_size)
        self.fc_out = nn.Linear(emb_size, tgt_vocab_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src, tgt):
        """(batch, seq) ids -> (batch, tgt_seq, tgt_vocab) logits."""
        src_mask = self.transformer.generate_square_subsequent_mask(src.size(1)).to(DEVICE)
        tgt_mask = self.transformer.generate_square_subsequent_mask(tgt.size(1)).to(DEVICE)

        src_emb = self.dropout(self.src_tok_emb(src))
        tgt_emb = self.dropout(self.tgt_tok_emb(tgt))
        # nn.Transformer consumes (seq, batch, dim) tensors.
        outs = self.transformer(
            src_emb.permute(1, 0, 2),
            tgt_emb.permute(1, 0, 2),
            src_mask=src_mask,
            tgt_mask=tgt_mask,
        )
        return self.fc_out(outs.permute(1, 0, 2))
223
+
224
def translate(model, sentence, en_vocab, te_vocab, te_inv_vocab, max_len=MAX_LEN):
    """Greedy-decode the Telugu translation of an English *sentence*.

    Decoding starts at <sos> and stops at <eos> or after *max_len* steps;
    the returned string excludes both markers.
    """
    model.eval()
    tokens = ["<sos>"] + tokenize_en(sentence) + ["<eos>"]
    unk = en_vocab["<unk>"]
    src_ids = torch.tensor([[en_vocab.get(t, unk) for t in tokens]]).to(DEVICE)
    tgt_ids = torch.tensor([[te_vocab["<sos>"]]]).to(DEVICE)

    for _ in range(max_len):
        logits = model(src_ids, tgt_ids)
        nxt = logits.argmax(-1)[:, -1].item()
        tgt_ids = torch.cat([tgt_ids, torch.tensor([[nxt]]).to(DEVICE)], dim=1)
        if nxt == te_vocab["<eos>"]:
            break

    words = [te_inv_vocab[idx.item()] for idx in tgt_ids[0][1:]]
    if words[-1] == "<eos>":
        words = words[:-1]
    return " ".join(words)
239
+
240
# ==== Load Data ====
# Parallel corpus: 'response' (English) / 'translated_response' (Telugu).
df_telugu = pd.read_csv("merged_translated_responses.csv")
df_telugu = df_telugu.dropna(subset=["response", "translated_response"])
df_telugu["response"] = df_telugu["response"].astype(str)
df_telugu["translated_response"] = df_telugu["translated_response"].astype(str)

# Vocabularies over the full corpus; the inverse map is used at decode time.
en_vocab = build_vocab(df_telugu["response"], tokenize_en, MIN_FREQ)
te_vocab = build_vocab(df_telugu["translated_response"], tokenize_te, MIN_FREQ)
te_inv_vocab = {idx: tok for tok, idx in te_vocab.items()}

# Dataset & DataLoader (only exercised if training is ever re-enabled).
dataset = TranslationDataset(df_telugu, en_vocab, te_vocab)
dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_fn)

pad_idx = te_vocab["<pad>"]
criterion_telugu = nn.CrossEntropyLoss(ignore_index=pad_idx)
# NOTE(review): this optimizer is bound to the chat `model`, not the
# translation model (model_telugu does not exist yet at this point in the
# file); it is unused while the training loop below stays commented out.
optimizer_telugu = optim.Adam(model.parameters(), lr=0.0005)
265
+
266
# ==== Training (disabled) ====
# for epoch in range(NUM_EPOCHS):
#     loss = train(model, dataloader, optimizer, criterion)
#     print(f"Epoch {epoch+1}, Loss: {loss:.4f}")

# ==== Translation model: build the architecture, restore trained weights ====
model_telugu = Seq2SeqTransformer(
    NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE,
    len(en_vocab), len(te_vocab), NHEAD, FFN_HID_DIM,
).to(DEVICE)

# Checkpoint was trained elsewhere; force-load onto CPU for portability.
model_telugu.load_state_dict(
    torch.load("english_telugu_transformer.pth", map_location=torch.device("cpu"))
)
model_telugu.eval()
278
# ---- Flask application ----
app = Flask(__name__)
CORS(app)  # allow cross-origin calls from the web client
280
+
281
+
282
@app.route("/")
def home():
    """Liveness check: greet the caller with the current server time."""
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return jsonify({"message": f"Welcome to TRAVIS API, Time : {now}"})
286
+
287
+
288
@app.route("/intent")
def intents():
    """Return the distinct intent labels found in the chat dataset.

    NOTE(review): relies on df having an 'intent' column — that column is
    not among those validated at load time; confirm against the CSV schema.
    """
    unique_intents = set(df["intent"].dropna())
    return jsonify({"intents": list(unique_intents)})
291
+
292
+
293
+
294
@app.route("/translate", methods=["POST"])
def translate_text():
    """Translate posted English text to Telugu.

    Request JSON: {"text": "..."}; responds with english/telugu/time keys.
    """
    data = request.get_json()
    text = data.get("text", "")
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print("Entered '/translate' at time: ", stamp)
    if not text:
        return jsonify({"error": "Text cannot be empty"}), 400

    # The posted text IS the English side; only translation happens here.
    english_response = text
    started = time.time()
    telugu_response = translate(model_telugu, english_response, en_vocab, te_vocab, te_inv_vocab)
    elapsed = time.time() - started

    return jsonify({
        "english": english_response,
        "telugu": telugu_response,
        "time": elapsed,
    })
314
+
315
@app.route("/generate", methods=["POST"])
def generate_text():
    """Generate an English chat response for a posted query.

    Request JSON: {"query": "..."}; responds with response/time keys.
    """
    data = request.get_json()
    query = data.get("query", "")
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print("Entered '/generate' at time: ", stamp)

    if not query:
        return jsonify({"error": "Query cannot be empty"}), 400

    started = time.time()
    raw = generate_response(model, query)
    elapsed = time.time() - started

    # Strip the tokenizer's special markers before returning to the client.
    cleaned = raw.replace("<EOS>", "").replace("<SOS>", "").strip()

    return jsonify({
        "response": cleaned,
        "time": elapsed,
    })
337
+
338
@app.route("/query", methods=["POST"])
def query_model():
    """Full pipeline: English generation followed by Telugu translation.

    NOTE(review): the module-level `audio_telugu_response` global is written
    here but read nowhere else (/audio takes text from its own request body);
    it is also unsafe under concurrent requests.
    """
    global audio_telugu_response
    data = request.get_json()
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print("Entered '/query' at time: ", stamp)
    query = data.get("query", "")

    if not query:
        return jsonify({"error": "Query cannot be empty"}), 400

    eng_start = time.time()
    english = generate_response(model, query)
    eng_elapsed = time.time() - eng_start

    # Drop the tokenizer's special markers before translating.
    english = english.replace("<EOS>", "").replace("<SOS>", "").strip()

    te_start = time.time()
    telugu = translate(model_telugu, english, en_vocab, te_vocab, te_inv_vocab)
    te_elapsed = time.time() - te_start

    audio_telugu_response = telugu
    return jsonify({
        "telugu": telugu,
        "english": english,
        "eng_time": eng_elapsed,
        "telugu_time": te_elapsed,
    })
361
+
362
+
363
@app.route("/audio", methods=["POST"])
def get_audio():
    """Convert posted Telugu text to MP3 speech via gTTS.

    Request JSON: {"text": "..."}; synthesizes into an in-memory buffer
    (no temp files) and streams it back as audio/mpeg.
    """
    import io  # bug fix: `io` is used below but was never imported in this file

    data = request.get_json()
    text = data.get("text")
    started = time.time()

    if not text:
        return jsonify({"error": "No Response To convert to speech"}), 400

    # Synthesize straight into an in-memory buffer, then rewind for send_file.
    speech = gTTS(text=text, lang="te")
    audio_io = io.BytesIO()
    speech.write_to_fp(audio_io)
    audio_io.seek(0)
    print("telugu_time: ", (time.time() - started))

    return send_file(audio_io, mimetype="audio/mpeg", as_attachment=False)