import textwrap
import torch
import torch.nn as nn
import torch.optim as optim
import spacy
import random
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from sklearn.model_selection import train_test_split
from flask import Flask, request, jsonify, send_file, after_this_request
from collections import Counter
from flask_cors import CORS
import requests
import uuid
import os
import time
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MAX_LEN = 350            # maximum decoding length
BATCH_SIZE = 8
EMB_SIZE = 128           # transformer embedding dimension
NHEAD = 8                # attention heads
FFN_HID_DIM = 256        # feed-forward hidden dimension
NUM_ENCODER_LAYERS = 4
NUM_DECODER_LAYERS = 4
NUM_EPOCHS = 18
MIN_FREQ = 2             # minimum token frequency to enter the vocabulary
PORT = 7680
# ==== Tokenizers ====
spacy_eng = spacy.load("en_core_web_sm")
def tokenize_en(text):
return [tok.text.lower() for tok in spacy_eng.tokenizer(text)]
def tokenize_te(text):
return text.strip().split(" ")
# ==== Vocab Builder ====
def build_vocab(sentences, tokenizer, min_freq):
counter = Counter()
for sent in sentences:
counter.update(tokenizer(sent))
vocab = {'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3}
for word, freq in counter.items():
if freq >= min_freq:
vocab[word] = len(vocab)
return vocab
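# Example: build_vocab(["hi there", "hi"], tokenize_en, 2)
#   -> {'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3, 'hi': 4}
#   ("there" occurs only once, below MIN_FREQ, so it falls back to <unk>)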
# ==== Dataset ====
class TranslationDataset(Dataset):
def __init__(self, df, en_vocab, te_vocab):
self.data = df
self.en_vocab = en_vocab
self.te_vocab = te_vocab
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
en = self.data.iloc[idx]['response']
te = self.data.iloc[idx]['translated_response']
en_tokens = ['<sos>'] + tokenize_en(en) + ['<eos>']
te_tokens = ['<sos>'] + tokenize_te(te) + ['<eos>']
en_ids = [self.en_vocab.get(tok, self.en_vocab['<unk>']) for tok in en_tokens]
te_ids = [self.te_vocab.get(tok, self.te_vocab['<unk>']) for tok in te_tokens]
return torch.tensor(en_ids), torch.tensor(te_ids)
# ==== Collate Function ====
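# Pads every sequence in a batch to the batch's max length. Note this relies
# on the module-level en_vocab/te_vocab defined further down; Python resolves
# them at call time, after the vocabularies have been built.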
def collate_fn(batch):
src_batch, tgt_batch = zip(*batch)
src_batch = pad_sequence(src_batch, padding_value=en_vocab['<pad>'], batch_first=True)
tgt_batch = pad_sequence(tgt_batch, padding_value=te_vocab['<pad>'], batch_first=True)
return src_batch, tgt_batch
# ==== Transformer Model ====
class Seq2SeqTransformer(nn.Module):
def __init__(self, num_encoder_layers, num_decoder_layers,
emb_size, src_vocab_size, tgt_vocab_size,
nhead, dim_feedforward=512, dropout=0.1):
super().__init__()
self.transformer = nn.Transformer(d_model=emb_size, nhead=nhead,
num_encoder_layers=num_encoder_layers,
num_decoder_layers=num_decoder_layers,
dim_feedforward=dim_feedforward, dropout=dropout)
self.src_tok_emb = nn.Embedding(src_vocab_size, emb_size)
self.tgt_tok_emb = nn.Embedding(tgt_vocab_size, emb_size)
self.fc_out = nn.Linear(emb_size, tgt_vocab_size)
self.dropout = nn.Dropout(dropout)
def forward(self, src, tgt):
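        # NOTE: a causal (square subsequent) mask on the *source* is unusual;
        # encoders normally attend to the full source and mask only padding.
        # Kept as-is so inference matches how the saved checkpoint was trained.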
src_mask = self.transformer.generate_square_subsequent_mask(src.size(1)).to(DEVICE)
tgt_mask = self.transformer.generate_square_subsequent_mask(tgt.size(1)).to(DEVICE)
src_emb = self.dropout(self.src_tok_emb(src))
tgt_emb = self.dropout(self.tgt_tok_emb(tgt))
outs = self.transformer(src_emb.permute(1,0,2), tgt_emb.permute(1,0,2),
src_mask=src_mask, tgt_mask=tgt_mask)
return self.fc_out(outs.permute(1,0,2))
def translate(model, sentence, en_vocab, te_vocab, te_inv_vocab, max_len=MAX_LEN):
model.eval()
tokens = ['<sos>'] + tokenize_en(sentence) + ['<eos>']
src_ids = torch.tensor([[en_vocab.get(t, en_vocab['<unk>']) for t in tokens]]).to(DEVICE)
tgt_ids = torch.tensor([[te_vocab['<sos>']]]).to(DEVICE)
for i in range(max_len):
out = model(src_ids, tgt_ids)
next_token = out.argmax(-1)[:, -1].item()
tgt_ids = torch.cat([tgt_ids, torch.tensor([[next_token]]).to(DEVICE)], dim=1)
if next_token == te_vocab['<eos>']:
break
translated = [te_inv_vocab[idx.item()] for idx in tgt_ids[0][1:]]
return ' '.join(translated[:-1]) if translated[-1] == '<eos>' else ' '.join(translated)
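# Example greedy-decoding call (assumes a trained model and the vocabularies
# built below are in scope):
#   translate(model_telugu, "how are you", en_vocab, te_vocab, te_inv_vocab)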
# ==== Load Data ====
df_telugu = pd.read_csv("merged_translated_responses.csv")  # columns: 'response' (English), 'translated_response' (Telugu)
# Clean NaN or non-string entries
df_telugu = df_telugu.dropna(subset=['response', 'translated_response'])
# Ensure all entries are strings
df_telugu['response'] = df_telugu['response'].astype(str)
df_telugu['translated_response'] = df_telugu['translated_response'].astype(str)
# Build vocabularies
en_vocab = build_vocab(df_telugu['response'], tokenize_en, MIN_FREQ)
te_vocab = build_vocab(df_telugu['translated_response'], tokenize_te, MIN_FREQ)
te_inv_vocab = {idx: tok for tok, idx in te_vocab.items()}
# Prepare Dataset & DataLoader
dataset = TranslationDataset(df_telugu, en_vocab, te_vocab)
dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_fn)
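# train_test_split is imported above but never used; a held-out validation
# split could be wired in like this (a sketch, not part of the original flow):
#   train_df, val_df = train_test_split(df_telugu, test_size=0.1, random_state=42)
#   val_loader = DataLoader(TranslationDataset(val_df, en_vocab, te_vocab),
#                           batch_size=BATCH_SIZE, collate_fn=collate_fn)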
# Initialize Model
model = Seq2SeqTransformer(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE,
len(en_vocab), len(te_vocab), NHEAD, FFN_HID_DIM).to(DEVICE)
pad_idx = te_vocab['<pad>']
criterion_telugu = nn.CrossEntropyLoss(ignore_index=pad_idx)
optimizer_telugu = optim.Adam(model.parameters(), lr=0.0005)
# ==== Training ====
# The loop below was left commented out; it relies on a train() helper that is
# not defined in this file (a minimal sketch follows).
# for epoch in range(NUM_EPOCHS):
#     loss = train(model, dataloader, optimizer_telugu, criterion_telugu)
#     print(f"Epoch {epoch+1}, Loss: {loss:.4f}")
# ==== Try Translation ====
model_telugu = Seq2SeqTransformer(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE,
                                  len(en_vocab), len(te_vocab), NHEAD, FFN_HID_DIM).to(DEVICE)
# Load saved weights
model_telugu.load_state_dict(torch.load("english_telugu_transformer.pth", map_location=torch.device('cpu')))
model_telugu.eval()
app = Flask(__name__)
CORS(app)
@app.route("/")
def home():
return jsonify({"message": "hellooooooooo"})
@app.route("/translate", methods=["POST"])
def translate_text():
    data = request.get_json(silent=True) or {}
    text = data.get("text", "")
    if not text:
        return jsonify({"error": "Text cannot be empty"}), 400
    # The English "response" is simply the input text; no generation step here
    english_response = text
    start = time.time()
    # Translate English to Telugu with the loaded transformer
    telugu_response = translate(model_telugu, english_response, en_vocab, te_vocab, te_inv_vocab)
    end = time.time()
return jsonify({
"english": english_response,
"telugu": telugu_response,
"time": end-start
})
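# Example request once the server is running (hypothetical local test):
#   curl -X POST http://localhost:7680/translate \
#        -H "Content-Type: application/json" -d '{"text": "hello"}'
if __name__ == "__main__":
    # PORT is defined above but was never used; running the app on it is an
    # assumption. (Hugging Face Spaces conventionally expects port 7860, so
    # 7680 may be a transposition.)
    app.run(host="0.0.0.0", port=PORT)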