Spaces:
Sleeping
rishitha committed
Update app.py
app.py
CHANGED
@@ -61,7 +61,7 @@ class TextDataset(Dataset):
 # Model
 class GPTModel(nn.Module):
     def __init__(self, vocab_size, embed_size=256, num_heads=8, num_layers=6, max_len=200):
-        super(GPTModel, self)._init_()
+        super(GPTModel, self).__init__()
         self.embedding = nn.Embedding(vocab_size, embed_size)
         self.pos_embedding = nn.Parameter(torch.randn(1, max_len, embed_size))
         self.transformer = nn.TransformerDecoder(
@@ -120,127 +120,4 @@ def query_model():
     response = generate_response(model, query)
     return jsonify({"query": query, "response": response})
 
-# DO NOT ADD app.run()
-import torch
-import torch.nn as nn
-import torch.optim as optim
-import pandas as pd
-from torch.utils.data import Dataset, DataLoader
-from flask import Flask, request, jsonify
-from sklearn.model_selection import train_test_split
-import os
-
-# Load data
-url = "https://drive.google.com/uc?id=1RCZShB5ohy1HdU-mogcP16TbeVv9txpY"
-df = pd.read_csv(url)
-
-# Tokenizer
-class ScratchTokenizer:
-    def __init__(self):
-        self.word2idx = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3}
-        self.idx2word = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
-        self.vocab_size = 4
-
-    def build_vocab(self, texts):
-        for text in texts:
-            for word in text.split():
-                if word not in self.word2idx:
-                    self.word2idx[word] = self.vocab_size
-                    self.idx2word[self.vocab_size] = word
-                    self.vocab_size += 1
-
-    def encode(self, text, max_len=200):
-        tokens = [self.word2idx.get(word, 3) for word in text.split()]
-        tokens = [1] + tokens[:max_len - 2] + [2]
-        return tokens + [0] * (max_len - len(tokens))
-
-    def decode(self, tokens):
-        return " ".join([self.idx2word.get(idx, "<UNK>") for idx in tokens if idx > 0])
-
-# Train-Test Split
-train_data, test_data = train_test_split(df, test_size=0.2, random_state=42)
-
-# Initialize Tokenizer
-tokenizer = ScratchTokenizer()
-tokenizer.build_vocab(train_data["instruction"].tolist() + train_data["response"].tolist())
-
-# Dataset Class
-class TextDataset(Dataset):
-    def __init__(self, data, tokenizer, max_len=200):
-        self.data = data
-        self.tokenizer = tokenizer
-        self.max_len = max_len
-
-    def _len_(self):
-        return len(self.data)
-
-    def _getitem_(self, idx):
-        src_text = self.data.iloc[idx]["instruction"]
-        tgt_text = self.data.iloc[idx]["response"]
-        src = torch.tensor(self.tokenizer.encode(src_text), dtype=torch.long)
-        tgt = torch.tensor(self.tokenizer.encode(tgt_text), dtype=torch.long)
-        return src, tgt
-
-# Model
-class GPTModel(nn.Module):
-    def __init__(self, vocab_size, embed_size=256, num_heads=8, num_layers=6, max_len=200):
-        super(GPTModel, self)._init_()
-        self.embedding = nn.Embedding(vocab_size, embed_size)
-        self.pos_embedding = nn.Parameter(torch.randn(1, max_len, embed_size))
-        self.transformer = nn.TransformerDecoder(
-            nn.TransformerDecoderLayer(d_model=embed_size, nhead=num_heads),
-            num_layers=num_layers
-        )
-        self.fc_out = nn.Linear(embed_size, vocab_size)
-
-    def forward(self, src, tgt):
-        src_emb = self.embedding(src) + self.pos_embedding[:, :src.size(1), :]
-        tgt_emb = self.embedding(tgt) + self.pos_embedding[:, :tgt.size(1), :]
-        tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(1)).to(tgt.device)
-        output = self.transformer(tgt_emb.permute(1, 0, 2), src_emb.permute(1, 0, 2), tgt_mask=tgt_mask)
-        return self.fc_out(output.permute(1, 0, 2))
-
-# Load model
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model = GPTModel(tokenizer.vocab_size).to(device)
-
-def load_model(model, path="gpt_model.pth"):
-    if os.path.exists(path):
-        model.load_state_dict(torch.load(path, map_location=device))
-        model.eval()
-        print("Model loaded successfully.")
-    else:
-        print("Model file not found!")
-
-load_model(model)
-
-# Generate Response
-def generate_response(model, query, max_length=200):
-    model.eval()
-    src = torch.tensor(tokenizer.encode(query)).unsqueeze(0).to(device)
-    tgt = torch.tensor([[1]]).to(device)  # <SOS>
-    for _ in range(max_length):
-        output = model(src, tgt)
-        next_word = output.argmax(-1)[:, -1].unsqueeze(1)
-        tgt = torch.cat([tgt, next_word], dim=1)
-        if next_word.item() == 2:  # <EOS>
-            break
-    return tokenizer.decode(tgt.squeeze(0).tolist())
-
-# Flask App
-app = Flask(_name_)
-
-@app.route("/")
-def home():
-    return {"message": "Transformer-based Response Generator API is running!"}
-
-@app.route("/query", methods=["POST"])
-def query_model():
-    data = request.get_json()
-    query = data.get("query", "")
-    if not query:
-        return jsonify({"error": "Query cannot be empty"}), 400
-    response = generate_response(model, query)
-    return jsonify({"query": query, "response": response})
-
-# DO NOT ADD app.run()
+# DO NOT ADD app.run()
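The commit does two things: on line 64 it replaces `super(GPTModel, self)._init_()` with `super(GPTModel, self).__init__()`, and it deletes a duplicated copy of the whole script that had been appended after the `# DO NOT ADD app.run()` marker. Single-underscore dunder names, of which the removed block contains several (`_init_`, `_len_`, `_getitem_`, `_name_`), are ordinary attribute names to Python, so the special-method machinery never fires. A minimal sketch of the failure modes, not taken from the commit itself:

import torch.nn as nn
from torch.utils.data import Dataset

class BrokenModule(nn.Module):
    def __init__(self):
        # nn.Module defines __init__, not _init_, so this lookup fails
        # with AttributeError before the module is ever set up.
        super(BrokenModule, self)._init_()

class BrokenDataset(Dataset):
    def _len_(self):           # should be __len__
        return 1
    def _getitem_(self, idx):  # should be __getitem__
        return idx

try:
    BrokenModule()
except AttributeError as e:
    print("module:", e)    # 'super' object has no attribute '_init_'

try:
    len(BrokenDataset())
except TypeError as e:
    print("dataset:", e)   # object of type 'BrokenDataset' has no len()

class FixedModule(nn.Module):
    def __init__(self):
        super(FixedModule, self).__init__()  # the fix applied in this commit
        self.proj = nn.Linear(4, 4)

print("fixed:", FixedModule())  # instantiates normally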
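For completeness, a hedged usage sketch for the `/query` route kept by this commit. The base URL is a placeholder (the Space's real address is not shown on this page), and port 7860 is only the usual Hugging Face Spaces default; the query string is likewise illustrative:

import requests

# Placeholder base URL (assumption): substitute the running Space's address.
BASE_URL = "http://localhost:7860"

# The route expects a JSON body with a "query" field and returns
# {"query": ..., "response": ...}; an empty query yields a 400 error.
resp = requests.post(f"{BASE_URL}/query", json={"query": "How do I reset my password?"})
print(resp.status_code)
print(resp.json())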