"""
FastAPI backend for Nigerian Pidgin Next-Word Prediction.
Serves both the LSTM and Trigram models as a REST API.
Deployable to Hugging Face Spaces with the Docker SDK.
"""

from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List
import torch
import torch.nn as nn
import pickle
import re
import os

# =============================================================================
# FastAPI App
# =============================================================================
app = FastAPI(
    title="Nigerian Pidgin Next-Word Predictor API",
    description="LSTM + Trigram models for Nigerian Pidgin next-word prediction",
    version="1.0.0"
)

# Enable CORS for all origins
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# =============================================================================
# Special Tokens
# =============================================================================
PAD_TOKEN = '<PAD>'
UNK_TOKEN = '<UNK>'
SOS_TOKEN = '<SOS>'
EOS_TOKEN = '</EOS>'
START_TOKEN = '<s>'
END_TOKEN = '</s>'

# =============================================================================
# Text Processing
# =============================================================================
def clean_text(text: str) -> str:
    """Lowercase text and strip URLs, @mentions, and '#' markers from hashtags."""
    text = text.lower()
    text = re.sub(r'https?://\S+', '', text)   # drop http(s) URLs
    text = re.sub(r'www\.\S+', '', text)       # drop bare www URLs
    text = re.sub(r'@\w+', '', text)           # drop @mentions
    text = re.sub(r'#(\w+)', r'\1', text)      # keep the hashtag word, drop '#'
    text = re.sub(r'\s+', ' ', text)           # collapse whitespace
    return text.strip()

def tokenize(text: str) -> List[str]:
    """Split text into word and punctuation tokens,
    e.g. "how you dey?" -> ['how', 'you', 'dey', '?']."""
    return re.findall(r"[\w']+|[.,!?;:]", text)

# =============================================================================
# LSTM Model
# =============================================================================
class LSTMLanguageModel(nn.Module):
    def __init__(self, vocab_size: int, embed_dim: int = 256, 
                 hidden_dim: int = 512, num_layers: int = 2, dropout: float = 0.3):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, num_layers=num_layers, 
                           batch_first=True, dropout=dropout if num_layers > 1 else 0)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_dim, vocab_size)
    
    def forward(self, x):
        embedded = self.embedding(x)         # (batch, seq_len, embed_dim)
        lstm_out, _ = self.lstm(embedded)    # (batch, seq_len, hidden_dim)
        last_out = lstm_out[:, -1, :]        # hidden state at the final timestep
        out = self.dropout(last_out)
        return self.fc(out)                  # logits over the vocabulary

# =============================================================================
# Trigram Model
# =============================================================================
# Import TrigramLM from src so pickle can resolve the class when loading the saved model.
from src.trigram_model import TrigramLM  # noqa: F401 (needed for unpickling)
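# The unpickled TrigramLM instance is expected to expose a .vocab collection and a
# .predict_next_words(context, top_k) method returning (word, probability) pairs;
# both are relied on by the prediction helpers below.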

# =============================================================================
# Global Models (loaded once at startup)
# =============================================================================
lstm_model = None
word_to_idx = None
idx_to_word = None
trigram_model = None

@app.on_event("startup")
async def load_models():
    global lstm_model, word_to_idx, idx_to_word, trigram_model
    
    # 1. Load LSTM
    try:
        checkpoint = torch.load('model/lstm_pidgin_model.pt', map_location='cpu')
        word_to_idx = checkpoint['word_to_idx']
        idx_to_word = checkpoint['idx_to_word']
        vocab_size = checkpoint['vocab_size']
        
        lstm_model = LSTMLanguageModel(vocab_size=vocab_size)
        lstm_model.load_state_dict(checkpoint['model_state_dict'])
        lstm_model.eval()
        print(f"LSTM model loaded! Vocab size: {vocab_size}")
    except Exception as e:
        print(f"Failed to load LSTM model: {e}")

    # 2. Load Trigram
    try:
        with open('model/trigram_model.pkl', 'rb') as f:
            trigram_model = pickle.load(f)
        print(f"Trigram model loaded! Vocab size: {len(trigram_model.vocab)}")
    except Exception as e:
        print(f"Failed to load Trigram model: {e}")

# =============================================================================
# Request/Response Models
# =============================================================================
class PredictionRequest(BaseModel):
    context: str
    top_k: int = 5
    model: str = "lstm"  # "lstm", "trigram", or "both"

class Prediction(BaseModel):
    word: str
    probability: float

class PredictionResponse(BaseModel):
    context: str
    model: str
    predictions: List[Prediction]

class BothModelsResponse(BaseModel):
    context: str
    lstm: List[Prediction]
    trigram: List[Prediction]

# =============================================================================
# Prediction Functions
# =============================================================================
def predict_lstm(context: str, top_k: int = 5) -> List[Prediction]:
    """Return the top-k next-word predictions from the LSTM model."""
    if lstm_model is None or not context.strip():
        return []
    
    tokens = tokenize(clean_text(context))
    if not tokens:
        return []
    
    unk_idx = word_to_idx.get(UNK_TOKEN, 1)
    indices = [word_to_idx.get(t, unk_idx) for t in tokens]
    x = torch.tensor([indices], dtype=torch.long)
    
    with torch.no_grad():
        logits = lstm_model(x)
        probs = torch.softmax(logits, dim=-1)
    
    # Fetch a few extra candidates so the special tokens can be filtered out below.
    top_probs, top_indices = torch.topk(probs[0], min(top_k + 5, probs.size(-1)))

    results = []
    for prob, idx in zip(top_probs.tolist(), top_indices.tolist()):
        # idx_to_word may be keyed by str or int depending on how the checkpoint was saved.
        word = idx_to_word.get(str(idx), idx_to_word.get(idx, UNK_TOKEN))
        if word not in [PAD_TOKEN, UNK_TOKEN, SOS_TOKEN, EOS_TOKEN]:
            results.append(Prediction(word=word, probability=float(prob)))
        if len(results) >= top_k:
            break
    
    return results

def predict_trigram(context: str, top_k: int = 5) -> List[Prediction]:
    """Return the top-k next-word predictions from the Trigram model."""
    if trigram_model is None or not context.strip():
        return []
    
    preds = trigram_model.predict_next_words(context, top_k)
    return [Prediction(word=w, probability=p) for w, p in preds]

# =============================================================================
# API Endpoints
# =============================================================================
@app.get("/")
async def root():
    return {
        "message": "Nigerian Pidgin Next-Word Predictor API",
        "endpoints": {
            "/predict": "POST - Get predictions",
            "/predict/lstm": "GET - LSTM predictions",
            "/predict/trigram": "GET - Trigram predictions",
            "/health": "GET - Health check",
            "/debug": "GET - System info"
        }
    }

@app.get("/health")
async def health():
    return {
        "status": "healthy",
        "lstm_loaded": lstm_model is not None,
        "trigram_loaded": trigram_model is not None,
        "vocab_size": len(word_to_idx) if word_to_idx else 0
    }

@app.get("/debug")
async def debug_info():
    """Return debug information about the environment."""
    import sys
    return {
        "cwd": os.getcwd(),
        "files_root": os.listdir('.'),
        "files_model": os.listdir('model') if os.path.exists('model') else "MISSING",
        "files_src": os.listdir('src') if os.path.exists('src') else "MISSING",
        "python_path": sys.path,
        "lstm_model_type": str(type(lstm_model)) if lstm_model else "None",
        "trigram_model_type": str(type(trigram_model)) if trigram_model else "None",
    }

@app.post("/predict", response_model=BothModelsResponse)
async def predict(request: PredictionRequest):
    """Get predictions from both models."""
    try:
        lstm_preds = predict_lstm(request.context, request.top_k)
        trigram_preds = predict_trigram(request.context, request.top_k)
        
        return BothModelsResponse(
            context=request.context,
            lstm=lstm_preds,
            trigram=trigram_preds
        )
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Prediction Failed: {str(e)}")

@app.get("/predict/lstm")
async def predict_lstm_endpoint(context: str, top_k: int = 5):
    """Get LSTM predictions."""
    if lstm_model is None:
        raise HTTPException(status_code=503, detail="LSTM model not loaded")
    
    try:
        predictions = predict_lstm(context, top_k)
        return PredictionResponse(
            context=context,
            model="lstm",
            predictions=predictions
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"LSTM Prediction Failed: {str(e)}")

@app.get("/predict/trigram")
async def predict_trigram_endpoint(context: str, top_k: int = 5):
    """Get Trigram predictions."""
    if trigram_model is None:
        raise HTTPException(status_code=503, detail="Trigram model not loaded")
    
    try:
        predictions = predict_trigram(context, top_k)
        return PredictionResponse(
            context=context,
            model="trigram",
            predictions=predictions
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Trigram Prediction Failed: {str(e)}")

# =============================================================================
# Run with: uvicorn api:app --reload
# =============================================================================
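
# Optional entry point for running the API directly (python api.py). A minimal
# sketch: port 7860 is assumed because it is the default port Hugging Face Spaces
# expects with the Docker SDK; change it if your deployment differs.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)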