# ASRCONTRI / indiclid_inference.py
import os
import re
import pandas as pd
import fasttext
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer
from huggingface_hub import hf_hub_download
# ------------------------------
# Download required models
# ------------------------------
print("Downloading IndicLID models from Hugging Face...")
FTN_PATH = hf_hub_download("ai4bharat/IndicLID-FTN", filename="model_baseline_roman.bin")
FTR_PATH = hf_hub_download("ai4bharat/IndicLID-FTR", filename="model_baseline_roman.bin")
BERT_PATH = hf_hub_download("ai4bharat/IndicLID-BERT", filename="basline_nn_simple.pt")
print("Download complete.")
# ------------------------------
# Data helper for BERT batching
# ------------------------------
class IndicBERT_Data(Dataset):
    """Wraps (index, text) pairs so the BERT model can be run in batches."""

    def __init__(self, indices, X):
        self.x = list(X)
        self.i = list(indices)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.i[idx], self.x[idx]
# ------------------------------
# Main IndicLID Class
# ------------------------------
class IndicLID:
    def __init__(self, input_threshold=0.5, roman_lid_threshold=0.6):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Three-stage pipeline: fastText for native scripts (FTN), fastText for
        # romanized text (FTR), and a BERT fallback for low-confidence roman inputs.
        self.IndicLID_FTN = fasttext.load_model(FTN_PATH)
        self.IndicLID_FTR = fasttext.load_model(FTR_PATH)
        self.IndicLID_BERT = torch.load(BERT_PATH, map_location=self.device)
        self.IndicLID_BERT.eval()
        self.IndicLID_BERT_tokenizer = AutoTokenizer.from_pretrained("ai4bharat/IndicBERTv2-MLM-only")

        # Fraction of roman characters above which an input is routed to the roman models.
        self.input_threshold = input_threshold
        # FTR confidence below which a roman input is re-classified with the BERT model.
        self.model_threshold = roman_lid_threshold

        # Official label map (index -> language code)
        self.label_map_reverse = {
            0: 'asm_Latn', 1: 'ben_Latn', 2: 'brx_Latn', 3: 'guj_Latn',
            4: 'hin_Latn', 5: 'kan_Latn', 6: 'kas_Latn', 7: 'kok_Latn',
            8: 'mai_Latn', 9: 'mal_Latn', 10: 'mni_Latn', 11: 'mar_Latn',
            12: 'nep_Latn', 13: 'ori_Latn', 14: 'pan_Latn', 15: 'san_Latn',
            16: 'snd_Latn', 17: 'tam_Latn', 18: 'tel_Latn', 19: 'urd_Latn',
            20: 'eng_Latn', 21: 'other', 22: 'asm_Beng', 23: 'ben_Beng',
            24: 'brx_Deva', 25: 'doi_Deva', 26: 'guj_Gujr', 27: 'hin_Deva',
            28: 'kan_Knda', 29: 'kas_Arab', 30: 'kas_Deva', 31: 'kok_Deva',
            32: 'mai_Deva', 33: 'mal_Mlym', 34: 'mni_Beng', 35: 'mni_Meti',
            36: 'mar_Deva', 37: 'nep_Deva', 38: 'ori_Orya', 39: 'pan_Guru',
            40: 'san_Deva', 41: 'sat_Olch', 42: 'snd_Arab', 43: 'tam_Tamil',
            44: 'tel_Telu', 45: 'urd_Arab'
        }
    def char_percent_check(self, text):
        # Fraction of alphabetic characters that are roman (A-Za-z).
        total_chars = sum(c.isalpha() for c in text)
        roman_chars = sum(bool(re.match(r"[A-Za-z]", c)) for c in text)
        return roman_chars / total_chars if total_chars else 0
    def native_inference(self, data, out_dict):
        # fastText prediction for native-script inputs.
        if not data:
            return out_dict
        texts = [x[1] for x in data]
        preds = self.IndicLID_FTN.predict(texts)
        for (idx, txt), lbls, scrs in zip(data, preds[0], preds[1]):
            # Strip the fastText "__label__" prefix (9 characters).
            out_dict[idx] = (txt, lbls[0][9:], float(scrs[0]), 'IndicLID-FTN')
        return out_dict
    def ftr_inference(self, data, out_dict, batch_size):
        # fastText prediction for romanized inputs; low-confidence cases fall through to BERT.
        if not data:
            return out_dict
        texts = [x[1] for x in data]
        preds = self.IndicLID_FTR.predict(texts)
        bert_inputs = []
        for (idx, txt), lbls, scrs in zip(data, preds[0], preds[1]):
            if float(scrs[0]) > self.model_threshold:
                out_dict[idx] = (txt, lbls[0][9:], float(scrs[0]), 'IndicLID-FTR')
            else:
                bert_inputs.append((idx, txt))
        return self.bert_inference(bert_inputs, out_dict, batch_size)
    def bert_inference(self, data, out_dict, batch_size):
        # BERT prediction for romanized inputs that fastText could not resolve confidently.
        if not data:
            return out_dict
        ds = IndicBERT_Data([x[0] for x in data], [x[1] for x in data])
        dl = DataLoader(ds, batch_size=batch_size)
        with torch.no_grad():
            for idxs, texts in dl:
                enc = self.IndicLID_BERT_tokenizer(
                    list(texts), return_tensors="pt", padding=True,
                    truncation=True, max_length=512
                ).to(self.device)
                outputs = self.IndicLID_BERT(**enc)
                probs = torch.softmax(outputs.logits, dim=1)
                preds = torch.argmax(outputs.logits, dim=1)
                for batch_i, p in enumerate(preds):
                    i = idxs[batch_i].item()
                    label_idx = p.item()
                    label = self.label_map_reverse[label_idx]
                    score = probs[batch_i, label_idx].item()
                    out_dict[i] = (texts[batch_i], label, score, 'IndicLID-BERT')
        return out_dict
    def batch_predict(self, texts, batch_size=8):
        # Route each input to the native or roman pipeline based on its script.
        native, roman = [], []
        for i, t in enumerate(texts):
            if self.char_percent_check(t) > self.input_threshold:
                roman.append((i, t))
            else:
                native.append((i, t))
        out_dict = {}
        out_dict = self.native_inference(native, out_dict)
        out_dict = self.ftr_inference(roman, out_dict, batch_size)
        # Return results in the original input order as (text, label, score, model) tuples.
        return [out_dict[i] for i in sorted(out_dict.keys())]
# ------------------------------
# Quick test if run directly
# ------------------------------
if __name__ == "__main__":
    detector = IndicLID()
    samples = [
        "यह एक हिंदी वाक्य है।",      # Hindi (native)
        "ennai pudikkuma?",           # Tamil (romanized)
        "ఇది ఒక తెలుగు వాక్యం",        # Telugu (native)
        "Hello, how are you?"         # English
    ]
    results = detector.batch_predict(samples)
    for text, label, score, model in results:
        print(f"Text: {text}\nPredicted: {label} | Score: {score:.4f} | Model: {model}\n")