Update app.py
app.py CHANGED
@@ -1,75 +1,95 @@
 import gradio as gr
-
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer

-# ---
-MODELS = {
+# --- CONFIGURATION ---
+MODELS_CONFIG = {
     "Phase 2: Stable (Formal)": {
         "id": "st192011/Maltese-EuroLLM-1.7B-Phase2-Stable",
-        "description": (
-            "The 'Bureaucrat Bot'. Built upon a foundational adaptation phase that mixed "
-            "monolingual Maltese and Italian to bridge morphological roots. This version "
-            "was fine-tuned on high-fidelity EU and governmental parallel corpora, "
-            "optimizing it for extreme formal precision and administrative accuracy."
-        ),
+        "description": "The 'Bureaucrat Bot'. Optimized for formal precision.",
         "chrf": "60.18",
         "comet": "0.6431"
     },
     "Phase 4: Anchored (Native)": {
         "id": "st192011/Maltese-EuroLLM-1.7B-Phase4-Anchored",
-        "description": (
-            "The 'Native Speaker'. An evolution of Phase 2 utilizing a curriculum-based "
-            "'Full Circle' approach. It integrates synthesized reasoning chains (CoT) "
-            "that allow the model to process linguistic logic before translating. By mixing "
-            "all previous data types, it anchors factual accuracy to native-level phrasing "
-            "and cultural awareness."
-        ),
+        "description": "The 'Native Speaker'. Optimized for cultural awareness and logic.",
         "chrf": "52.68",
         "comet": "0.6567"
     }
 }

+# --- MODEL LOADING ---
+# We load them globally so they stay in memory (this requires ~14GB RAM total)
+print("Loading models to CPU... this may take a few minutes.")
+
+# Load Model 2
+tokenizer_p2 = AutoTokenizer.from_pretrained(MODELS_CONFIG["Phase 2: Stable (Formal)"]["id"])
+model_p2 = AutoModelForCausalLM.from_pretrained(
+    MODELS_CONFIG["Phase 2: Stable (Formal)"]["id"],
+    device_map="cpu",
+    torch_dtype=torch.float32
+)
+
+# Load Model 4
+tokenizer_p4 = AutoTokenizer.from_pretrained(MODELS_CONFIG["Phase 4: Anchored (Native)"]["id"])
+model_p4 = AutoModelForCausalLM.from_pretrained(
+    MODELS_CONFIG["Phase 4: Anchored (Native)"]["id"],
+    device_map="cpu",
+    torch_dtype=torch.float32
+)
+
+def local_translate(model, tokenizer, text, temp):
+    prompt = f"### INGLIŻ: {text}\n### MALTI:"
+    inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
+
+    with torch.no_grad():
+        output_tokens = model.generate(
+            **inputs,
+            max_new_tokens=150,
+            temperature=temp,
+            do_sample=True if temp > 0.1 else False,
+            repetition_penalty=1.2,
+            pad_token_id=tokenizer.eos_token_id
+        )
+
+    # Decode only the new tokens
+    full_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
+    # Extract the part after ### MALTI:
+    maltese_text = full_text.split("### MALTI:")[-1].strip()
+    return maltese_text
+
 def translate_logic(text, selected_models, temp):
-
+    out_p2 = "Model not selected."
+    out_p4 = "Model not selected."

-
-
-
-
-    # Prompt format consistent with training
-    prompt = f"### INGLIŻ: {text}\n### MALTI:"
-
+    if not text.strip():
+        return "Please enter text.", "Please enter text."
+
+    if "Phase 2: Stable (Formal)" in selected_models:
         try:
-
-
-
-
-
-
-            )
-            # Clean up the response
-            clean_output = output.strip().split("### MALTI:")[-1].replace("<|endoftext|>", "").strip()
-            results[model_name] = clean_output
+            out_p2 = local_translate(model_p2, tokenizer_p2, text, temp)
+        except Exception as e:
+            out_p2 = f"Error: {str(e)}"
+
+    if "Phase 4: Anchored (Native)" in selected_models:
+        try:
+            out_p4 = local_translate(model_p4, tokenizer_p4, text, temp)
         except Exception as e:
-
+            out_p4 = f"Error: {str(e)}"

-    # Return formatted outputs for the UI
-    # We return a list of outputs corresponding to the two textboxes
-    out_p2 = results.get("Phase 2: Stable (Formal)", "Model not selected.")
-    out_p4 = results.get("Phase 4: Anchored (Native)", "Model not selected.")
-
     return out_p2, out_p4

 # --- GRADIO UI ---
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown("# 🇲🇹 Maltese-MT Lab")
-    gr.Markdown("
+    gr.Markdown("# 🇲🇹 Maltese-MT Lab (Local CPU)")
+    gr.Markdown("Comparing English-to-Maltese EuroLLM models running directly on this machine.")

     with gr.Row():
         with gr.Column(scale=2):
             input_text = gr.Textbox(label="English Source Text", placeholder="Enter English text here...", lines=4)
             model_selector = gr.CheckboxGroup(
-                choices=list(MODELS.keys()),
-                value=list(MODELS.keys()),
+                choices=list(MODELS_CONFIG.keys()),
+                value=list(MODELS_CONFIG.keys()),
                 label="Select Models to Compare"
             )
             temp_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.1, step=0.1, label="Creativity (Temperature)")
@@ -79,14 +99,12 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         with gr.Column():
             gr.Markdown("### Phase 2: Stable")
             p2_out = gr.Textbox(label="Output", interactive=False, lines=5)
-            gr.Markdown(f"**
-            gr.Markdown(f"**Metrics:** ChrF++: `{MODELS['Phase 2: Stable (Formal)']['chrf']}` | COMET: `{MODELS['Phase 2: Stable (Formal)']['comet']}`")
+            gr.Markdown(f"**ChrF++:** `{MODELS_CONFIG['Phase 2: Stable (Formal)']['chrf']}` | **COMET:** `{MODELS_CONFIG['Phase 2: Stable (Formal)']['comet']}`")

         with gr.Column():
             gr.Markdown("### Phase 4: Anchored")
             p4_out = gr.Textbox(label="Output", interactive=False, lines=5)
-            gr.Markdown(f"**
-            gr.Markdown(f"**Metrics:** ChrF++: `{MODELS['Phase 4: Anchored (Native)']['chrf']}` | COMET: `{MODELS['Phase 4: Anchored (Native)']['comet']}`")
+            gr.Markdown(f"**ChrF++:** `{MODELS_CONFIG['Phase 4: Anchored (Native)']['chrf']}` | **COMET:** `{MODELS_CONFIG['Phase 4: Anchored (Native)']['comet']}`")

     gr.Examples(
         examples=[
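
For a quick check outside the Space, the committed translation path can be exercised standalone. A minimal sketch, assuming a local install of torch and transformers and enough RAM for one 1.7B checkpoint in float32; the example sentence is arbitrary, and everything else mirrors local_translate at the default slider value of 0.1 (greedy decoding):

# Standalone smoke test of the translation path committed above.
# Assumes: torch + transformers installed, and enough RAM for one
# 1.7B model in float32; the example sentence is arbitrary.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "st192011/Maltese-EuroLLM-1.7B-Phase2-Stable"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cpu", torch_dtype=torch.float32)

# Same prompt template and decoding settings as local_translate at temp=0.1
prompt = "### INGLIŻ: The committee approved the proposal.\n### MALTI:"
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output_tokens = model.generate(
        **inputs,
        max_new_tokens=150,
        do_sample=False,  # temp <= 0.1 disables sampling in the app
        repetition_penalty=1.2,
        pad_token_id=tokenizer.eos_token_id
    )
print(tokenizer.decode(output_tokens[0], skip_special_tokens=True).split("### MALTI:")[-1].strip())

Since do_sample is False at the default slider value, output for a given sentence should be deterministic; moving the slider above 0.1 switches generate to sampling at the chosen temperature.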