Update app.py
app.py CHANGED

@@ -2,7 +2,7 @@ import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-# --- MODEL DATA
+# --- MODEL DATA ---
 MODELS_CONFIG = {
     "Phase 2: Stable (Formal)": {
         "id": "st192011/Maltese-EuroLLM-1.7B-Phase2-Stable",
@@ -30,10 +30,9 @@ MODELS_CONFIG = {
 }
 
 # --- MODEL LOADING (Local CPU) ---
-
-print("Loading models to CPU... Please wait.")
+print("Loading models... this might take a minute.")
 
-# Load
+# Load Phase 2
 tokenizer_p2 = AutoTokenizer.from_pretrained(MODELS_CONFIG["Phase 2: Stable (Formal)"]["id"])
 model_p2 = AutoModelForCausalLM.from_pretrained(
     MODELS_CONFIG["Phase 2: Stable (Formal)"]["id"],
@@ -41,7 +40,7 @@ model_p2 = AutoModelForCausalLM.from_pretrained(
     torch_dtype=torch.float32
 )
 
-# Load
+# Load Phase 4
 tokenizer_p4 = AutoTokenizer.from_pretrained(MODELS_CONFIG["Phase 4: Anchored (Native)"]["id"])
 model_p4 = AutoModelForCausalLM.from_pretrained(
     MODELS_CONFIG["Phase 4: Anchored (Native)"]["id"],
@@ -53,7 +52,6 @@ def local_translate(model, tokenizer, text, temp):
     if not text.strip():
         return ""
 
-    # Prompt format consistent with training
     prompt = f"### INGLIŻ: {text}\n### MALTI:"
     inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
 
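For context, the call that produces `output_tokens` lives in the unchanged lines between these two hunks. A minimal sketch of that generation step, under assumptions: `max_new_tokens` and `do_sample` are not visible in the diff, only `pad_token_id` is.

# Sketch only: the actual call sits in the unchanged part of local_translate;
# max_new_tokens and do_sample are assumptions, pad_token_id matches the diff.
with torch.no_grad():
    output_tokens = model.generate(
        **inputs,
        max_new_tokens=256,
        do_sample=True,
        temperature=temp,
        pad_token_id=tokenizer.eos_token_id,
    )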
@@ -67,16 +65,20 @@ def local_translate(model, tokenizer, text, temp):
         pad_token_id=tokenizer.eos_token_id
     )
 
-    #
-
+    # 1. Decode while skipping standard special tokens
+    decoded_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
 
-    # Extract only the Maltese
-    if "### MALTI:" in
-        maltese_text =
+    # 2. Extract only the Maltese part
+    if "### MALTI:" in decoded_text:
+        maltese_text = decoded_text.split("### MALTI:")[-1]
     else:
-        maltese_text =
+        maltese_text = decoded_text
 
-
+    # 3. CRITICAL: Manual cleaning of the end-of-text string if it still remains
+    # This removes <|endoftext|>, </s>, and any extra whitespace
+    clean_text = maltese_text.replace("<|endoftext|>", "").replace("</s>", "").strip()
+
+    return clean_text
 
 def translate_logic(text, selected_models, temp):
     out_p2 = "Model not selected."
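The substance of this commit is the new post-processing in `local_translate`: decode with `skip_special_tokens=True`, keep only what follows the `### MALTI:` marker, then strip any end-of-text strings that survive decoding. The same logic as a standalone helper (hypothetical name `clean_generation`, not part of app.py), which makes the string handling easy to test without loading either model:

def clean_generation(decoded_text: str) -> str:
    # Keep only the text after the Maltese marker, if present.
    if "### MALTI:" in decoded_text:
        maltese_text = decoded_text.split("### MALTI:")[-1]
    else:
        maltese_text = decoded_text
    # Drop end-of-text strings that can survive skip_special_tokens=True.
    return maltese_text.replace("<|endoftext|>", "").replace("</s>", "").strip()

# Example: clean_generation("### INGLIŻ: Hello\n### MALTI: Bonġu<|endoftext|>")
# returns "Bonġu".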
@@ -103,36 +105,26 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
 
     with gr.Row():
         with gr.Column(scale=2):
-            input_text = gr.Textbox(
-                label="English Source Text",
-                placeholder="Enter English text here...",
-                lines=4
-            )
+            input_text = gr.Textbox(label="English Source Text", placeholder="Enter English text...", lines=4)
             model_selector = gr.CheckboxGroup(
                 choices=list(MODELS_CONFIG.keys()),
                 value=list(MODELS_CONFIG.keys()),
-                label="Select Models
-            )
-            temp_slider = gr.Slider(
-                minimum=0.1,
-                maximum=1.0,
-                value=0.1,
-                step=0.1,
-                label="Creativity (Temperature)"
+                label="Select Models"
             )
+            temp_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.1, step=0.1, label="Temperature")
             btn = gr.Button("🚀 Run Translation", variant="primary")
 
     with gr.Row():
         with gr.Column():
             gr.Markdown("### Phase 2: Stable (Formal)")
             p2_out = gr.Textbox(label="Output", interactive=False, lines=5)
-            gr.Markdown(f"**Training
+            gr.Markdown(f"**Training:** {MODELS_CONFIG['Phase 2: Stable (Formal)']['description']}")
             gr.Markdown(f"**Metrics:** ChrF++: `{MODELS_CONFIG['Phase 2: Stable (Formal)']['chrf']}` | COMET: `{MODELS_CONFIG['Phase 2: Stable (Formal)']['comet']}`")
 
         with gr.Column():
             gr.Markdown("### Phase 4: Anchored (Native)")
             p4_out = gr.Textbox(label="Output", interactive=False, lines=5)
-            gr.Markdown(f"**Training
+            gr.Markdown(f"**Training:** {MODELS_CONFIG['Phase 4: Anchored (Native)']['description']}")
             gr.Markdown(f"**Metrics:** ChrF++: `{MODELS_CONFIG['Phase 4: Anchored (Native)']['chrf']}` | COMET: `{MODELS_CONFIG['Phase 4: Anchored (Native)']['comet']}`")
 
     gr.Examples(
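Both 1.7B checkpoints are loaded in float32 on CPU at import time, so startup takes a while (hence the reworded loading message above). Not shown in the diff: a Space runs app.py directly, so the change can be smoke-tested locally with `pip install gradio torch transformers` followed by `python app.py`, assuming the unchanged tail of the file still launches the interface in the usual way:

# Assumed to already exist in the unchanged tail of app.py (standard for
# Gradio Spaces); shown only for completeness.
demo.launch()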