Spaces:
Sleeping
Sleeping
Update app.py
Browse files — update space for two models
app.py
CHANGED
|
@@ -1,35 +1,36 @@
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from functools import lru_cache
|
| 3 |
from transformers import pipeline
|
| 4 |
|
| 5 |
-
|
|
|
|
|
|
|
| 6 |
|
| 7 |
-
@lru_cache(maxsize=
|
| 8 |
-
def get_pipe():
|
| 9 |
"""
|
| 10 |
-
Cache
|
| 11 |
-
Pakai CPU (device=-1) agar gratisan di Spaces jalan.
|
| 12 |
"""
|
| 13 |
return pipeline(
|
| 14 |
task="text2text-generation",
|
| 15 |
-
model=
|
| 16 |
-
tokenizer=
|
| 17 |
-
device=-1,
|
| 18 |
-
# framework & dtype otomatis oleh HF/Transformers
|
| 19 |
)
|
| 20 |
|
| 21 |
def translate(direction: str, text: str, max_new_tokens: int = 64) -> str:
|
| 22 |
-
|
|
|
|
| 23 |
return ""
|
| 24 |
|
| 25 |
-
#
|
|
|
|
|
|
|
|
|
|
| 26 |
prompt = f"translate {direction}: {text}"
|
| 27 |
|
| 28 |
-
|
| 29 |
-
# - num_beams memperbaiki kualitas (sedikit lebih lambat tapi OK di CPU kecil)
|
| 30 |
-
# - no_repeat_ngram_size mengurangi pengulangan
|
| 31 |
-
# - early_stopping untuk hentikan beam kalau sudah cukup baik
|
| 32 |
-
out = get_pipe()(
|
| 33 |
prompt,
|
| 34 |
max_new_tokens=int(max_new_tokens),
|
| 35 |
num_beams=5,
|
|
@@ -37,7 +38,6 @@ def translate(direction: str, text: str, max_new_tokens: int = 64) -> str:
|
|
| 37 |
no_repeat_ngram_size=3,
|
| 38 |
early_stopping=True,
|
| 39 |
)[0]["generated_text"]
|
| 40 |
-
|
| 41 |
return out
|
| 42 |
|
| 43 |
with gr.Blocks(title="mT5 id↔md Translator (HF Space API)") as demo:
|
|
@@ -63,16 +63,13 @@ with gr.Blocks(title="mT5 id↔md Translator (HF Space API)") as demo:
|
|
| 63 |
label="Contoh cepat",
|
| 64 |
)
|
| 65 |
|
| 66 |
-
#
|
| 67 |
try:
|
| 68 |
-
# Gradio 5.x (beberapa build menolak argumen apa pun)
|
| 69 |
demo.queue()
|
| 70 |
except TypeError:
|
| 71 |
try:
|
| 72 |
-
# Gradio 5.x lain (punya max_size)
|
| 73 |
demo.queue(max_size=12)
|
| 74 |
except TypeError:
|
| 75 |
-
# Gradio 4.x (pakai concurrency_count)
|
| 76 |
demo.queue(concurrency_count=1, max_size=12)
|
| 77 |
|
| 78 |
if __name__ == "__main__":
|
|
|
|
| 1 |
+
# app.py
|
| 2 |
import gradio as gr
|
| 3 |
from functools import lru_cache
|
| 4 |
from transformers import pipeline
|
| 5 |
|
| 6 |
+
# Model repo IDs on the Hugging Face Hub — change these to your own repos.
ID2MD = "hasmar03/mt5_id2md"
MD2ID = "hasmar03/mt5_md2id"  # <- new model produced by the md→id training run
|
| 9 |
|
| 10 |
+
@lru_cache(maxsize=2)
def get_pipe(model_id: str):
    """
    Build and memoize a text2text-generation pipeline for *model_id*.

    One cache slot per checkpoint (maxsize=2 covers id→md and md→id),
    so each model is loaded only once per process. Runs on CPU
    (device=-1) so it works on the free Spaces tier.
    """
    pipe_kwargs = {
        "task": "text2text-generation",
        "model": model_id,
        "tokenizer": model_id,
        "device": -1,  # CPU (free)
    }
    return pipeline(**pipe_kwargs)
|
| 21 |
|
| 22 |
def translate(direction: str, text: str, max_new_tokens: int = 64) -> str:
    """
    Translate *text* for the given *direction* ("id2md" or "md2id").

    Empty or whitespace-only input short-circuits to "". Any direction
    other than "id2md" is routed to the md→id model.
    """
    cleaned = (text or "").strip()
    if not cleaned:
        return ""

    # Pick the checkpoint matching the requested direction.
    model_id = ID2MD if direction == "id2md" else MD2ID

    # The prompt prefix must match the format used during training.
    generated = get_pipe(model_id)(
        f"translate {direction}: {cleaned}",
        max_new_tokens=int(max_new_tokens),
        num_beams=5,             # beam search: better quality, still OK on a small CPU
        no_repeat_ngram_size=3,  # curb repetition
        early_stopping=True,     # stop beams once a good hypothesis is found
    )
    return generated[0]["generated_text"]
|
| 42 |
|
| 43 |
with gr.Blocks(title="mT5 id↔md Translator (HF Space API)") as demo:
|
|
|
|
| 63 |
label="Contoh cepat",
|
| 64 |
)
|
| 65 |
|
| 66 |
+
# Compatible with Gradio 4.x/5.x: some 5.x builds reject every argument,
# other 5.x builds accept max_size, and 4.x wants concurrency_count.
for _queue_kwargs in ({}, {"max_size": 12}):
    try:
        demo.queue(**_queue_kwargs)
        break
    except TypeError:
        continue
else:
    # Gradio 4.x signature; a TypeError here propagates, as before.
    demo.queue(concurrency_count=1, max_size=12)
|
| 74 |
|
| 75 |
if __name__ == "__main__":
|