| import gradio as gr |
| from transformers import AutoModelForSeq2SeqLM, AutoTokenizer |
| import torch |
| import re |
|
|
| |
# Hugging Face seq2seq checkpoint used to score prompts.
MODEL_NAME = "google/flan-t5-base"

# NOTE(review): the original Arabic message was mojibake; reconstructed as
# "Loading the model, wait a moment..." — confirm wording with the author.
print("تحميل الموديل، انتظر قليلاً...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# device_map="auto" lets accelerate place the weights on GPU when available,
# falling back to CPU otherwise.
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, device_map="auto")
|
|
| |
def map_to_percentage(text):
    """Map a free-text model verdict to a rough percentage string.

    Looks for the rating keywords (English and Arabic) in *text* and
    returns "90%", "60%", "30%", or "N/A" when no keyword is found.

    Bug fix: the original used bare substring tests, so e.g. "yellow"
    or "slower" matched "low". English keywords are now matched on word
    boundaries via ``re`` (which was imported but unused).

    NOTE(review): the Arabic keywords were mojibake in the original;
    reconstructed as جيد/ممتاز (good/excellent), متوسط (medium),
    ضعيف (weak) — confirm wording with the author.
    """
    text = text.lower()
    if re.search(r"\bhigh\b", text) or any(w in text for w in ["جيد", "ممتاز"]):
        return "90%"
    elif re.search(r"\bmedium\b", text) or "متوسط" in text:
        return "60%"
    elif re.search(r"\blow\b", text) or "ضعيف" in text:
        return "30%"
    else:
        return "N/A"
|
|
| |
def evaluate_prompt(prompts_text):
    """Score every non-empty line of *prompts_text* as a separate prompt.

    Each prompt is wrapped in a rubric asking the model for a one-word
    rating (Low/Medium/High); the decoded answer is converted to a rough
    percentage by map_to_percentage.

    Returns the scores as one "<prompt> → <percentage>" line per input
    prompt, joined with newlines.
    """
    prompts = [line.strip() for line in prompts_text.splitlines() if line.strip()]
    results = []

    for prompt in prompts:
        eval_text = f"""
Evaluate the following Prompt in terms of clarity, consistency, efficiency, extensibility, and user experience.
Choose **one word only**: Low, Medium, High.
Prompt: {prompt}
Answer with a single word only.
"""

        # Move the encoded inputs to wherever the model weights were placed.
        inputs = tokenizer(eval_text, return_tensors="pt").to(model.device)
        # max_new_tokens=70 is generous headroom even though the rubric asks
        # for a single word; decoding stops at EOS anyway.
        output = model.generate(**inputs, max_new_tokens=70)
        decoded = tokenizer.decode(output[0], skip_special_tokens=True)

        score = map_to_percentage(decoded)
        # Bug fix: the original separator character was mojibake; restored "→".
        results.append(f"{prompt} → {score}")

    return "\n".join(results)
|
|
| |
# Gradio front-end: multi-line textbox in, plain textbox out.
# NOTE(review): the Arabic UI strings below were mojibake in the original;
# reconstructed as "write each Prompt on a separate line" (placeholder),
# "without API" (title), and "enter several Prompts and each one gets an
# approximate percentage rating" (description) — confirm wording.
iface = gr.Interface(
    fn=evaluate_prompt,
    inputs=gr.Textbox(lines=15, placeholder="اكتب كل Prompt في سطر منفصل"),
    outputs="textbox",
    title="Prompt Evaluator بدون API",
    description="أدخل عدة Prompts وسيعطي كل واحد نسبة مئوية تقريبية للتقييم.",
)

# share=True additionally exposes a temporary public URL besides the local server.
iface.launch(share=True)