lalaru committed on
Commit
b3237d9
·
verified ·
1 Parent(s): 976cacb

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os

import gradio as gr
import requests
4
+
5
# Hugging Face Inference API configuration.
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
# Read the token from the environment so the secret is never committed to
# source control; the placeholder default keeps the old behavior when the
# HF_TOKEN environment variable is unset.
HF_TOKEN = os.environ.get("HF_TOKEN", "YOUR_HUGGINGFACE_API_TOKEN")
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
9
+
10
# Prompt template for the instruct model.  The literal JSON braces in the
# example object are doubled ({{ }}) so that str.format() substitutes only
# the four named fields ({text}, {source_lang}, {target_lang}, {emotion})
# instead of raising on the example braces.
PROMPT_TEMPLATE = """
You are an AI translation assistant for a real-time universal translator.
Your tasks:
1. Take the input text in either English or Spanish.
2. Remove filler words like "um", "uh", "ehhh", "pues", "like", "you know", and stretched words ("soooo", "pizzaaaa").
3. Correct punctuation and casing.
4. Translate the cleaned text into the target language (English ↔ Spanish).
5. Do not change the emotion tag.

Return only JSON in this exact format:

{{
"cleaned_text": "<cleaned input text in original language>",
"translated_text": "<translation in target language>",
"emotion": "<given emotion>"
}}

Input:
{text}
Source language: {source_lang}
Target language: {target_lang}
Emotion: {emotion}
"""
34
+
35
# Function to call the Hugging Face Inference API.
def query_hf(payload):
    """POST *payload* to the model endpoint and return the decoded JSON body.

    A timeout is set so a stalled request cannot hang the app indefinitely.
    Propagates requests.RequestException on network/timeout errors and a
    JSON decode error if the response body is not valid JSON.
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    return response.json()
39
+
40
# Core translation function
def _extract_json(raw_text):
    """Best-effort parse of the model output: pull the first {...} object.

    Instruct models frequently echo the prompt or wrap the JSON in prose,
    so scanning for the outermost braces is more reliable than parsing the
    whole string.  Raises ValueError / json.JSONDecodeError on failure.
    """
    start = raw_text.find("{")
    end = raw_text.rfind("}")
    if start == -1 or end <= start:
        raise ValueError("no JSON object in model output")
    return json.loads(raw_text[start:end + 1])


def translate(text, source_lang, target_lang, emotion):
    """Clean and translate *text* via the hosted model.

    Returns a dict with keys "cleaned_text", "translated_text" and
    "emotion".  Falls back to a safe stub dict whenever the model output
    cannot be parsed as JSON.
    """
    prompt = PROMPT_TEMPLATE.format(
        text=text,
        source_lang=source_lang,
        target_lang=target_lang,
        emotion=emotion,
    )
    payload = {"inputs": prompt, "parameters": {"temperature": 0.2}}
    output = query_hf(payload)

    # Extract model output
    try:
        raw_text = output[0]["generated_text"]
        parsed = _extract_json(raw_text)
    except Exception:
        # If the model (or the API error shape) misbehaves, fall back to
        # safe JSON instead of crashing the UI.
        parsed = {
            "cleaned_text": text,
            "translated_text": "[Translation error]",
            "emotion": emotion,
        }
    return parsed
63
+
64
# Gradio UI callback: adapter between the widgets and translate().
def gradio_interface(text, source_lang, target_lang, emotion):
    """Run the translation pipeline and return the result as pretty JSON text."""
    parsed = translate(text, source_lang, target_lang, emotion)
    return json.dumps(parsed, indent=2, ensure_ascii=False)
68
+
69
# Assemble the Gradio app: four input widgets feed gradio_interface,
# whose JSON string lands in a single output textbox.
_input_widgets = [
    gr.Textbox(label="Input Text"),
    gr.Radio(["en", "es"], label="Source Language"),
    gr.Radio(["en", "es"], label="Target Language"),
    gr.Radio(["happy", "sad", "angry", "calm", "excited"], label="Emotion"),
]

iface = gr.Interface(
    fn=gradio_interface,
    inputs=_input_widgets,
    outputs=gr.Textbox(label="Output JSON"),
    title="AI Universal Translator - Translation Module",
    description="Cleans text, translates EN ↔ ES, and preserves emotions.",
)
81
+
82
# Entry point: start the Gradio server only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    iface.launch()