Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,11 +1,11 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import requests
|
| 3 |
import json
|
| 4 |
-
import os
|
| 5 |
|
| 6 |
# Hugging Face API details
|
| 7 |
-
API_URL =
|
| 8 |
-
HF_TOKEN = "
|
| 9 |
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
|
| 10 |
|
| 11 |
# Prompt template
|
|
@@ -33,12 +33,13 @@ Target language: {target_lang}
|
|
| 33 |
Emotion: {emotion}
|
| 34 |
"""
|
| 35 |
|
| 36 |
-
# Function to call Hugging Face API
|
| 37 |
def query_hf(payload):
|
| 38 |
response = requests.post(API_URL, headers=headers, json=payload)
|
| 39 |
-
|
|
|
|
|
|
|
|
|
|
| 40 |
|
| 41 |
-
# Core translation function
|
| 42 |
def translate(text, source_lang, target_lang, emotion):
|
| 43 |
prompt = PROMPT_TEMPLATE.format(
|
| 44 |
text=text,
|
|
@@ -49,35 +50,47 @@ def translate(text, source_lang, target_lang, emotion):
|
|
| 49 |
payload = {"inputs": prompt, "parameters": {"temperature": 0.2}}
|
| 50 |
output = query_hf(payload)
|
| 51 |
|
| 52 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
try:
|
| 54 |
raw_text = output[0]["generated_text"]
|
| 55 |
parsed = json.loads(raw_text.strip())
|
| 56 |
-
except Exception:
|
| 57 |
-
# If model messes up, fallback with safe JSON
|
| 58 |
parsed = {
|
| 59 |
"cleaned_text": text,
|
| 60 |
-
"translated_text":
|
| 61 |
"emotion": emotion
|
| 62 |
}
|
| 63 |
return parsed
|
| 64 |
|
| 65 |
-
# Gradio UI
|
| 66 |
-
def gradio_interface(text, source_lang, target_lang, emotion):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
result = translate(text, source_lang, target_lang, emotion)
|
| 68 |
return json.dumps(result, indent=2, ensure_ascii=False)
|
| 69 |
|
| 70 |
iface = gr.Interface(
|
| 71 |
fn=gradio_interface,
|
| 72 |
inputs=[
|
| 73 |
-
gr.
|
|
|
|
| 74 |
gr.Radio(["en", "es"], label="Source Language"),
|
| 75 |
gr.Radio(["en", "es"], label="Target Language"),
|
| 76 |
gr.Radio(["happy", "sad", "angry", "calm", "excited"], label="Emotion")
|
| 77 |
],
|
| 78 |
outputs=gr.Textbox(label="Output JSON"),
|
| 79 |
title="AI Universal Translator - Translation Module",
|
| 80 |
-
description="Cleans text, translates EN ↔ ES, and preserves emotions."
|
| 81 |
)
|
| 82 |
|
| 83 |
if __name__ == "__main__":
|
|
|
|
| 1 |
import json
import os

import gradio as gr
import requests

# Hugging Face Inference API endpoint (Mistral-7B-Instruct served via the
# hosted inference API).
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"

# Token comes from the environment — set it in the Space's "Repository secrets".
HF_TOKEN = os.getenv("HF_TOKEN")

# Only attach an Authorization header when a token is actually present.
# The original built f"Bearer {HF_TOKEN}" unconditionally, which sends the
# literal string "Bearer None" when the secret is missing — an opaque 401
# instead of the API's clearer "missing credentials" response.
headers = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
|
| 10 |
|
| 11 |
# Prompt template
|
|
|
|
| 33 |
Emotion: {emotion}
|
| 34 |
"""
|
| 35 |
|
|
|
|
| 36 |
def query_hf(payload):
    """POST *payload* to the Hugging Face Inference API and return the decoded JSON.

    On a non-JSON response body (HTML error page, plain-text rate-limit
    notice), returns ``{"error": <raw body text>}`` so callers can test for
    an ``"error"`` key instead of handling exceptions.
    """
    response = requests.post(API_URL, headers=headers, json=payload)
    try:
        return response.json()
    except ValueError:
        # .json() raises ValueError (requests' JSONDecodeError subclasses it)
        # when the body isn't JSON. The original used a bare `except:`, which
        # also swallows KeyboardInterrupt/SystemExit — narrow it.
        return {"error": response.text}
|
| 42 |
|
|
|
|
| 43 |
def translate(text, source_lang, target_lang, emotion):
|
| 44 |
prompt = PROMPT_TEMPLATE.format(
|
| 45 |
text=text,
|
|
|
|
| 50 |
payload = {"inputs": prompt, "parameters": {"temperature": 0.2}}
|
| 51 |
output = query_hf(payload)
|
| 52 |
|
| 53 |
+
# Debug check
|
| 54 |
+
if "error" in output:
|
| 55 |
+
return {
|
| 56 |
+
"cleaned_text": text,
|
| 57 |
+
"translated_text": "[Error: " + output["error"] + "]",
|
| 58 |
+
"emotion": emotion
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
try:
|
| 62 |
raw_text = output[0]["generated_text"]
|
| 63 |
parsed = json.loads(raw_text.strip())
|
| 64 |
+
except Exception as e:
|
|
|
|
| 65 |
parsed = {
|
| 66 |
"cleaned_text": text,
|
| 67 |
+
"translated_text": "[Parsing error: " + str(e) + "]",
|
| 68 |
"emotion": emotion
|
| 69 |
}
|
| 70 |
return parsed
|
| 71 |
|
| 72 |
+
# Gradio UI entry point with optional speech input.
def gradio_interface(audio, text, source_lang, target_lang, emotion):
    """Translate typed or spoken input and return a pretty-printed JSON string.

    Spoken input takes precedence over typed text when both are given.
    Every branch returns a JSON *string* so the Textbox output component
    always receives the same type (the original returned a raw dict from
    the no-input branch — inconsistent with the success path).
    """
    # NOTE(review): gr.Audio does not perform speech-to-text itself; `audio`
    # is whatever the Audio component yields (filepath / numpy array), not a
    # transcript. Confirm an STT step exists upstream before using it as text.
    if audio is not None:
        text = audio
    if not text:
        # Bug fix: serialize the error the same way as the success path.
        return json.dumps({"error": "No input text provided"},
                          indent=2, ensure_ascii=False)
    result = translate(text, source_lang, target_lang, emotion)
    return json.dumps(result, indent=2, ensure_ascii=False)
|
| 81 |
|
| 82 |
# Gradio app definition: audio or text in, JSON string out.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        # NOTE(review): "text" is not a valid `type` for gr.Audio (accepted
        # values are "numpy" and "filepath"); Gradio will reject this at
        # startup. Audio also does not transcribe speech by itself — confirm
        # how the spoken input is meant to become text before relying on it.
        gr.Audio(sources=["microphone"], type="text", label="🎙 Speech Input (or leave empty)"),
        gr.Textbox(label="💬 Text Input"),
        gr.Radio(["en", "es"], label="Source Language"),
        gr.Radio(["en", "es"], label="Target Language"),
        gr.Radio(["happy", "sad", "angry", "calm", "excited"], label="Emotion")
    ],
    # gradio_interface returns a JSON-formatted string, displayed verbatim.
    outputs=gr.Textbox(label="Output JSON"),
    title="AI Universal Translator - Translation Module",
    description="Cleans text or speech, translates EN ↔ ES, and preserves emotions."
)
|
| 95 |
|
| 96 |
if __name__ == "__main__":
|