TGPro1 committed on
Commit
0cb02b1
·
verified ·
1 Parent(s): 2b4fbbf

Update app to gr.Interface for stable REST - Mon Jan 19 23:43:16 2026

Browse files
Files changed (1) hide show
  1. app.py +21 -35
app.py CHANGED
@@ -1,4 +1,3 @@
1
- # FORCE BUILD TRIGGER: Mon Jan 19 23:37:29 2026
2
  import gradio as gr
3
  import base64
4
  import torch
@@ -37,18 +36,27 @@ def load_models():
37
  "cuda" if torch.cuda.is_available() else "cpu"
38
  )
39
 
40
- def process_request(request_json):
41
  """Main API endpoint - receives JSON string, returns JSON string"""
 
 
 
 
 
 
42
  try:
43
  request = json.loads(request_json)
44
  action = request.get("action")
 
45
 
46
  if action == "health":
47
  return json.dumps({"status": "ok", "gpu": torch.cuda.is_available()})
48
 
 
49
  load_models()
50
 
51
  if action == "stt":
 
52
  audio_b64 = request.get("file")
53
  lang = request.get("lang")
54
  audio_bytes = base64.b64decode(audio_b64)
@@ -64,6 +72,7 @@ def process_request(request_json):
64
  os.unlink(temp_path)
65
 
66
  elif action == "translate":
 
67
  text = request.get("text")
68
  target_lang = request.get("target_lang")
69
  tgt_code = f"{target_lang}_Latn"
@@ -74,6 +83,7 @@ def process_request(request_json):
74
  return json.dumps({"translated": result.strip()})
75
 
76
  elif action == "tts":
 
77
  text = request.get("text")
78
  lang = request.get("lang")
79
  speaker_wav_b64 = request.get("speaker_wav")
@@ -107,38 +117,14 @@ def process_request(request_json):
107
  print(f"❌ Error: {traceback.format_exc()}")
108
  return json.dumps({"error": str(e)})
109
 
110
- # Create Gradio interface with API endpoint
111
- with gr.Blocks(title="🚀 Unified AI Engine") as demo:
112
- gr.Markdown("# 🚀 Unified AI Engine\nBackend API for STT, Translation, and TTS")
113
-
114
- with gr.Row():
115
- input_box = gr.Textbox(
116
- label="JSON Request",
117
- placeholder='{"action": "health"}',
118
- lines=5
119
- )
120
- output_box = gr.Textbox(
121
- label="JSON Response",
122
- lines=5
123
- )
124
-
125
- submit_btn = gr.Button("Process Request", variant="primary")
126
-
127
- submit_btn.click(
128
- fn=process_request,
129
- inputs=input_box,
130
- outputs=output_box,
131
- api_name="predict" # This exposes the endpoint at /api/predict
132
- )
133
-
134
- gr.Examples(
135
- examples=[
136
- ['{"action": "health"}'],
137
- ['{"action": "stt", "file": "base64_audio", "lang": "en"}'],
138
- ['{"action": "translate", "text": "Hello", "target_lang": "fr"}'],
139
- ],
140
- inputs=input_box
141
- )
142
 
143
  if __name__ == "__main__":
144
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
1
  import gradio as gr
2
  import base64
3
  import torch
 
36
  "cuda" if torch.cuda.is_available() else "cpu"
37
  )
38
 
39
+ def process_request(request_json, progress=gr.Progress()):
40
  """Main API endpoint - receives JSON string, returns JSON string"""
41
+ print(f"📥 Received request: '{request_json[:100]}...' (len: {len(request_json or '')})")
42
+
43
+ if not request_json or not request_json.strip():
44
+ print("⚠️ EMPTY REQUEST RECEIVED")
45
+ return json.dumps({"error": "Empty request"})
46
+
47
  try:
48
  request = json.loads(request_json)
49
  action = request.get("action")
50
+ print(f"⚙️ Action: {action}")
51
 
52
  if action == "health":
53
  return json.dumps({"status": "ok", "gpu": torch.cuda.is_available()})
54
 
55
+ progress(0.1, desc="Loading models...")
56
  load_models()
57
 
58
  if action == "stt":
59
+ progress(0.3, desc="Processing Audio...")
60
  audio_b64 = request.get("file")
61
  lang = request.get("lang")
62
  audio_bytes = base64.b64decode(audio_b64)
 
72
  os.unlink(temp_path)
73
 
74
  elif action == "translate":
75
+ progress(0.5, desc="Translating...")
76
  text = request.get("text")
77
  target_lang = request.get("target_lang")
78
  tgt_code = f"{target_lang}_Latn"
 
83
  return json.dumps({"translated": result.strip()})
84
 
85
  elif action == "tts":
86
+ progress(0.7, desc="Generating Speech...")
87
  text = request.get("text")
88
  lang = request.get("lang")
89
  speaker_wav_b64 = request.get("speaker_wav")
 
117
  print(f"❌ Error: {traceback.format_exc()}")
118
  return json.dumps({"error": str(e)})
119
 
120
+ # Use Simple Interface for better API stability in 4.x
121
+ demo = gr.Interface(
122
+ fn=process_request,
123
+ inputs=gr.Textbox(label="JSON Request", lines=5),
124
+ outputs=gr.Textbox(label="JSON Response"),
125
+ title="🚀 Unified AI Engine",
126
+ description="Backend API for STT, Translation, and TTS"
127
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
  if __name__ == "__main__":
130
+ demo.queue().launch(server_name="0.0.0.0", server_port=7860)