chatbot-nlp / app.py
fdabbiras's picture
Update app.py
0501a1a verified
import gradio as gr
from transformers import pipeline, set_seed
from gtts import gTTS
import uuid
import os
import speech_recognition as sr
# Initialize the Indonesian-language text-generation model (GPT-2 small,
# community Flax checkpoint). Downloaded/cached on first run.
generator = pipeline("text-generation", model="flax-community/gpt2-small-indonesian")
set_seed(42)  # fix the sampling seed so generations are reproducible across runs
# Chatbot core: text prompt in -> (reply text, TTS audio path) out.
def chat_with_bot(prompt):
    """Generate an Indonesian reply to *prompt* and synthesize it as speech.

    Returns a ``(response_text, audio_path)`` tuple where *audio_path* is a
    uniquely named mp3 file under ``/tmp``.
    """
    result = generator(prompt, max_length=100, num_return_sequences=1)[0]["generated_text"]
    # The pipeline echoes the prompt at the start of generated_text; strip it
    # so only the newly generated continuation remains.
    response = result[len(prompt):].strip()
    # gTTS raises on empty text — if the model produced nothing beyond the
    # prompt, fall back to a short notice so the UI still gets text + audio.
    if not response:
        response = "Maaf, saya tidak dapat menghasilkan jawaban."
    # Synthesize the reply with the Indonesian voice to a unique mp3.
    audio_path = f"/tmp/{uuid.uuid4().hex}.mp3"
    tts = gTTS(response, lang="id")
    tts.save(audio_path)
    return response, audio_path
# Input router: accepts typed text and/or an uploaded audio clip.
def transcribe_and_respond(text_input, audio_input):
    """Resolve the user's input (text or speech) and answer via ``chat_with_bot``.

    If *audio_input* (a filepath) is given, it is transcribed with the Google
    Web Speech API (Indonesian locale) and overrides *text_input*.
    Returns ``(response_text, audio_path)``; on failure returns a warning
    message and ``None`` for the audio.
    """
    if audio_input:
        recognizer = sr.Recognizer()
        with sr.AudioFile(audio_input) as source:
            audio_data = recognizer.record(source)
        try:
            text_input = recognizer.recognize_google(audio_data, language="id-ID")
        except (sr.UnknownValueError, sr.RequestError):
            # Unintelligible audio or API/network failure — report instead of
            # crashing. (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return "⚠️ Gagal mengenali suara.", None
    # Gradio may deliver None for an empty textbox; normalize before stripping
    # so we don't raise TypeError on None.strip().
    if not (text_input or "").strip():
        return "⚠️ Tidak ada input yang diproses.", None
    return chat_with_bot(text_input)
# Gradio UI with a mobile-friendly layout (custom CSS caps width at 600px).
with gr.Blocks(css="""
.gradio-container {
max-width: 600px !important;
margin: auto;
padding: 16px;
font-family: 'Segoe UI', sans-serif;
}
h1, h2 {
text-align: center;
}
textarea, input {
font-size: 16px !important;
}
@media screen and (max-width: 600px) {
.gradio-container {
padding: 12px;
}
}
""") as demo:
    # App header and usage instructions (Indonesian copy kept verbatim).
    gr.Markdown("""
<h1>MEDVA – Medical Virtual Assistant</h1>
<p style='text-align:center;'>Tulis pertanyaan atau unggah suara Anda. MEDVA akan menjawab dan membacakan hasilnya!</p>
""")
    # Input column: optional text box, optional audio upload, submit button.
    # NOTE: the emoji in these labels were mojibake from an encoding
    # round-trip; restored to the intended characters.
    with gr.Column():
        text_input = gr.Textbox(label="💬 Masukkan Teks (opsional)", placeholder="Contoh: Apa itu AI?", lines=2)
        audio_input = gr.Audio(label="🎤 Upload Suara (opsional)", type="filepath")
        submit_btn = gr.Button("🚀 Kirim")
    # Output column: generated answer as text and as synthesized speech.
    with gr.Column():
        text_output = gr.Textbox(label="🧠 Jawaban", lines=3)
        audio_output = gr.Audio(label="🔊 Jawaban Audio")
    # Wire the button: (text, audio) in -> (answer text, answer audio) out.
    submit_btn.click(fn=transcribe_and_respond, inputs=[text_input, audio_input], outputs=[text_output, audio_output])

demo.launch()