Spaces:
Sleeping
Sleeping
| # Tech Trends Curator Chatbot with Audio (Hugging Face Compatible) | |
| import os | |
| import uuid | |
| import gradio as gr | |
| import requests | |
| from gtts import gTTS | |
# Fetch API key from environment.
# NOTE(review): this is None when GROQ_API_KEY is unset — the failure then
# surfaces later as an authentication error at request time, not at startup.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
# Function to query Groq LLM via REST API
def query_groq(messages):
    """Send a chat-completion request to the Groq OpenAI-compatible API.

    Args:
        messages: List of chat messages, each a dict with "role" and
            "content" keys (system/user/assistant format).

    Returns:
        str: The assistant's reply text from the first completion choice.

    Raises:
        requests.HTTPError: If the API returns a non-2xx status
            (e.g. bad API key, rate limit).
        requests.Timeout: If the request exceeds the timeout.
    """
    url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json",
    }
    data = {
        "model": "llama3-8b-8192",
        "messages": messages,
    }
    # timeout keeps the UI from hanging forever on a stalled connection;
    # raise_for_status surfaces API errors clearly instead of letting the
    # JSON indexing below fail with a confusing KeyError on 'choices'.
    response = requests.post(url, headers=headers, json=data, timeout=60)
    response.raise_for_status()
    result = response.json()
    return result["choices"][0]["message"]["content"]
# Chatbot function with audio output
def chat_with_audio(user_input, history):
    """Produce a chatbot reply and a spoken MP3 rendition of it.

    Args:
        user_input: The user's latest message text.
        history: Prior conversation as a list of [user, assistant] pairs.

    Returns:
        tuple[str, str]: (reply text, path to the generated MP3 file).
    """
    import tempfile  # local import: keeps this fix self-contained

    system_prompt = {
        "role": "system",
        "content": (
            "You are a tech trend curator. Summarize the latest AI tools, GitHub projects, "
            "and startup news in an engaging tone. If asked for fake/funny trends, be creative and witty."
        ),
    }
    # Rebuild the full message list: system prompt, then the running history,
    # then the newest user turn.
    messages = [system_prompt]
    for user, assistant in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": user_input})
    response_text = query_groq(messages)
    # Generate audio in the OS temp dir — a hard-coded "/tmp" breaks on
    # Windows; a UUID filename avoids collisions between concurrent chats.
    tts = gTTS(response_text)
    audio_file = os.path.join(tempfile.gettempdir(), f"{uuid.uuid4()}.mp3")
    tts.save(audio_file)
    return response_text, audio_file
# Gradio UI: chat interface that also plays a spoken audio summary.
with gr.Blocks(theme="soft") as demo:
    gr.Markdown("# π Tech Trends Curator\nChat about trending AI tools, GitHub projects, and startup news β with audio!")
    chat_view = gr.Chatbot(height=400)
    prompt_box = gr.Textbox(placeholder="Ask for top trends, tools, or fake news")
    audio_out = gr.Audio(label="Audio Summary")
    chat_state = gr.State([])

    def _on_submit(text, chat_log):
        # Ask the backend for a reply plus audio clip, then record the turn.
        answer, clip_path = chat_with_audio(text, chat_log)
        chat_log.append([text, answer])
        # The same history list feeds both the visible chatbot and the state.
        return chat_log, chat_log, clip_path

    prompt_box.submit(_on_submit, [prompt_box, chat_state], [chat_view, chat_state, audio_out])

# Launch the app
demo.launch()