# NOTE(review): stray HuggingFace Spaces page chrome ("Spaces: Sleeping")
# removed from the top of the file — it was scraped page text, not Python.
import json
import os
import random
import threading
import time

import gradio as gr
from huggingface_hub import InferenceClient
from spitch import Spitch
# -----------------------------
# 1. API keys
# -----------------------------
# os.environ rejects None values, so only propagate the Spitch key when it is
# actually set; the original unconditional assignment raised TypeError at
# import time whenever SPITCH_API_KEY was missing.
_spitch_api_key = os.getenv("SPITCH_API_KEY")
if _spitch_api_key is not None:
    os.environ["SPITCH_API_KEY"] = _spitch_api_key

HF_API_TOKEN = os.getenv("HF_API_TOKEN")

# -----------------------------
# 2. Clients
# -----------------------------
spitch_client = Spitch()  # reads SPITCH_API_KEY from the environment
hf_client = InferenceClient(token=HF_API_TOKEN)

# -----------------------------
# 3. Debate context
# -----------------------------
# session_id -> {"topic", "user_stance", "language", "gender", "current_round",
#                "user_arguments", "ai_responses", "is_active",
#                "round_start_time"}
debate_sessions = {}
# -----------------------------
# 4. Voice mappings
# -----------------------------
# Spitch TTS voice ids, keyed by display language then speaker gender.
VOICES = {
    "Yoruba": {"Female": ["sade", "funmi"], "Male": ["segun", "femi"]},
    "Hausa": {"Female": ["amina", "zainab"], "Male": ["hasan", "aliyu"]},
    "Igbo": {"Female": ["ngozi", "amara"], "Male": ["obinna", "ebuka"]},
    "English": {"Female": ["lucy", "lina", "kani"], "Male": ["john", "jude", "henry"]},
}

# ISO-639-1 codes expected by the Spitch speech APIs.
LANGUAGE_CODES = {
    "English": "en",
    "Yoruba": "yo",
    "Hausa": "ha",
    "Igbo": "ig",
}
# -----------------------------
# 5. MPT-7B-Instruct prompt format
# -----------------------------
INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
INTRO_BLURB = "Below is an instruction that describes a task. Write a response that appropriately completes the request."

# Template with a single `{instruction}` placeholder left for later
# `.format(instruction=...)` calls.  Built directly as an f-string (double
# braces escape the placeholder) instead of the original two-stage
# `.format()` round-trip, which produced the identical string.
PROMPT_FOR_GENERATION_FORMAT = (
    f"{INTRO_BLURB}\n{INSTRUCTION_KEY}\n{{instruction}}\n{RESPONSE_KEY}\n"
)
def create_comprehensive_debate_prompt(topic, user_stance, all_user_arguments, round_num):
    """Build the instruction text for the AI debater.

    Args:
        topic: The debate resolution being argued.
        user_stance: The human's side, "For" or "Against" (case-insensitive).
        all_user_arguments: Every argument the human has made so far, across
            all rounds.
        round_num: 1 produces the opening rebuttal prompt; any other value
            produces the closing-round prompt.

    Returns:
        The instruction string for the LLM. The AI always argues the stance
        opposite to ``user_stance``.
    """
    # The AI takes whichever side the user did not.
    opponent_stance = "against" if user_stance.lower() == "for" else "for"
    # Bullet-list every user argument so the model can address each one.
    user_args_text = "\n".join(f"- {arg}" for arg in all_user_arguments)
    if round_num == 1:
        instruction = f"""You are debating the topic: "{topic}"
Your position: STRONGLY {opponent_stance.upper()}
Opponent's position: {user_stance.upper()}
The opponent has presented these arguments:
{user_args_text}
Your task: Write a comprehensive counter-argument that:
1. Directly refutes their main points with specific evidence
2. Presents 2-3 new strong arguments supporting the {opponent_stance} position
3. Uses facts, logic, and real-world examples
4. Maintains a respectful but assertive tone
5. Builds a compelling case for why you're right
Write your response as a skilled debater would:"""
    else:  # Final round
        instruction = f"""You are in round {round_num} of a debate on: "{topic}"
Your position: STRONGLY {opponent_stance.upper()}
Your opponent has now provided these arguments across all rounds:
{user_args_text}
Your task for this final round:
1. Address any new points they raised
2. Reinforce your strongest arguments from before
3. Present one powerful new argument they haven't heard
4. Expose weaknesses in their overall position
5. Make a compelling closing case for the {opponent_stance} side
Deliver your strongest rebuttal:"""
    return instruction
def generate_debate_response(topic, user_stance, all_user_arguments, round_num, tts_voice="john", tts_language="en"):
    """Generate the AI's turn for one debate round: rebuttal text plus TTS audio.

    Args:
        topic: The debate resolution.
        user_stance: The human's side ("For"/"Against").
        all_user_arguments: All arguments the human has made so far.
        round_num: Current round number (1 or 2).
        tts_voice: Spitch voice id for the audio rendering.
        tts_language: ISO-639-1 language code for the TTS call.

    Returns:
        ``(ai_response, audio_file)`` — the rebuttal text and the path to an
        MP3 of it, or ``None`` for the audio when TTS fails.  Never raises:
        LLM failures fall back to canned rebuttals, and any other error yields
        a generic one-line response.
    """
    try:
        instruction = create_comprehensive_debate_prompt(topic, user_stance, all_user_arguments, round_num)
        formatted_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
        print(f"🎯 Round {round_num} - Calling MPT-7B-Instruct...")
        try:
            response = hf_client.text_generation(
                formatted_prompt,
                model="mosaicml/mpt-7b-instruct",
                max_new_tokens=200,
                temperature=0.8,
                do_sample=True,
                return_full_text=False,
                stop_sequences=["### Instruction:", "###", "\nRound", "\nOpponent"],
                top_p=0.9,
                repetition_penalty=1.2,
            )
            ai_response = (response if isinstance(response, str) else str(response)).strip()
            # Strip prompt-format artifacts the model sometimes echoes back.
            for artifact in ("### Instruction:", "### Response:", "Your task:", "Write your response:"):
                ai_response = ai_response.replace(artifact, "")
            ai_response = ai_response.strip()
            # Drop blank lines and any leaked header/instruction lines.
            clean_lines = [
                stripped
                for stripped in (line.strip() for line in ai_response.split("\n"))
                if stripped and not stripped.startswith(("###", "Below is", "Your task"))
            ]
            if clean_lines:
                ai_response = "\n".join(clean_lines)
            print(f"✅ MPT Response (Round {round_num}): {ai_response[:100]}...")
            # Treat near-empty generations as failures so we use a fallback.
            if len(ai_response.strip()) < 30:
                raise Exception("Response too short")
        except Exception as llm_error:
            print(f"❌ MPT Error: {llm_error}")
            # Canned rebuttals keep the debate moving when the LLM is down.
            opponent_stance = "against" if user_stance.lower() == "for" else "for"
            if round_num == 1:
                fallbacks = [
                    f"While you present some points about {topic}, the {opponent_stance} position is fundamentally stronger. First, implementation costs would be enormous and bureaucratic inefficiency would hinder progress. Second, industry self-regulation has proven more effective in rapidly evolving tech sectors. Third, overregulation could drive innovation to less regulated countries, ultimately harming our competitive advantage.",
                    f"Your arguments for {topic.lower()} overlook critical economic realities. The {opponent_stance} side recognizes that heavy-handed government intervention typically stifles innovation rather than promoting it. Market forces naturally correct problems more efficiently than regulatory agencies, and the tech industry's track record of self-improvement demonstrates this principle in action.",
                ]
            else:
                fallbacks = [
                    f"Throughout this debate on {topic}, you've failed to address the fundamental flaws in your position. The {opponent_stance} side has consistently shown that your approach would create more problems than it solves. The evidence overwhelmingly supports a market-based solution rather than government interference.",
                    f"After hearing all your arguments about {topic}, I'm more convinced than ever that the {opponent_stance} position is correct. Your points, while well-intentioned, ignore the practical realities of implementation and the historical failures of similar regulatory approaches.",
                ]
            ai_response = random.choice(fallbacks)
        # Synthesize speech for the rebuttal; audio is best-effort.
        try:
            audio_file = f"debate_round_{round_num}_{int(time.time())}.mp3"
            tts_response = spitch_client.speech.generate(
                text=ai_response,
                language=tts_language,
                voice=tts_voice,
            )
            with open(audio_file, "wb") as f:
                f.write(tts_response.read())
            return ai_response, audio_file
        except Exception as tts_error:
            print(f"TTS Error: {tts_error}")
            return ai_response, None
    except Exception as e:
        print(f"Error in generate_debate_response: {str(e)}")
        return f"I maintain my position against {topic} based on the arguments presented.", None
def transcribe_audio(audio_file, language="en"):
    """Transcribe an audio file with Spitch speech-to-text.

    Args:
        audio_file: Path to the recorded audio, or ``None``.
        language: ISO-639-1 language code for the transcription.

    Returns:
        The transcript text, or a human-readable error string on failure
        (this function never raises — callers show the string directly).
    """
    if audio_file is None:
        return "No audio provided"
    try:
        with open(audio_file, "rb") as f:
            response = spitch_client.speech.transcribe(
                language=language,
                content=f.read(),
            )
        return response.text
    except Exception as e:
        print(f"Transcription error: {str(e)}")
        return "Transcription failed"
| # ----------------------------- | |
| # 6οΈβ£ Timed Debate System | |
| # ----------------------------- | |
class DebateTimer:
    """One-shot countdown that fires a callback from a daemon thread.

    Not re-entrant: ``start_timer`` refuses to start while a countdown is
    already running.  ``stop_timer`` cancels the countdown and suppresses
    the callback.  NOTE(review): attribute access is not lock-protected;
    correctness relies on simple bool/int reads being atomic in CPython.
    """

    def __init__(self):
        self.is_running = False
        self.time_left = 0  # seconds remaining in the current countdown
        self.timer_thread = None
        self.callback = None

    def start_timer(self, duration, callback):
        """Begin a countdown of ``duration`` seconds; return False if busy."""
        if self.is_running:
            return False
        self.is_running = True
        self.time_left = duration
        self.callback = callback
        self.timer_thread = threading.Thread(target=self._run_timer)
        self.timer_thread.daemon = True  # never block interpreter shutdown
        self.timer_thread.start()
        return True

    def _run_timer(self):
        # Tick once per second until expiry or stop_timer().
        while self.time_left > 0 and self.is_running:
            time.sleep(1)
            self.time_left -= 1
        # Fire the callback only on natural expiry (not after stop_timer()).
        if self.is_running and self.callback:
            self.callback()
        self.is_running = False

    def stop_timer(self):
        """Cancel the countdown; the callback will not fire."""
        self.is_running = False
        self.time_left = 0

    def get_time_left(self):
        """Return the seconds remaining (0 when idle)."""
        return self.time_left
# Single shared timer: only one debate round can be timed at a time.
debate_timer = DebateTimer()
def format_time(seconds):
    """Format a non-negative number of seconds as ``MM:SS``.

    Minutes are not capped at 59, so e.g. 3661 renders as ``61:01``.
    """
    minutes, secs = divmod(seconds, 60)  # idiomatic quotient+remainder
    return f"{minutes:02d}:{secs:02d}"
def start_debate_round(topic, stance, language, gender, session_id):
    """Start (or resume) a debate session and kick off the 5-minute timer.

    Returns a 6-tuple matching the Gradio ``outputs`` wiring:
    (status message, timer text, user textbox value, AI textbox value,
     AI audio value, submit-button update).
    """
    global debate_sessions, debate_timer
    # Lazily create per-session state on the first round.
    if session_id not in debate_sessions:
        debate_sessions[session_id] = {
            "topic": topic,
            "user_stance": stance,
            "language": language,
            "gender": gender,
            "current_round": 1,
            "user_arguments": [],
            "ai_responses": [],
            "is_active": True,
            "round_start_time": time.time(),
        }
    session = debate_sessions[session_id]
    if session["current_round"] > 2:
        return "Debate completed!", "00:00", "", "", None, gr.update(interactive=False)

    def auto_submit():
        # Fired by the timer thread on expiry.  NOTE(review): the UI updates
        # returned by process_round_end are discarded here — Gradio outputs
        # cannot be pushed from a plain background thread — so this only
        # advances the server-side session state.
        if session_id in debate_sessions and debate_sessions[session_id]["is_active"]:
            current_text = "Time's up - submitting current argument"
            process_round_end(current_text, session_id)

    debate_timer.start_timer(300, auto_submit)  # 5-minute round
    round_msg = (
        f"🎯 ROUND {session['current_round']} STARTED!\n\nTopic: {topic}\n"
        f"Your stance: {stance}\n\nYou have 5 minutes to present your arguments. "
        f"The timer will auto-submit when time expires."
    )
    return (
        round_msg,
        format_time(300),
        "",  # clear the user's textbox
        "",  # clear the previous AI response
        None,  # clear the previous AI audio
        gr.update(interactive=True),  # enable the submit button
    )
def update_timer_display():
    """Return the remaining round time as ``MM:SS`` (manual refresh).

    NOTE(review): currently unwired — the Check Timer button uses
    ``get_current_timer`` instead; kept for callers outside this view.
    """
    return format_time(debate_timer.get_time_left())
def get_current_timer():
    """Return ``MM:SS`` while a round timer is running, else a zeroed clock."""
    if debate_timer.is_running:
        return format_time(debate_timer.get_time_left())
    return "00:00"
def process_round_end(user_argument, session_id):
    """Close the current round: record the user's argument, generate the AI
    rebuttal (text + audio), and either advance to round 2 or finish.

    Returns a 5-tuple matching the Gradio ``outputs`` wiring:
    (status message, timer text, AI text, AI audio path, start-button update).
    """
    global debate_sessions, debate_timer
    if session_id not in debate_sessions:
        return "Session not found", "00:00", "", "", None
    session = debate_sessions[session_id]
    debate_timer.stop_timer()
    # Ignore empty/whitespace-only submissions.
    if user_argument.strip():
        session["user_arguments"].append(user_argument.strip())
    # Resolve the TTS voice for this session, defaulting to English/Male/"john".
    lang_voices = VOICES.get(session["language"], VOICES["English"])
    voices = lang_voices.get(session["gender"], lang_voices["Male"])
    voice = voices[0] if voices else "john"
    lang_code = LANGUAGE_CODES.get(session["language"], "en")
    # The AI replies to ALL arguments made so far, not just this round's.
    ai_response, audio_file = generate_debate_response(
        session["topic"],
        session["user_stance"],
        session["user_arguments"],
        session["current_round"],
        voice,
        lang_code,
    )
    session["ai_responses"].append(ai_response)
    # Two rounds total: advance after round 1, finish after round 2.
    if session["current_round"] < 2:
        session["current_round"] += 1
        status_msg = (
            f"✅ Round {session['current_round'] - 1} completed!\n\n"
            f"🤖 AI Response:\n{ai_response}\n\n"
            f"🎯 Ready for Round {session['current_round']}?"
        )
        next_round_available = True
    else:
        session["is_active"] = False
        status_msg = (
            f"🏆 DEBATE COMPLETED!\n\n"
            f"🤖 Final AI Response:\n{ai_response}\n\n"
            f"Thank you for the debate!"
        )
        next_round_available = False
    return (
        status_msg,
        "00:00",
        ai_response,
        audio_file,
        gr.update(interactive=next_round_available),
    )
# -----------------------------
# 7. Gradio interface
# -----------------------------
with gr.Blocks(title="3-Round Timed Debate System", theme=gr.themes.Soft()) as demo:
    # One session id per page load; all server-side debate state is keyed on it.
    session_state = gr.State(value=f"session_{int(time.time())}")
    gr.Markdown("# 🎙️ 3-Round Timed Debate Arena")
    gr.Markdown("**Powered by MPT-7B-Instruct | Each round: 5 minutes | Auto-submit when timer expires**")
    with gr.Row():
        with gr.Column(scale=2):
            # Debate setup
            topic_input = gr.Textbox(
                label="🎯 Debate Topic",
                placeholder="Enter the debate resolution...",
                value="Artificial intelligence should be heavily regulated by government",
            )
            with gr.Row():
                stance_input = gr.Dropdown(
                    choices=["For", "Against"],
                    value="For",
                    label="Your Stance",
                )
                language_select = gr.Dropdown(
                    choices=list(LANGUAGE_CODES.keys()),
                    value="English",
                    label="Language",
                )
                gender_select = gr.Dropdown(
                    choices=["Male", "Female"],
                    value="Male",
                    label="AI Voice",
                )
        with gr.Column(scale=1):
            # Timer and controls
            timer_display = gr.Textbox(
                label="⏰ Time Remaining",
                value="05:00",
                interactive=False,
                info="Timer updates when you interact with buttons",
            )
            start_round_btn = gr.Button("🚀 Start New Round", variant="primary", size="lg")
            submit_round_btn = gr.Button("✅ Submit Round", variant="secondary", interactive=False)
            check_timer_btn = gr.Button("🔄 Check Timer", variant="secondary", size="sm")
    # Debate interface
    with gr.Row():
        with gr.Column():
            status_output = gr.Textbox(
                label="📊 Debate Status",
                lines=8,
                value="Click 'Start New Round' to begin the debate!",
                interactive=False,
            )
            user_input = gr.Textbox(
                label="✍️ Your Argument",
                placeholder="Present your argument here... (5 minutes to write)",
                lines=6,
            )
        with gr.Column():
            ai_response_output = gr.Textbox(
                label="🤖 AI Counter-Argument",
                lines=8,
                interactive=False,
            )
            ai_audio_output = gr.Audio(
                label="🎵 AI Audio Response",
                type="filepath",
            )
    # Test prompts section
    with gr.Accordion("🧪 Test Prompts & Examples", open=False):
        gr.Markdown("""
## Quick Test Topics:
**🤖 AI & Technology:**
- "Artificial intelligence should be heavily regulated by government"
- "Social media platforms should be liable for user-generated content"
- "Cryptocurrencies should replace traditional banking systems"
**🌍 Social Issues:**
- "Universal basic income should be implemented globally"
- "Climate change policies should prioritize economic growth"
- "Private healthcare systems are better than public ones"
**📚 Education & Society:**
- "Traditional universities will become obsolete within 20 years"
- "Standardized testing should be eliminated from education"
- "Remote work is better for society than office-based work"
## How the 3-Round System Works:
1. **Round 1 (5 min):** Present your opening arguments
2. **Round 2 (5 min):** Respond to AI's counter-arguments and add new points
3. **Final:** AI delivers comprehensive closing argument addressing all your points
## Debate Tips:
- Use specific examples and evidence
- Address counterarguments proactively
- Build logical chains of reasoning
- Stay focused on the core issue
""")

    # Event handlers (thin wrappers keep the click signatures explicit).
    def on_start_round(topic, stance, language, gender, session_id):
        return start_debate_round(topic, stance, language, gender, session_id)

    def on_submit_round(user_arg, session_id):
        return process_round_end(user_arg, session_id)

    # Wire up the buttons.
    start_round_btn.click(
        on_start_round,
        inputs=[topic_input, stance_input, language_select, gender_select, session_state],
        outputs=[status_output, timer_display, user_input, ai_response_output, ai_audio_output, submit_round_btn],
    )
    submit_round_btn.click(
        on_submit_round,
        inputs=[user_input, session_state],
        outputs=[status_output, timer_display, ai_response_output, ai_audio_output, start_round_btn],
    )
    check_timer_btn.click(
        get_current_timer,
        outputs=[timer_display],
    )
    # Timer refresh is manual (button-driven); continuous auto-refresh would
    # need polling support (e.g. a gr.Timer / generator-based update loop).
# -----------------------------
# 8. Launch app
# -----------------------------
if __name__ == "__main__":
    # Bind to all interfaces on 7860, the standard HF Spaces port.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)