Hugging Face Space — status: Sleeping
import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face Inference API client.
# NOTE(review): no model/token passed here — relies on ambient credentials
# (e.g. the HF_TOKEN env var on Spaces); confirm deployment sets one.
client = InferenceClient()
# Stream debate arguments as they are generated by the model.
def debate_stream(topic, stance):
    """Generate a debate argument for *topic* from the given *stance*.

    Args:
        topic: The debate topic entered by the user.
        stance: Either "For" or "Against" (from the UI dropdown).

    Yields:
        The accumulated Markdown text after each streamed chunk, so the
        Gradio output component can render the argument incrementally.
    """
    prompt = f"Construct a logical argument for the topic: '{topic}' with a stance: '{stance}'. Include both supporting arguments and potential counterarguments."
    messages = [
        {"role": "user", "content": prompt}
    ]
    # Open a streaming chat-completion request.
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=messages,
        temperature=0.7,
        max_tokens=1024,
        top_p=0.8,
        stream=True
    )
    # Accumulate and re-yield the full text as each chunk arrives.
    debate_output = ""
    for chunk in stream:
        # delta.content is None on role-only / terminal chunks; skip those
        # instead of crashing with "can only concatenate str to str".
        piece = chunk.choices[0].delta.content
        if piece:
            debate_output += piece
            yield debate_output  # Yield incremental content to display immediately
# Build the Gradio interface: inputs on the left, streamed output on the right.
with gr.Blocks() as app:
    gr.Markdown("## Automated Debate System")
    gr.Markdown("Generate logical arguments and counterarguments for any debate topic using advanced AI reasoning.")
    with gr.Row():
        # First column for input components.
        with gr.Column():
            topic_input = gr.Textbox(lines=2, label="Debate Topic", placeholder="Enter your debate topic here", elem_id="full_width")
            stance = gr.Dropdown(
                choices=["For", "Against"],
                label="Stance",
                value="For"
            )
            debate_button = gr.Button("Generate Debate Arguments")
        # Second column for the streamed output.
        with gr.Column():
            gr.Markdown("### Debate Arguments")  # Acts as the label for the output
            output_markdown = gr.Markdown()
    # debate_stream is a generator, so Gradio streams its yields into the output.
    debate_button.click(fn=debate_stream, inputs=[topic_input, stance], outputs=output_markdown)

# Launch only when run as a script (Spaces executes the file directly).
if __name__ == "__main__":
    app.launch(debug=True)