# Hugging Face Space: Automated Debate System (Gradio app)
# (page-scrape metadata removed: Space status "Sleeping", file size 2,031 bytes)
import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the Hugging Face Inference Client
# NOTE(review): no token/model is passed here, so the client presumably picks up
# credentials from the environment (e.g. HF_TOKEN) — confirm deployment config.
client = InferenceClient()
def debate_stream(topic, stance):
    """Stream debate arguments for *topic* argued from *stance*.

    Builds a single-turn chat prompt, requests a streamed chat completion,
    and yields the accumulated text after each chunk so the caller (a Gradio
    output component) can render the argument incrementally.

    Args:
        topic: The debate topic entered by the user.
        stance: The side to argue, e.g. "For" or "Against".

    Yields:
        str: The full generated text so far, growing with each chunk.
    """
    prompt = f"Construct a logical argument for the topic: '{topic}' with a stance: '{stance}'. Include both supporting arguments and potential counterarguments."
    messages = [
        {"role": "user", "content": prompt}
    ]
    # Create a stream to receive generated content
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=messages,
        temperature=0.7,
        max_tokens=1024,
        top_p=0.8,
        stream=True
    )
    # Stream content as it is generated
    debate_output = ""
    for chunk in stream:
        # FIX: in OpenAI-style streams the first (role) and final chunks carry
        # delta.content == None, and choices may be empty; the original
        # `debate_output += chunk.choices[0].delta.content` raised TypeError there.
        if not chunk.choices:
            continue
        piece = chunk.choices[0].delta.content
        if piece:
            debate_output += piece
            yield debate_output  # Yield incremental content to display immediately
# Create Gradio interface with the modified layout:
# a two-column row (inputs on the left, streamed output on the right).
with gr.Blocks() as app:
    gr.Markdown("## Automated Debate System")
    gr.Markdown("Generate logical arguments and counterarguments for any debate topic using advanced AI reasoning.")
    with gr.Row():
        # First column for input components
        with gr.Column():
            # NOTE(review): elem_id="full_width" has no visible matching CSS in this
            # file — presumably styled elsewhere or a leftover; confirm.
            topic_input = gr.Textbox(lines=2, label="Debate Topic", placeholder="Enter your debate topic here", elem_id="full_width")
            stance = gr.Dropdown(
                choices=["For", "Against"],
                label="Stance",
                value="For"
            )
            debate_button = gr.Button("Generate Debate Arguments")
        # Second column for output
        with gr.Column():
            gr.Markdown("### Debate Arguments")  # This acts as the label for the output
            output_markdown = gr.Markdown()
    # Link button to function with inputs and outputs.
    # debate_stream is a generator, so Gradio streams each yielded value
    # into the Markdown component as it arrives.
    debate_button.click(fn=debate_stream, inputs=[topic_input, stance], outputs=output_markdown)
# Run the Gradio app
app.launch(debug=True)