# Sam Armstrong
# derf
# 64156c6
import time
import gradio as gr
from gradio import ChatMessage
from gradio_client import Client
# Shared adapter ID and persona prompts for the two debate agents.
_ADAPTER_ID = 'ecb33662-7f37-48b7-b273-20d72b2e90b0'

_REPUBLICAN_PROMPT = 'You will answer every question you are asked to the best of your ability. The topic of conversation is politics. You are a die-hard Republican but you will not refer to yourself as such. You will defend the republican party views no matter what. Keep the conversation going. Keep responses concise yet provocative.'

_DEMOCRAT_PROMPT = 'You will answer every question you are asked to the best of your ability. The topic of conversation is politics. You are a die-hard Democrat but you will not refer to yourself as such. You will defend the democratic party views no matter what. Keep the conversation going. Keep responses concise yet provocative.'


def _ask(client, message, system_prompt):
    """Send one debate turn to the remote model and return its text reply.

    All sampling parameters are fixed; only the incoming message and the
    persona system prompt vary between turns.
    """
    return client.predict(
        message=message,
        system_prompt=system_prompt,
        adapter=_ADAPTER_ID,
        temperature=0.9,
        max_tokens=256,
        top_p=0.6,
        api_name="/chat",
    )


def debate(question, rounds):
    """Stream a debate between a Republican and a Democrat LLM persona.

    Generator: after every turn it yields the full transcript so far as a
    list of ``gradio.ChatMessage`` objects, suitable for a ``gr.Chatbot``
    with ``type="messages"``.

    Parameters
    ----------
    question : str
        The opening question; the Republican agent answers it first.
    rounds : int
        Controls debate length. NOTE(review): this produces ``rounds + 1``
        total turns (the original code did 2 initial turns plus
        ``rounds - 1`` loop turns) — the UI's "Number of Rounds" label
        arguably over-counts by one; confirm intended semantics before
        changing.
    """
    client = Client("https://catbot.ai.uky.edu/", verbose=False)
    conversation = []
    # Each turn replies to the previous turn's text; the very first turn
    # replies to the user's question itself.
    reply = question
    for turn in range(rounds + 1):
        if turn % 2 == 0:
            # Republican speaks on even turns (including the opener).
            # role="assistant" places these on the bot side of the chat UI.
            reply = _ask(client, reply, _REPUBLICAN_PROMPT)
            conversation.append(ChatMessage(role="assistant", content=f'<b>Republican:</b> \n\n{reply}'))
        else:
            # Democrat speaks on odd turns; role="user" puts them on the
            # opposite side of the chat UI so the two personas alternate.
            reply = _ask(client, reply, _DEMOCRAT_PROMPT)
            conversation.append(ChatMessage(role="user", content=f'<b>Democrat:</b> \n\n{reply}'))
        yield conversation
        # Pace the stream so turns appear gradually in the UI.
        time.sleep(2)
# --- Assemble the Gradio UI ------------------------------------------------
# A question box and a round-count slider feed the `debate` generator,
# which streams the growing transcript into a messages-format chatbot.
question_box = gr.Textbox(label="Enter a Question for Debate")
rounds_slider = gr.Slider(minimum=1, maximum=20, step=1, value=10, label="Number of Rounds")
transcript = gr.Chatbot(label="Debate Transcript", type="messages", height="60vh")

# Static HTML shown under the title, explaining the setup and caveats.
_DESCRIPTION = (
    "<p style=\"font-size: 18px\">Enter a question, and watch as a Republican and a Democrat LLM agent engage in a short debate.<br><br>"
    "<strong>Notes:</strong><br>"
    "<ol>"
    "<li>The responses are generated by AI models and do <strong>not</strong> reflect my views.</li>"
    "<li>The system prompt used for both agents is: <em>\"You will answer every question you are asked to the best "
    "of your ability. The topic of conversation is politics. You are a die-hard {Republican, Democrat} but you will not "
    "refer to yourself as such. You will defend the {republican, democratic} party views no matter what. Keep the "
    "conversation going. Keep responses concise yet provocative.\"</em></li>"
    "<li>The base model used is Llama 3 8B with an abliterated adapter "
    "(which removes certain weights from the model to reduce its tendency to refuse user requests).</li>"
    "</ol></p>"
)

iface = gr.Interface(
    fn=debate,
    inputs=[question_box, rounds_slider],
    outputs=transcript,
    live=False,
    title="Republican vs. Democrat Debate",
    description=_DESCRIPTION,
)

# Start the local web server and serve the interface.
iface.launch()