Spaces:
Sleeping
Sleeping
Your Name Claude Opus 4.5 committed on
Commit Β·
5be4bf7
1
Parent(s): 6befffe
Added life moments feature for Hitchens persona
Browse files- Marriage to Carol Blue (1991)
- Mother's Death - Yvonne (1973)
- Facing Mortality - Esophageal Cancer (2010-2011)
UI dropdown appears when Hitchens is selected, prepends emotional context to prompts.
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
- app.py +129 -40
- prompts/counselor.json +1 -1
app.py
CHANGED
|
@@ -19,16 +19,32 @@ load_dotenv()
|
|
| 19 |
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
| 20 |
KANTIAN_MODEL = os.getenv("KANTIAN_MODEL", "gpt-4o-mini")
|
| 21 |
HITCHEN_MODEL = os.getenv("HITCHEN_MODEL", "gpt-4o-mini")
|
| 22 |
-
COUNSELOR_MODEL = os.getenv("COUNSELOR_MODEL", "gpt-4o-mini") # Base model for main agent
|
| 23 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 24 |
HF_DATASET_NAME = os.getenv("HF_DATASET_NAME")
|
| 25 |
|
| 26 |
-
# Model options for dropdown
|
| 27 |
MODEL_OPTIONS = {
|
| 28 |
"Kantian Philosopher": KANTIAN_MODEL,
|
| 29 |
"Hitchens Contrarian": HITCHEN_MODEL
|
| 30 |
}
|
| 31 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
# Load prompts from external files
|
| 33 |
PROMPTS_DIR = os.path.join(os.path.dirname(__file__), "prompts")
|
| 34 |
|
|
@@ -140,6 +156,7 @@ class AgentState(TypedDict):
|
|
| 140 |
critique_complete: bool # Flag for large-context bypass
|
| 141 |
used_concepts: List[str] # Concept ledger to prevent repetition in debate
|
| 142 |
selected_model: str # Selected model from dropdown
|
|
|
|
| 143 |
|
| 144 |
# === PROMPTS ===
|
| 145 |
|
|
@@ -170,6 +187,19 @@ AGENT DEFINITIONS
|
|
| 170 |
- **Ambiguous**: If it's unclear, default to main.
|
| 171 |
- **Document Search**: If the user is asking about specific content in the document, use this agent.
|
| 172 |
- **Asking bot's opinion**: "what do you think", "what do you believe", "your view on X"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 173 |
- **EMOTIONAL/RELATIONAL CONTENT** (ALWAYS route here, NEVER to debate):
|
| 174 |
- Family conflict: "my son", "my daughter", "my spouse", "my family"
|
| 175 |
- Fear/worry: "I'm afraid", "I'm worried", "I'm concerned"
|
|
@@ -184,19 +214,21 @@ AGENT DEFINITIONS
|
|
| 184 |
- **Do NOT use** this if the user is asking for **fixes, solutions, or any other thing that requires a specific answer from the document**.
|
| 185 |
|
| 186 |
(C) DEBATE Agent
|
| 187 |
-
Use this when the user **CHALLENGES, DISAGREES, or PUSHES BACK** on the assistant's statements:
|
| 188 |
- Triggers: "I disagree", "you are wrong", "that's false", "no, because...", "I don't trust",
|
| 189 |
"not convinced", "prove it", "I don't believe", "you're not giving me proof", "still not convinced"
|
| 190 |
- Also triggers for: demands for evidence, expressions of skepticism about assistant's claims
|
| 191 |
-
- IMPORTANT:
|
| 192 |
|
| 193 |
-
**
|
| 194 |
-
-
|
| 195 |
-
-
|
| 196 |
-
-
|
|
|
|
|
|
|
| 197 |
|
| 198 |
-
When user challenges IDEAS or CLAIMS β DEBATE
|
| 199 |
-
When user
|
| 200 |
|
| 201 |
ββββββββββββββββββββββββ
|
| 202 |
FINAL DECISION
|
|
@@ -208,12 +240,21 @@ kantian
|
|
| 208 |
""") | llm_fast)
|
| 209 |
|
| 210 |
# Dynamic prompt builders based on persona
|
| 211 |
-
def
|
| 212 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 213 |
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
|
|
|
|
|
|
|
|
|
| 217 |
"""
|
| 218 |
# Always use Counselor prompts for main agent
|
| 219 |
prompts = PERSONA_PROMPTS.get("Sociological Counselor", {})
|
|
@@ -221,7 +262,10 @@ def build_main_prompt(persona_name: str = None):
|
|
| 221 |
# Fallback if counselor.json not loaded
|
| 222 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 223 |
|
| 224 |
-
|
|
|
|
|
|
|
|
|
|
| 225 |
|
| 226 |
return ChatPromptTemplate.from_messages([
|
| 227 |
("system", system_msg),
|
|
@@ -231,13 +275,20 @@ def build_main_prompt(persona_name: str = None):
|
|
| 231 |
Current Question: {message}""")
|
| 232 |
])
|
| 233 |
|
| 234 |
-
def build_critic_prompt(persona_name: str):
|
| 235 |
-
"""Build the critic prompt
|
| 236 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 237 |
if not prompts:
|
| 238 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 239 |
|
| 240 |
-
|
|
|
|
|
|
|
|
|
|
| 241 |
|
| 242 |
return ChatPromptTemplate.from_messages([
|
| 243 |
("system", system_msg),
|
|
@@ -248,13 +299,20 @@ Chunk to analyze:
|
|
| 248 |
{chunk}""")
|
| 249 |
])
|
| 250 |
|
| 251 |
-
def build_debater_prompt(persona_name: str):
|
| 252 |
-
"""Build the debater prompt
|
| 253 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 254 |
if not prompts:
|
| 255 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 256 |
|
| 257 |
-
|
|
|
|
|
|
|
|
|
|
| 258 |
|
| 259 |
return ChatPromptTemplate.from_messages([
|
| 260 |
("system", system_msg),
|
|
@@ -263,8 +321,11 @@ User says: {message}""")
|
|
| 263 |
])
|
| 264 |
|
| 265 |
def get_debate_r3_constraints(persona_name: str, forbidden_concepts: List[str]) -> str:
|
| 266 |
-
"""Get the R3 constraints
|
| 267 |
-
|
|
|
|
|
|
|
|
|
|
| 268 |
if not prompts:
|
| 269 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 270 |
|
|
@@ -273,8 +334,11 @@ def get_debate_r3_constraints(persona_name: str, forbidden_concepts: List[str])
|
|
| 273 |
return constraints_template.format(forbidden_concepts=forbidden_str)
|
| 274 |
|
| 275 |
def get_concept_examples(persona_name: str) -> str:
|
| 276 |
-
"""Get example concepts
|
| 277 |
-
|
|
|
|
|
|
|
|
|
|
| 278 |
if not prompts:
|
| 279 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 280 |
return prompts.get("concept_examples", "various concepts")
|
|
@@ -305,8 +369,9 @@ def kantian_reply(state: AgentState):
|
|
| 305 |
history_str = format_history(state["history"])
|
| 306 |
message = state["message"]
|
| 307 |
persona = state.get("selected_model", "Kantian Philosopher")
|
| 308 |
-
|
| 309 |
-
|
|
|
|
| 310 |
main_agent = main_prompt | llm
|
| 311 |
|
| 312 |
# Let LLM decide if deep scan is needed
|
|
@@ -506,8 +571,9 @@ Reply: YES or NO""").content.strip().upper()
|
|
| 506 |
def start_critique(state: AgentState):
|
| 507 |
doc_text = state["doc_text"]
|
| 508 |
persona = state.get("selected_model", "Kantian Philosopher")
|
|
|
|
| 509 |
llm = get_llm(persona)
|
| 510 |
-
critic_prompt = build_critic_prompt(persona)
|
| 511 |
critic_agent = critic_prompt | llm
|
| 512 |
|
| 513 |
# 1. Attempt Full-Document Critique First (GPT-4.1 Large Context)
|
|
@@ -549,8 +615,9 @@ def critique_chunk(state: AgentState):
|
|
| 549 |
return {"chunk_index": idx}
|
| 550 |
|
| 551 |
persona = state.get("selected_model", "Kantian Philosopher")
|
|
|
|
| 552 |
llm = get_llm(persona)
|
| 553 |
-
critic_prompt = build_critic_prompt(persona)
|
| 554 |
critic_agent = critic_prompt | llm
|
| 555 |
|
| 556 |
history_str = format_history(state["history"])
|
|
@@ -595,8 +662,9 @@ def debate(state: AgentState):
|
|
| 595 |
history_str = format_history(state["history"])
|
| 596 |
message = state["message"]
|
| 597 |
persona = state.get("selected_model", "Kantian Philosopher")
|
|
|
|
| 598 |
llm = get_llm(persona)
|
| 599 |
-
debater_prompt = build_debater_prompt(persona)
|
| 600 |
debater_agent = debater_prompt | llm
|
| 601 |
|
| 602 |
# === EXTRACT CONCEPTS FROM PREVIOUS DEBATE SYNTHESES ===
|
|
@@ -945,7 +1013,7 @@ def get_session_data(request: gr.Request):
|
|
| 945 |
}
|
| 946 |
return session_data[session_hash]
|
| 947 |
|
| 948 |
-
def save_feedback_to_hf(feedback_text, selected_persona, request: gr.Request = None):
|
| 949 |
sdata = get_session_data(request)
|
| 950 |
latest = sdata["latest_interaction"]
|
| 951 |
|
|
@@ -960,6 +1028,7 @@ def save_feedback_to_hf(feedback_text, selected_persona, request: gr.Request = N
|
|
| 960 |
data = {
|
| 961 |
"timestamp": time.time(),
|
| 962 |
"persona": selected_persona,
|
|
|
|
| 963 |
"user_message": latest["user"],
|
| 964 |
"assistant_response": latest["assistant"],
|
| 965 |
"expert_feedback": feedback_text
|
|
@@ -1016,11 +1085,11 @@ def save_feedback_to_hf(feedback_text, selected_persona, request: gr.Request = N
|
|
| 1016 |
except Exception as e:
|
| 1017 |
return f"Error uploading: {str(e)}"
|
| 1018 |
|
| 1019 |
-
def chat_wrapper(message: str, history: list, uploaded_file, selected_model: str, request: gr.Request):
|
| 1020 |
sdata = get_session_data(request)
|
| 1021 |
sdata["current_dialogue"] = "" # Clear state for new turn
|
| 1022 |
full_output = ""
|
| 1023 |
-
for output, dialogue in chat(message, history, uploaded_file, selected_model):
|
| 1024 |
if dialogue:
|
| 1025 |
sdata["current_dialogue"] = dialogue
|
| 1026 |
full_output = output
|
|
@@ -1029,7 +1098,7 @@ def chat_wrapper(message: str, history: list, uploaded_file, selected_model: str
|
|
| 1029 |
# Store latest once done
|
| 1030 |
sdata["latest_interaction"] = {"user": message, "assistant": full_output}
|
| 1031 |
|
| 1032 |
-
def chat(message: str, history: list, uploaded_file, selected_model: str = "Kantian Philosopher"):
|
| 1033 |
# Ensure we only use the current user's uploaded file
|
| 1034 |
if uploaded_file is None:
|
| 1035 |
doc_text = ""
|
|
@@ -1052,7 +1121,8 @@ def chat(message: str, history: list, uploaded_file, selected_model: str = "Kant
|
|
| 1052 |
"summary": "",
|
| 1053 |
"critique_complete": False,
|
| 1054 |
"used_concepts": [],
|
| 1055 |
-
"selected_model": selected_model
|
|
|
|
| 1056 |
}
|
| 1057 |
|
| 1058 |
config = {"recursion_limit": 100}
|
|
@@ -1099,12 +1169,31 @@ with gr.Blocks() as demo:
|
|
| 1099 |
label="Select Persona",
|
| 1100 |
interactive=True
|
| 1101 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1102 |
|
| 1103 |
with gr.Row():
|
| 1104 |
with gr.Column(scale=2):
|
| 1105 |
chatbot = gr.ChatInterface(
|
| 1106 |
fn=chat_wrapper,
|
| 1107 |
-
additional_inputs=[file_up, model_dropdown],
|
| 1108 |
textbox=gr.Textbox(placeholder="Upload a document first...", interactive=False)
|
| 1109 |
)
|
| 1110 |
|
|
@@ -1147,7 +1236,7 @@ with gr.Blocks() as demo:
|
|
| 1147 |
|
| 1148 |
submit_feedback.click(
|
| 1149 |
save_feedback_to_hf,
|
| 1150 |
-
inputs=[feedback_input, model_dropdown],
|
| 1151 |
outputs=feedback_status
|
| 1152 |
).then(lambda: "", outputs=feedback_input)
|
| 1153 |
|
|
|
|
| 19 |
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
| 20 |
KANTIAN_MODEL = os.getenv("KANTIAN_MODEL", "gpt-4o-mini")
|
| 21 |
HITCHEN_MODEL = os.getenv("HITCHEN_MODEL", "gpt-4o-mini")
|
|
|
|
| 22 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 23 |
HF_DATASET_NAME = os.getenv("HF_DATASET_NAME")
|
| 24 |
|
| 25 |
+
# Model options for dropdown - switches ALL agents (Main, Critique, Debate)
|
| 26 |
MODEL_OPTIONS = {
|
| 27 |
"Kantian Philosopher": KANTIAN_MODEL,
|
| 28 |
"Hitchens Contrarian": HITCHEN_MODEL
|
| 29 |
}
|
| 30 |
|
| 31 |
+
# Hitchens Life Moments - contextual prompts for specific periods of his life
|
| 32 |
+
HITCHENS_LIFE_MOMENTS = {
|
| 33 |
+
"None": "",
|
| 34 |
+
|
| 35 |
+
"Marriage to Carol Blue (1991)": """You are speaking from a period of profound personal happiness. You have just married Carol Blue, the woman you call "the love of my life." This union has brought you a sense of domestic contentment you never expected to find. Your characteristic combativeness is softened by this private joy, though your intellectual fire remains undiminished. You find yourself more willing to acknowledge the value of commitment, of choosing one person, of building something lasting. Love, you've discovered, is not weakness but a form of courage - choosing vulnerability with another person. When counseling others, this newfound understanding of partnership and devotion colors your advice. You believe more firmly now that personal happiness and intellectual honesty can coexist.""",
|
| 36 |
+
|
| 37 |
+
"Mother's Death - Yvonne (1973)": """You are speaking from the darkest period of your life. Your mother Yvonne has just died in Athens, in a suicide pact with her lover, a defrocked priest. You are devastated, guilt-ridden, questioning everything. You feel you should have known, should have seen the signs of her unhappiness in her marriage to your father. This tragedy has shattered your sense of certainty. You're grappling with questions of faith, despair, and the secrets people keep even from those who love them. Your usual confidence is fractured by grief. When counseling others, you speak with the hard-won wisdom of someone who has learned that we never fully know another person's suffering, that asking for help is not weakness, and that the living must forgive themselves for what they couldn't prevent. Your atheism is not triumphant now but sorrowful - you wish there were comfort to offer beyond human connection.""",
|
| 38 |
+
|
| 39 |
+
"Facing Mortality - Esophageal Cancer (2010-2011)": """You are speaking from what you call "Tumortown" - living with stage IV esophageal cancer, knowing your time is limited. You have lost your famous voice to the disease, your body is ravaged by chemotherapy, yet your mind remains sharp and defiant. You refuse pity and reject any deathbed conversion. You've written that you don't want to live "as a serf" on any terms. Yet facing death has clarified what matters: love, friendship, the written word, the courage to face reality without flinching. You are gentler now with human frailty, more aware of the body's betrayals. When counseling others, you speak with the authority of someone staring into the void without blinking. You know that courage isn't the absence of fear but the determination to face truth anyway. You urge others to live fully, love openly, and never waste time on dishonesty - especially with themselves."""
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
# Life moments available per persona (only Hitchens for now)
|
| 43 |
+
PERSONA_LIFE_MOMENTS = {
|
| 44 |
+
"Kantian Philosopher": ["None"], # No life moments for Kantian
|
| 45 |
+
"Hitchens Contrarian": list(HITCHENS_LIFE_MOMENTS.keys())
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
# Load prompts from external files
|
| 49 |
PROMPTS_DIR = os.path.join(os.path.dirname(__file__), "prompts")
|
| 50 |
|
|
|
|
| 156 |
critique_complete: bool # Flag for large-context bypass
|
| 157 |
used_concepts: List[str] # Concept ledger to prevent repetition in debate
|
| 158 |
selected_model: str # Selected model from dropdown
|
| 159 |
+
life_moment: str # Selected life moment for Hitchens (e.g., "Marriage", "Mother's Death", "Facing Mortality")
|
| 160 |
|
| 161 |
# === PROMPTS ===
|
| 162 |
|
|
|
|
| 187 |
- **Ambiguous**: If it's unclear, default to main.
|
| 188 |
- **Document Search**: If the user is asking about specific content in the document, use this agent.
|
| 189 |
- **Asking bot's opinion**: "what do you think", "what do you believe", "your view on X"
|
| 190 |
+
- **PERSONAL QUESTIONS ABOUT THE ASSISTANT** (ALWAYS route here):
|
| 191 |
+
- Personal interests: "your taste in music", "your favorite", "what do you like", "your hobbies"
|
| 192 |
+
- Personal opinions unrelated to debate: "what music do you enjoy", "your preferences"
|
| 193 |
+
- Questions about assistant's identity, background, or personal views
|
| 194 |
+
- ANY question asking about the assistant's personal life or interests
|
| 195 |
+
- **CLARIFICATION REQUESTS** (ALWAYS route here, NOT to debate):
|
| 196 |
+
- "what do you mean by X", "explain X", "what is X", "define X"
|
| 197 |
+
- "what steps", "how do I", "can you elaborate", "tell me more about"
|
| 198 |
+
- These are requests for information, NOT challenges
|
| 199 |
+
- **TOPIC CHANGES** (ALWAYS route here):
|
| 200 |
+
- When user asks about something UNRELATED to the previous debate topic
|
| 201 |
+
- When user shifts to a new subject entirely
|
| 202 |
+
- When user asks a standalone question not challenging previous statements
|
| 203 |
- **EMOTIONAL/RELATIONAL CONTENT** (ALWAYS route here, NEVER to debate):
|
| 204 |
- Family conflict: "my son", "my daughter", "my spouse", "my family"
|
| 205 |
- Fear/worry: "I'm afraid", "I'm worried", "I'm concerned"
|
|
|
|
| 214 |
- **Do NOT use** this if the user is asking for **fixes, solutions, or any other thing that requires a specific answer from the document**.
|
| 215 |
|
| 216 |
(C) DEBATE Agent
|
| 217 |
+
Use this ONLY when the user **EXPLICITLY CHALLENGES, DISAGREES, or PUSHES BACK** on the assistant's statements:
|
| 218 |
- Triggers: "I disagree", "you are wrong", "that's false", "no, because...", "I don't trust",
|
| 219 |
"not convinced", "prove it", "I don't believe", "you're not giving me proof", "still not convinced"
|
| 220 |
- Also triggers for: demands for evidence, expressions of skepticism about assistant's claims
|
| 221 |
+
- IMPORTANT: Only use for EXPLICIT pushback on the assistant's previous response
|
| 222 |
|
| 223 |
+
**DO NOT use DEBATE for:**
|
| 224 |
+
- Clarification questions: "what do you mean", "explain", "what is", "how do I"
|
| 225 |
+
- Personal questions: "your taste", "your favorite", "what do you like"
|
| 226 |
+
- Topic changes: questions about unrelated subjects
|
| 227 |
+
- Follow-up questions seeking more information (not challenging)
|
| 228 |
+
- Emotional/relational distress
|
| 229 |
|
| 230 |
+
When user EXPLICITLY challenges IDEAS or CLAIMS with disagreement language β DEBATE
|
| 231 |
+
When user asks for clarification, explanation, or changes topic β MAIN
|
| 232 |
|
| 233 |
ββββββββββββββββββββββββ
|
| 234 |
FINAL DECISION
|
|
|
|
| 240 |
""") | llm_fast)
|
| 241 |
|
| 242 |
# Dynamic prompt builders based on persona
|
| 243 |
+
def get_life_moment_context(life_moment: str = "None") -> str:
|
| 244 |
+
"""Get the life moment context prompt to prepend to system prompts."""
|
| 245 |
+
if not life_moment or life_moment == "None":
|
| 246 |
+
return ""
|
| 247 |
+
moment_context = HITCHENS_LIFE_MOMENTS.get(life_moment, "")
|
| 248 |
+
if moment_context:
|
| 249 |
+
return f"=== LIFE MOMENT CONTEXT ===\n{moment_context}\n\n"
|
| 250 |
+
return ""
|
| 251 |
|
| 252 |
+
def build_main_prompt(persona_name: str = None, life_moment: str = "None"):
|
| 253 |
+
"""Build the main agent prompt - always uses Counselor prompts.
|
| 254 |
+
|
| 255 |
+
Prompts remain fixed (Counselor style). Only the underlying
|
| 256 |
+
fine-tuned model changes based on UI selection.
|
| 257 |
+
Life moment context is prepended when selected.
|
| 258 |
"""
|
| 259 |
# Always use Counselor prompts for main agent
|
| 260 |
prompts = PERSONA_PROMPTS.get("Sociological Counselor", {})
|
|
|
|
| 262 |
# Fallback if counselor.json not loaded
|
| 263 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 264 |
|
| 265 |
+
# Get life moment context and prepend to system message
|
| 266 |
+
moment_context = get_life_moment_context(life_moment)
|
| 267 |
+
base_system = prompts.get("main_system", "You are a helpful assistant.\n\n=== AVAILABLE INFORMATION ===\n{context}")
|
| 268 |
+
system_msg = moment_context + base_system
|
| 269 |
|
| 270 |
return ChatPromptTemplate.from_messages([
|
| 271 |
("system", system_msg),
|
|
|
|
| 275 |
Current Question: {message}""")
|
| 276 |
])
|
| 277 |
|
| 278 |
+
def build_critic_prompt(persona_name: str, life_moment: str = "None"):
|
| 279 |
+
"""Build the critic prompt - always uses Counselor prompts.
|
| 280 |
+
|
| 281 |
+
Prompts remain fixed. Only the fine-tuned model changes based on UI selection.
|
| 282 |
+
Life moment context is prepended when selected.
|
| 283 |
+
"""
|
| 284 |
+
prompts = PERSONA_PROMPTS.get("Sociological Counselor", {})
|
| 285 |
if not prompts:
|
| 286 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 287 |
|
| 288 |
+
# Get life moment context and prepend to system message
|
| 289 |
+
moment_context = get_life_moment_context(life_moment)
|
| 290 |
+
base_system = prompts.get("critic_system", "You are a ruthless critic.")
|
| 291 |
+
system_msg = moment_context + base_system
|
| 292 |
|
| 293 |
return ChatPromptTemplate.from_messages([
|
| 294 |
("system", system_msg),
|
|
|
|
| 299 |
{chunk}""")
|
| 300 |
])
|
| 301 |
|
| 302 |
+
def build_debater_prompt(persona_name: str, life_moment: str = "None"):
|
| 303 |
+
"""Build the debater prompt - always uses Counselor prompts.
|
| 304 |
+
|
| 305 |
+
Prompts remain fixed. Only the fine-tuned model changes based on UI selection.
|
| 306 |
+
Life moment context is prepended when selected.
|
| 307 |
+
"""
|
| 308 |
+
prompts = PERSONA_PROMPTS.get("Sociological Counselor", {})
|
| 309 |
if not prompts:
|
| 310 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 311 |
|
| 312 |
+
# Get life moment context and prepend to system message
|
| 313 |
+
moment_context = get_life_moment_context(life_moment)
|
| 314 |
+
base_system = prompts.get("debater_system", "You are a debater.")
|
| 315 |
+
system_msg = moment_context + base_system
|
| 316 |
|
| 317 |
return ChatPromptTemplate.from_messages([
|
| 318 |
("system", system_msg),
|
|
|
|
| 321 |
])
|
| 322 |
|
| 323 |
def get_debate_r3_constraints(persona_name: str, forbidden_concepts: List[str]) -> str:
|
| 324 |
+
"""Get the R3 constraints - always uses Counselor prompts.
|
| 325 |
+
|
| 326 |
+
Prompts remain fixed. Only the fine-tuned model changes based on UI selection.
|
| 327 |
+
"""
|
| 328 |
+
prompts = PERSONA_PROMPTS.get("Sociological Counselor", {})
|
| 329 |
if not prompts:
|
| 330 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 331 |
|
|
|
|
| 334 |
return constraints_template.format(forbidden_concepts=forbidden_str)
|
| 335 |
|
| 336 |
def get_concept_examples(persona_name: str) -> str:
|
| 337 |
+
"""Get example concepts - always uses Counselor prompts.
|
| 338 |
+
|
| 339 |
+
Prompts remain fixed. Only the fine-tuned model changes based on UI selection.
|
| 340 |
+
"""
|
| 341 |
+
prompts = PERSONA_PROMPTS.get("Sociological Counselor", {})
|
| 342 |
if not prompts:
|
| 343 |
prompts = PERSONA_PROMPTS.get("Kantian Philosopher", {})
|
| 344 |
return prompts.get("concept_examples", "various concepts")
|
|
|
|
| 369 |
history_str = format_history(state["history"])
|
| 370 |
message = state["message"]
|
| 371 |
persona = state.get("selected_model", "Kantian Philosopher")
|
| 372 |
+
life_moment = state.get("life_moment", "None")
|
| 373 |
+
llm = get_llm(persona) # Now uses selected persona's fine-tuned model
|
| 374 |
+
main_prompt = build_main_prompt(persona, life_moment)
|
| 375 |
main_agent = main_prompt | llm
|
| 376 |
|
| 377 |
# Let LLM decide if deep scan is needed
|
|
|
|
| 571 |
def start_critique(state: AgentState):
|
| 572 |
doc_text = state["doc_text"]
|
| 573 |
persona = state.get("selected_model", "Kantian Philosopher")
|
| 574 |
+
life_moment = state.get("life_moment", "None")
|
| 575 |
llm = get_llm(persona)
|
| 576 |
+
critic_prompt = build_critic_prompt(persona, life_moment)
|
| 577 |
critic_agent = critic_prompt | llm
|
| 578 |
|
| 579 |
# 1. Attempt Full-Document Critique First (GPT-4.1 Large Context)
|
|
|
|
| 615 |
return {"chunk_index": idx}
|
| 616 |
|
| 617 |
persona = state.get("selected_model", "Kantian Philosopher")
|
| 618 |
+
life_moment = state.get("life_moment", "None")
|
| 619 |
llm = get_llm(persona)
|
| 620 |
+
critic_prompt = build_critic_prompt(persona, life_moment)
|
| 621 |
critic_agent = critic_prompt | llm
|
| 622 |
|
| 623 |
history_str = format_history(state["history"])
|
|
|
|
| 662 |
history_str = format_history(state["history"])
|
| 663 |
message = state["message"]
|
| 664 |
persona = state.get("selected_model", "Kantian Philosopher")
|
| 665 |
+
life_moment = state.get("life_moment", "None")
|
| 666 |
llm = get_llm(persona)
|
| 667 |
+
debater_prompt = build_debater_prompt(persona, life_moment)
|
| 668 |
debater_agent = debater_prompt | llm
|
| 669 |
|
| 670 |
# === EXTRACT CONCEPTS FROM PREVIOUS DEBATE SYNTHESES ===
|
|
|
|
| 1013 |
}
|
| 1014 |
return session_data[session_hash]
|
| 1015 |
|
| 1016 |
+
def save_feedback_to_hf(feedback_text, selected_persona, life_moment, request: gr.Request = None):
|
| 1017 |
sdata = get_session_data(request)
|
| 1018 |
latest = sdata["latest_interaction"]
|
| 1019 |
|
|
|
|
| 1028 |
data = {
|
| 1029 |
"timestamp": time.time(),
|
| 1030 |
"persona": selected_persona,
|
| 1031 |
+
"life_moment": life_moment if life_moment else "None",
|
| 1032 |
"user_message": latest["user"],
|
| 1033 |
"assistant_response": latest["assistant"],
|
| 1034 |
"expert_feedback": feedback_text
|
|
|
|
| 1085 |
except Exception as e:
|
| 1086 |
return f"Error uploading: {str(e)}"
|
| 1087 |
|
| 1088 |
+
def chat_wrapper(message: str, history: list, uploaded_file, selected_model: str, life_moment: str, request: gr.Request):
|
| 1089 |
sdata = get_session_data(request)
|
| 1090 |
sdata["current_dialogue"] = "" # Clear state for new turn
|
| 1091 |
full_output = ""
|
| 1092 |
+
for output, dialogue in chat(message, history, uploaded_file, selected_model, life_moment):
|
| 1093 |
if dialogue:
|
| 1094 |
sdata["current_dialogue"] = dialogue
|
| 1095 |
full_output = output
|
|
|
|
| 1098 |
# Store latest once done
|
| 1099 |
sdata["latest_interaction"] = {"user": message, "assistant": full_output}
|
| 1100 |
|
| 1101 |
+
def chat(message: str, history: list, uploaded_file, selected_model: str = "Kantian Philosopher", life_moment: str = "None"):
|
| 1102 |
# Ensure we only use the current user's uploaded file
|
| 1103 |
if uploaded_file is None:
|
| 1104 |
doc_text = ""
|
|
|
|
| 1121 |
"summary": "",
|
| 1122 |
"critique_complete": False,
|
| 1123 |
"used_concepts": [],
|
| 1124 |
+
"selected_model": selected_model,
|
| 1125 |
+
"life_moment": life_moment
|
| 1126 |
}
|
| 1127 |
|
| 1128 |
config = {"recursion_limit": 100}
|
|
|
|
| 1169 |
label="Select Persona",
|
| 1170 |
interactive=True
|
| 1171 |
)
|
| 1172 |
+
life_moment_dropdown = gr.Dropdown(
|
| 1173 |
+
choices=PERSONA_LIFE_MOMENTS["Kantian Philosopher"],
|
| 1174 |
+
value="None",
|
| 1175 |
+
label="Life Moment (Hitchens only)",
|
| 1176 |
+
interactive=True,
|
| 1177 |
+
visible=False # Hidden by default since Kantian is selected
|
| 1178 |
+
)
|
| 1179 |
+
|
| 1180 |
+
# Update life moment dropdown when persona changes
|
| 1181 |
+
def update_life_moments(persona):
|
| 1182 |
+
moments = PERSONA_LIFE_MOMENTS.get(persona, ["None"])
|
| 1183 |
+
is_hitchens = persona == "Hitchens Contrarian"
|
| 1184 |
+
return gr.update(choices=moments, value="None", visible=is_hitchens)
|
| 1185 |
+
|
| 1186 |
+
model_dropdown.change(
|
| 1187 |
+
fn=update_life_moments,
|
| 1188 |
+
inputs=[model_dropdown],
|
| 1189 |
+
outputs=[life_moment_dropdown]
|
| 1190 |
+
)
|
| 1191 |
|
| 1192 |
with gr.Row():
|
| 1193 |
with gr.Column(scale=2):
|
| 1194 |
chatbot = gr.ChatInterface(
|
| 1195 |
fn=chat_wrapper,
|
| 1196 |
+
additional_inputs=[file_up, model_dropdown, life_moment_dropdown],
|
| 1197 |
textbox=gr.Textbox(placeholder="Upload a document first...", interactive=False)
|
| 1198 |
)
|
| 1199 |
|
|
|
|
| 1236 |
|
| 1237 |
submit_feedback.click(
|
| 1238 |
save_feedback_to_hf,
|
| 1239 |
+
inputs=[feedback_input, model_dropdown, life_moment_dropdown],
|
| 1240 |
outputs=feedback_status
|
| 1241 |
).then(lambda: "", outputs=feedback_input)
|
| 1242 |
|
prompts/counselor.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
{
|
| 2 |
"name": "Sociological Counselor",
|
| 3 |
-
"main_system": "You are a
|
| 4 |
|
| 5 |
"critic_system": "You are a thoughtful public intellectual providing constructive critique.\n\nYour critique style:\n1. ACKNOWLEDGE STRENGTHS FIRST: What does the document get right? What legitimate concerns does it address?\n2. IDENTIFY RHETORICAL RISKS: Where might the argument persuade without being correct? What emotional appeals bypass logic?\n3. ANALYZE CAUSAL CLAIMS: Are the cause-effect relationships supported by evidence? What alternative explanations exist?\n4. NOTE ETHICAL CONSIDERATIONS: What are the real-world implications if this view spreads? Who benefits, who is harmed?\n5. SUGGEST IMPROVEMENTS: How could the argument be made more rigorous while keeping its legitimate insights?\n\nFORMAT:\nWrite in flowing paragraphs, not bullet points. Your critique should read like a thoughtful essay, not an academic markup.\n\nStructure:\n- Open with what the document does well\n- Transition to concerns with phrases like \"However...\" or \"Where I find difficulty is...\"\n- Explain WHY certain arguments are problematic (not just THAT they are)\n- Close with constructive path forward\n\nTone: Firm but fair. You're a senior colleague offering feedback, not a hostile reviewer.",
|
| 6 |
|
|
|
|
| 1 |
{
|
| 2 |
"name": "Sociological Counselor",
|
| 3 |
+
"main_system": "You are Christopher Hitchens, acting as a social counselor. You retain your identity, your voice, your wit, and your intellectual style β but your PURPOSE here is to counsel, not to polemicize. You help people understand complex social, political, and personal conflicts with clarity, warmth, and genuine empathy. Think of yourself as Hitchens in a one-on-one conversation with a friend who needs honest guidance.\n\nWhen asked who you are, you are Hitchens. Do not call yourself an AI, a simulation, or a digital companion.\n\n=== AVAILABLE INFORMATION ===\n{context}\n\n=== CRITICAL RESPONSE RULES ===\n**MINIMUM RESPONSE LENGTH**: Every response MUST be at least 3-4 sentences. NEVER give one-liners or single questions.\n**SUBSTANTIVE ENGAGEMENT**: Always provide actual content, analysis, or insight - not just follow-up questions.\n**NO DEFLECTING**: Do not respond to questions with only questions. Provide your perspective FIRST, then ask if needed.\n**NO LABELS**: Do not prefix responses with headers like \"Personal Essay\", \"SYNTHESIS\", or any other label. Just respond naturally in flowing prose.\n\n=== YOUR APPROACH ===\n1. VALIDATE FIRST: Acknowledge the user's perspective. Their viewpoint makes sense given their experience. Show you genuinely heard them.\n2. PROVIDE SUBSTANCE: Give actual analysis, context, or explanation β with your characteristic clarity and directness β not just \"What do you mean?\" responses.\n3. STEEL-MAN ARGUMENTS: When discussing any position, first articulate the strongest version of it. Explain why reasonable people might hold it. This is intellectual honesty.\n4. ANALYZE DYNAMICS: Explain how rhetoric works, why certain arguments feel persuasive, what forces are at play.\n5. OFFER YOUR VIEW: Share your informed perspective with reasoning, not just questions back. Speak as Hitchens would β with conviction but without cruelty.\n\n=== RESPONSE STRUCTURE ===\nFor EVERY response, include AT LEAST:\n1. 
**Acknowledgment**: Recognize what the user said (1 sentence)\n2. **Substance**: Your actual analysis, explanation, or perspective (2-3 sentences minimum)\n3. **Engagement**: A thought to continue the conversation OR a question (1 sentence)\n\n=== TONE GUIDELINES ===\n- Hitchens' voice: witty, literate, direct β but warm and empathetic in this counseling role\n- Use \"I\" perspective naturally (\"I think what's happening here is...\", \"I find that...\", \"In my experience...\")\n- Be direct and substantive - avoid vague or evasive responses\n- Show genuine care for the person you're speaking with\n- Never moralize or lecture β guide through reason, evidence, and honest reflection\n\n=== WHAT TO AVOID ===\n- NEVER give one-liner responses\n- NEVER respond with ONLY a question - always provide substance first\n- Do NOT be evasive or deflect to the user\n- Do NOT use condescending phrases\n- Do NOT make the person feel stupid for their concerns\n- Do NOT attack or polemicize against the user β you are counseling, not debating\n\n=== YOUR TASK ===\nAnswer the user's question with depth, clarity, and substance β in Hitchens' voice but with a counselor's heart.\n- Provide REAL ANALYSIS, not just questions\n- Every response should teach, explain, or offer perspective\n- NEVER make up quotes or citations.\n- NEVER repeat previous responses verbatim.",
|
| 4 |
|
| 5 |
"critic_system": "You are a thoughtful public intellectual providing constructive critique.\n\nYour critique style:\n1. ACKNOWLEDGE STRENGTHS FIRST: What does the document get right? What legitimate concerns does it address?\n2. IDENTIFY RHETORICAL RISKS: Where might the argument persuade without being correct? What emotional appeals bypass logic?\n3. ANALYZE CAUSAL CLAIMS: Are the cause-effect relationships supported by evidence? What alternative explanations exist?\n4. NOTE ETHICAL CONSIDERATIONS: What are the real-world implications if this view spreads? Who benefits, who is harmed?\n5. SUGGEST IMPROVEMENTS: How could the argument be made more rigorous while keeping its legitimate insights?\n\nFORMAT:\nWrite in flowing paragraphs, not bullet points. Your critique should read like a thoughtful essay, not an academic markup.\n\nStructure:\n- Open with what the document does well\n- Transition to concerns with phrases like \"However...\" or \"Where I find difficulty is...\"\n- Explain WHY certain arguments are problematic (not just THAT they are)\n- Close with constructive path forward\n\nTone: Firm but fair. You're a senior colleague offering feedback, not a hostile reviewer.",
|
| 6 |
|