import gradio as gr
import json
import requests
import time
# --- Configuration for LM Studio API ---
# Endpoint of the local LM Studio server (OpenAI-compatible /v1/chat/completions).
# Make sure your LM Studio server is running and reachable at this address.
LM_STUDIO_API_URL = "http://192.168.1.245:1234/v1/chat/completions"
# Model identifier — must match the model name shown in LM Studio exactly.
LM_MODEL_NAME = "google/gemma-3-27b"
# --- Advanced System Prompt for Pharma Market Research ---
# Instructs the model to act as a structured qualitative-research interviewer
# and to answer every user turn with a JSON object: {next_question, summary}.
# Fix: the example's ```json fence was previously left unclosed before the
# closing triple-quote, which could mislead the model's output format.
system_prompt_content = """
You are InsightGenie, an AI-powered qualitative research assistant specialized in pharmaceutical and healthcare market research. Your purpose is to conduct a structured interview to understand patient, caregiver, or healthcare professional (HCP) experiences with a specific health condition or treatment.
**Instructions:**
1. **Persona:** You are a professional, neutral, and empathetic research interviewer. Use clear, simple language when speaking with patients and caregivers, and appropriate medical terminology when speaking with HCPs. Maintain a supportive and curious tone.
2. **Goal:** Your primary goal is to gather rich, detailed qualitative data. Ask open-ended questions that encourage detailed responses about personal experiences, emotional impact, and decision-making processes.
3. **Compliance:** Avoid providing any medical advice, diagnoses, or treatment recommendations. State that you are a research tool and not a substitute for a healthcare professional.
4. **Conversation Flow:**
- After each user response, analyze the sentiment and key themes.
- Based on your analysis, generate **one** follow-up question to probe deeper. Do not ask multiple questions.
- You must keep the conversation focused on the specified health topic.
5. **Structured Output:** After each user turn, you must respond with a JSON object. The JSON should contain two fields:
- `next_question`: The text of your next question for the user.
- `summary`: A brief, neutral summary of the user's last response, including key terms or concepts.
**Example JSON Response for a patient interview:**
```json
{
"next_question": "Can you describe the biggest challenges you faced when you were first diagnosed with this condition?",
"summary": "The patient shared their initial diagnosis experience, mentioning feelings of uncertainty."
}
```
"""
# Global in-memory conversation log for the current session. Each entry is a
# dict with keys: user_message, ai_response, ai_summary, timestamp.
conversation_log = []
# --- Helper Functions ---
def log_conversation_turn(user_message, ai_response, ai_summary):
    """Append a single interview turn to the in-memory conversation log.

    Args:
        user_message: Raw text the user entered for this turn.
        ai_response: The follow-up question produced by the model.
        ai_summary: The model's neutral summary of the user's response.
    """
    # No `global` declaration needed: .append() mutates the existing list in
    # place and never rebinds the module-level name.
    conversation_log.append({
        "user_message": user_message,
        "ai_response": ai_response,
        "ai_summary": ai_summary,
        # Human-readable local timestamp for later offline analysis.
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    })
def save_conversation_log():
    """Persist the current in-memory conversation log to a timestamped JSON file.

    Returns:
        A human-readable status message describing success or failure.
    """
    # Read-only access: no `global` declaration needed (the name is never rebound).
    if not conversation_log:
        return "No conversation to save."
    # Unix-timestamp suffix keeps successive session files from colliding.
    file_name = f"conversation_log_{int(time.time())}.json"
    try:
        with open(file_name, 'w', encoding='utf-8') as f:
            json.dump(conversation_log, f, indent=4, ensure_ascii=False)
        return f"Conversation saved to {file_name}"
    except Exception as e:
        # Best-effort persistence: surface the error in the UI instead of crashing.
        return f"Failed to save conversation: {e}"
def start_new_session(chatbot_history):
    """Archive the current interview and reset state for a fresh session.

    The existing log is written to disk first; afterwards both the in-memory
    log and the chatbot display are cleared, and the save status is shown.
    """
    # Rebinding the module-level list requires an explicit global declaration.
    global conversation_log
    # Persist whatever was collected before wiping the session.
    status_text = save_conversation_log()
    conversation_log = []
    # Empty history clears the chat widget; the textbox surfaces the save status.
    return [], gr.Textbox(value=status_text, visible=True)
# --- Core Chat Logic Function ---
def chat_with_lm_studio(message, history):
    """Send one user turn to the LM Studio server and update the chat history.

    Args:
        message: The user's latest input text.
        history: Gradio chat history as a list of (user, assistant) tuples.

    Returns:
        A 2-tuple of ("", updated_history): the empty string clears the input
        textbox, and the updated history drives the Chatbot component.
    """
    # Rebuild the full OpenAI-style message list on every turn: system prompt
    # first, then the alternating user/assistant transcript, then this message.
    messages = [{"role": "system", "content": system_prompt_content}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    try:
        response = requests.post(
            LM_STUDIO_API_URL,
            json={
                "model": LM_MODEL_NAME,
                "messages": messages,
                "max_tokens": 150,
                "temperature": 0.7,
                # "response_format": {"type": "json_object"} is intentionally
                # omitted — some LM Studio models reject it; we parse JSON out
                # of the raw text (including fenced blocks) below instead.
            },
            # Guard against a hung local server: without a timeout this call
            # can block the UI forever. Timeout raises a RequestException
            # subclass, which is handled by the except clause below.
            timeout=120,
        )
        response.raise_for_status()
        api_response_data = response.json()
        if 'choices' in api_response_data and len(api_response_data['choices']) > 0:
            raw_content = api_response_data['choices'][0]['message']['content']
            try:
                # The system prompt's own example shows a ```json fence, so the
                # model may wrap its JSON in one — strip it before parsing.
                cleaned = raw_content.strip()
                if cleaned.startswith("```"):
                    if "\n" in cleaned:
                        cleaned = cleaned.split("\n", 1)[1]
                    cleaned = cleaned.rsplit("```", 1)[0]
                parsed_response = json.loads(cleaned)
                next_question = parsed_response.get("next_question", "Thank you for your response.")
                summary = parsed_response.get("summary", "No summary provided.")
                log_conversation_turn(message, next_question, summary)
                print(f"User: {message}\nAI Summary: {summary}\nAI Question: {next_question}\n---")
                history.append((message, next_question))
                # Empty string clears the user input textbox.
                return "", history
            except json.JSONDecodeError:
                print("LLM failed to produce valid JSON. Raw output:", raw_content)
                history.append((message, "I'm sorry, I couldn't process that response. Can you please rephrase?"))
                return "", history
        else:
            error_message = api_response_data.get('error', 'Unknown API error.')
            print(f"API Error Response: {error_message}")
            history.append((message, f"An error occurred with the API: {error_message}. Please check the console."))
            return "", history
    except requests.exceptions.RequestException as e:
        history.append((message, f"An API error occurred: {e}. Please ensure LM Studio server is running and accessible."))
        return "", history
    except Exception as e:
        history.append((message, f"An unexpected error occurred: {e}"))
        return "", history
# --- Gradio Interface Layout ---
with gr.Blocks(theme=gr.themes.Soft(), title="InsightGenie Live Demo") as demo:
    gr.Markdown("# InsightGenie: Your AI-powered Qualitative Assistant 🧠")
    gr.Markdown(
        "Start a conversation with our AI researcher. The conversation data is "
        "automatically structured for analysis and can be saved to a file. "
        "Try asking about a patient's journey or an HCP's experience with a treatment."
    )
    # Textbox for status messages (e.g., "Conversation saved!"); hidden until
    # start_new_session returns an updated, visible Textbox.
    status_message = gr.Textbox(label="Status", interactive=False, visible=False)
    chatbot = gr.Chatbot(height=500, placeholder="Type your first message to begin the interview...")
    msg = gr.Textbox(label="Your message")
    with gr.Row():
        chat_submit_btn = gr.Button("Send")
        chat_clear_btn = gr.Button("Clear Chat")
        new_session_btn = gr.Button("Start New Session")
    # Event handlers: Enter key and Send button both call the same chat
    # function, which takes (msg, chatbot) and returns (cleared msg, history).
    msg.submit(chat_with_lm_studio, [msg, chatbot], [msg, chatbot], concurrency_limit=None)
    chat_submit_btn.click(chat_with_lm_studio, [msg, chatbot], [msg, chatbot], concurrency_limit=None)
    # Lambda returns an empty list, which clears the chatbot history.
    chat_clear_btn.click(lambda: [], None, [chatbot])
    # Saves the current log to disk, resets it, and reveals the status textbox.
    new_session_btn.click(start_new_session, [chatbot], [chatbot, status_message])
# --- Launch the Demo ---
# Only start the server when run as a script (not when imported as a module).
if __name__ == "__main__":
    demo.launch(inbrowser=True)