# ProjectEcho / insight_genie_v021.py
# (uploaded via huggingface_hub; commit d937c98, verified)
import gradio as gr
import json
import requests
import time
# --- Configuration for LM Studio API ---
# NOTE(review): hard-coded LAN address — the app only works if an LM Studio
# server is reachable at this exact host/port; consider an env-var override.
LM_STUDIO_API_URL = "http://192.168.1.245:1234/v1/chat/completions"
# Model identifier as exposed by the local LM Studio server.
LM_MODEL_NAME = "google/gemma-3-27b"
# --- Dynamic System Prompt ---
# Default system prompt shown (and editable) in the "Prompt Settings" tab.
# Fix: the example JSON fence was previously left unterminated, which can
# teach the model to emit unclosed ``` fences; the closing fence is now present.
DEFAULT_PROMPT = """
You are InsightGenie, an AI-powered qualitative research assistant. Your purpose is to conduct a structured interview to deeply understand a user's experience with a specific topic.
**Instructions:**
1. **Persona:** You are a professional, neutral, and empathetic research interviewer. Maintain a supportive and curious tone.
2. **Goal:** Your primary goal is to gather rich, detailed qualitative data. Ask open-ended questions that encourage detailed responses.
3. **Conversation Flow:**
- After each user response, analyze the sentiment and key topics.
- Based on your analysis, generate **one** follow-up question to probe deeper. Do not ask multiple questions.
- You must keep the conversation focused on the topic.
4. **Structured Output:** After each user turn, you must respond with a JSON object. The JSON should contain two fields:
- `next_question`: The text of your next question for the user.
- `summary`: A brief, neutral summary of the user's last response.
**Example JSON Response:**
```json
{
"next_question": "Can you tell me more about why that was your favorite part?",
"summary": "The user had a positive experience and liked the fast delivery."
}
```
"""

# Global in-memory store for the current session's interview turns; appended
# to by log_conversation_turn() and flushed to disk by save_conversation_log().
conversation_log = []
# --- Helper Functions ---
def handle_save_and_display_status():
    """Persist the current conversation log and surface the outcome in the UI.

    Returns a Gradio Textbox update carrying the status message and making
    the (initially hidden) status box visible.
    """
    status_message = save_conversation_log()
    return gr.Textbox(value=status_message, visible=True)
def log_conversation_turn(user_message, ai_response, ai_summary):
    """Record one interview turn in the in-memory conversation log.

    Each entry captures the user's input, the AI's follow-up question, the
    AI's summary of the user's answer, and a local wall-clock timestamp.
    """
    global conversation_log
    turn = {
        "user_message": user_message,
        "ai_response": ai_response,
        "ai_summary": ai_summary,
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
    }
    conversation_log.append(turn)
def save_conversation_log():
    """Write the accumulated conversation log to a timestamped JSON file.

    Returns a human-readable status string suitable for display in the UI,
    covering the empty-log, success, and failure cases.
    """
    global conversation_log
    if not conversation_log:
        return "No conversation to save."
    file_name = f"conversation_log_{int(time.time())}.json"
    try:
        with open(file_name, 'w', encoding='utf-8') as handle:
            json.dump(conversation_log, handle, indent=4, ensure_ascii=False)
        return f"Conversation saved to {file_name}"
    except Exception as exc:
        return f"Failed to save conversation: {exc}"
def _extract_json_payload(raw_content):
    """Strip an optional ```json ... ``` fence from a model response.

    Returns the inner text when a fence is present (tolerating a missing
    closing fence), otherwise the whole response stripped of whitespace.
    """
    fence_start = raw_content.find("```json")
    if fence_start == -1:
        return raw_content.strip()
    body_start = fence_start + len("```json")
    fence_end = raw_content.find("```", body_start)
    if fence_end == -1:
        # Model forgot the closing fence — take everything after the opener.
        return raw_content[body_start:].strip()
    return raw_content[body_start:fence_end].strip()

def chat_with_lm_studio(message, history, prompt_text):
    """Send one user turn to the LM Studio chat endpoint and update UI state.

    Parameters:
        message: The user's latest input string.
        history: Gradio chat history as (user, assistant) pairs; mutated in
            place and also returned.
        prompt_text: System prompt to use for this request.

    Returns:
        A 3-tuple ("", history, transcript_or_error) — the empty string
        clears the input textbox; the third element feeds the backend
        transcript tab.
    """
    # Rebuild the full OpenAI-style message list from the system prompt,
    # the visible chat history, and the new user turn.
    messages = [{"role": "system", "content": prompt_text}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    try:
        response = requests.post(
            LM_STUDIO_API_URL,
            json={
                "model": LM_MODEL_NAME,
                "messages": messages,
                "max_tokens": 250,
                "temperature": 0.7
            },
            # Fix: without a timeout, a stalled local server hangs the UI
            # forever. 120s allows for slow local generation.
            timeout=120,
        )
        response.raise_for_status()
        api_response_data = response.json()
        if 'choices' in api_response_data and len(api_response_data['choices']) > 0:
            raw_content = api_response_data['choices'][0]['message']['content']
            try:
                parsed_response = json.loads(_extract_json_payload(raw_content))
                next_question = parsed_response.get("next_question", "Thank you for your response.")
                summary = parsed_response.get("summary", "No summary provided.")
                log_conversation_turn(message, next_question, summary)
                transcript_message = (
                    f"--- New Turn ---\n"
                    f"User Input: {message}\n"
                    f"AI Summary: {summary}\n"
                    f"AI Question: {next_question}\n"
                )
                print(transcript_message)
                history.append((message, next_question))
                return "", history, transcript_message
            except json.JSONDecodeError:
                # Model output wasn't valid JSON: surface raw output in the
                # transcript tab and give the user a gentle retry message.
                error_message = f"LLM failed to produce valid JSON. Raw output:\n{raw_content}"
                print(error_message)
                history.append((message, "I'm sorry, I couldn't process that. Can you please rephrase?"))
                return "", history, error_message
        else:
            error_message = api_response_data.get('error', 'Unknown API error.')
            print(f"API Error Response: {error_message}")
            history.append((message, f"An API error occurred: {error_message}. Please check the console."))
            return "", history, f"API Error: {error_message}"
    except requests.exceptions.RequestException as e:
        # Connection/timeout/HTTP errors from the local server.
        history.append((message, f"An API error occurred: {e}. Please ensure LM Studio server is running."))
        return "", history, f"API Error: {e}"
    except Exception as e:
        history.append((message, f"An unexpected error occurred: {e}"))
        return "", history, f"Unexpected Error: {e}"
# --- Gradio Interface Layout ---
with gr.Blocks(theme=gr.themes.Soft(), title="Project Echo Live Demo") as demo:
    gr.Markdown("# Project Echo: Your AI-powered Qualitative Assistant")
    with gr.Tabs():
        # Tab 1: the chat UI where the interview takes place.
        with gr.Tab("Live Demo"):
            gr.Markdown(
                "Start a conversation with the AI researcher. "
                "The conversation data is structured for analysis and can be saved."
            )
            chatbot = gr.Chatbot(height=500, placeholder="Start by telling me about your experience with a specific medication.")
            with gr.Row():
                msg = gr.Textbox(label="Your message", scale=4)
                chat_submit_btn = gr.Button("Send", scale=1)
            # Clickable example questions that pre-fill the message textbox.
            gr.Examples(
                examples=[
                    ["What is a typical day like?"],
                    ["What was your biggest challenge when first taking drug X?"]
                ],
                inputs=msg
            )
            with gr.Row():
                clear_btn = gr.Button("Clear Chat")
                save_btn = gr.Button("Save Conversation")
            # Hidden until a save is attempted; then shows the save status.
            save_status = gr.Textbox(label="Save Status", interactive=False, visible=False)
        # Tab 2: lets the user edit the system prompt at runtime; the edited
        # text is passed to chat_with_lm_studio on every turn.
        with gr.Tab("Prompt Settings"):
            gr.Markdown(
                "Customize the AI's persona and instructions. "
                "Changing this prompt will affect the next conversation turn."
            )
            prompt_input = gr.Textbox(
                label="System Prompt",
                value=DEFAULT_PROMPT,
                lines=20,
                interactive=True
            )
        # Tab 3: raw backend transcript/error text for inspection.
        with gr.Tab("Unedited Interaction"):
            log_output = gr.Textbox(
                label="Backend Transcript",
                interactive=False,
                lines=15,
                placeholder="Backend logs will appear here after each message is sent."
            )
    # Event Handlers
    # Enter in the textbox and the Send button both trigger the same callback.
    msg.submit(chat_with_lm_studio, [msg, chatbot, prompt_input], [msg, chatbot, log_output], concurrency_limit=None)
    chat_submit_btn.click(chat_with_lm_studio, [msg, chatbot, prompt_input], [msg, chatbot, log_output], concurrency_limit=None)
    # Clearing resets only the visible chat; the in-memory conversation_log
    # is untouched, so a subsequent Save still captures earlier turns.
    clear_btn.click(lambda: [], None, [chatbot])
    save_btn.click(handle_save_and_display_status, None, save_status)

# --- Launch the Demo ---
if __name__ == "__main__":
    # share=True exposes a public Gradio link in addition to the local server.
    demo.launch(share=True)