# InsightGenie — Gradio chat demo backed by a local LM Studio server.
# (Header reconstructed: the original lines were Hugging Face Spaces page
# chrome ("Spaces: Sleeping") accidentally captured in the paste.)
import json
import os
import time

import gradio as gr
import requests
# Configuration for the LM Studio OpenAI-compatible chat-completions endpoint.
# Both values can be overridden via environment variables so the demo can be
# pointed at another host or model without editing the source; the defaults
# preserve the original hard-coded values.
LM_STUDIO_API_URL = os.environ.get(
    "LM_STUDIO_API_URL", "http://192.168.1.245:1234/v1/chat/completions"
)
# Model identifier exactly as it appears in LM Studio's model list.
LM_MODEL_NAME = os.environ.get("LM_MODEL_NAME", "google/gemma-3-27b")
# Advanced system prompt for InsightGenie: defines the interviewer persona and
# the per-turn JSON response contract (next_question + summary).
# Fix: the example ```json code fence was never closed before the terminating
# triple quote, leaving broken markdown in the prompt sent to the model.
system_prompt_content = """
You are InsightGenie, an AI-powered qualitative research assistant. Your purpose is to conduct a structured interview to deeply understand a user's experience with a specific topic.
**Instructions:**
1. **Persona:** You are a professional, neutral, and empathetic research interviewer. Maintain a supportive and curious tone.
2. **Goal:** Your primary goal is to gather rich, detailed qualitative data. Ask open-ended questions that encourage detailed responses.
3. **Conversation Flow:**
- After each user response, analyze the sentiment and key topics.
- Based on your analysis, generate **one** follow-up question to probe deeper. Do not ask multiple questions.
- You must keep the conversation focused on the topic.
4. **Structured Output:** After each user turn, you must respond with a JSON object. The JSON should contain two fields:
- `next_question`: The text of your next question for the user.
- `summary`: A brief, neutral summary of the user's last response. This helps for later analysis.
**Example JSON Response:**
```json
{
"next_question": "Can you tell me more about why that was your favorite part?",
"summary": "The user had a positive experience and liked the fast delivery."
}
```
"""
def chat_with_lm_studio(message, history):
    """Send the conversation to LM Studio and return the assistant's reply.

    Args:
        message: The user's latest message (str).
        history: Prior turns as a list of (user_msg, assistant_msg) tuples,
            the classic Gradio ChatInterface history format.

    Returns:
        str: The model's next interview question when it complies with the
        JSON contract in the system prompt, the raw model text otherwise,
        or a human-readable error string on failure.
    """
    # Build an OpenAI-style message list: system prompt first, then the
    # alternating user/assistant turns, then the new user message.
    messages = [{"role": "system", "content": system_prompt_content}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    try:
        response = requests.post(
            LM_STUDIO_API_URL,
            json={
                "model": LM_MODEL_NAME,
                "messages": messages,
                "max_tokens": 150,
                "temperature": 0.7,
            },
            # Fix: without a timeout an unreachable server hangs the UI forever.
            timeout=60,
        )
        response.raise_for_status()
        api_response_data = response.json()
        if 'choices' in api_response_data and len(api_response_data['choices']) > 0:
            ai_message_content = api_response_data['choices'][0]['message']['content']
            # The system prompt instructs the model to answer with a JSON
            # object ({"next_question": ..., "summary": ...}); surface just
            # the question to the user instead of raw JSON. Models often wrap
            # JSON in markdown fences, so strip those before parsing; fall
            # back to the raw text if the model didn't comply.
            text = ai_message_content.strip()
            if text.startswith("```"):
                text = text.strip("`").strip()
                if text.lower().startswith("json"):
                    text = text[4:]
            try:
                parsed = json.loads(text)
                return parsed.get("next_question", ai_message_content)
            except (json.JSONDecodeError, AttributeError):
                return ai_message_content
        else:
            # No 'choices' key usually means the server reported an error;
            # surface whatever diagnostic the API gave us.
            error_message = api_response_data.get('error', 'Unknown API error.')
            print(f"API Error Response: {error_message}")
            return f"An error occurred with the API: {error_message}"
    except requests.exceptions.RequestException as e:
        return f"An API error occurred: {e}. Please ensure LM Studio server is running."
    except Exception as e:
        return f"An unexpected error occurred: {e}"
# Assemble the Gradio chat UI that drives the live demo.
_example_prompts = [
    ["I had a great experience with a new online clothing store."],
    ["The delivery was slow, and the product was damaged."],
]
demo = gr.ChatInterface(
    fn=chat_with_lm_studio,
    chatbot=gr.Chatbot(height=500),
    theme=gr.themes.Soft(),
    title="InsightGenie Live Demo",
    description="Your AI-powered qualitative research assistant.",
    examples=_example_prompts,
    multimodal=False,
)
# Launch the Gradio server (and open the UI in the default browser) only when
# this file is executed directly, not when it is imported.
if __name__ == "__main__":
    demo.launch(inbrowser=True)