File size: 3,823 Bytes
d937c98
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import gradio as gr
import json
import requests
import time

# Configuration for LM Studio API
LM_STUDIO_API_URL = "http://192.168.1.245:1234/v1/chat/completions"
# Make sure to replace this with your model name from LM Studio
LM_MODEL_NAME = "google/gemma-3-27b"

# Advanced System Prompt for InsightGenie.
# NOTE: the example code fence below must be closed (```), otherwise the
# model receives a prompt with a dangling markdown fence and may echo it.
system_prompt_content = """

You are InsightGenie, an AI-powered qualitative research assistant. Your purpose is to conduct a structured interview to deeply understand a user's experience with a specific topic.



**Instructions:**

1.  **Persona:** You are a professional, neutral, and empathetic research interviewer. Maintain a supportive and curious tone.

2.  **Goal:** Your primary goal is to gather rich, detailed qualitative data. Ask open-ended questions that encourage detailed responses.

3.  **Conversation Flow:**

    - After each user response, analyze the sentiment and key topics.

    - Based on your analysis, generate **one** follow-up question to probe deeper. Do not ask multiple questions.

    - You must keep the conversation focused on the topic.

4.  **Structured Output:** After each user turn, you must respond with a JSON object. The JSON should contain two fields:

    - `next_question`: The text of your next question for the user.

    - `summary`: A brief, neutral summary of the user's last response. This helps for later analysis.



**Example JSON Response:**

```json

{

  "next_question": "Can you tell me more about why that was your favorite part?",

  "summary": "The user had a positive experience and liked the fast delivery."

}

```

"""

def chat_with_lm_studio(message, history):
    """Send the conversation to the LM Studio chat-completions endpoint.

    Args:
        message: The user's latest message text.
        history: Prior turns as (user, assistant) string pairs, as supplied
            by ``gr.ChatInterface`` in tuple format — TODO confirm the
            installed Gradio version still uses tuples rather than
            message dicts.

    Returns:
        The assistant's reply text, or a human-readable error string if the
        request or response handling fails.
    """
    # Rebuild the full conversation: system prompt, prior turns, new message.
    messages = [{"role": "system", "content": system_prompt_content}]

    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": message})

    try:
        response = requests.post(
            LM_STUDIO_API_URL,
            json={
                "model": LM_MODEL_NAME,
                "messages": messages,
                "max_tokens": 150,
                "temperature": 0.7
            },
            # requests waits indefinitely by default; without a timeout a
            # hung or unreachable server would block the Gradio worker forever.
            timeout=60
        )
        response.raise_for_status()

        # Parse the JSON response
        api_response_data = response.json()

        # A successful completion carries at least one entry in 'choices'.
        if api_response_data.get('choices'):
            ai_message_content = api_response_data['choices'][0]['message']['content']

            # Since we removed structured output, we just return the text
            return ai_message_content
        else:
            # If 'choices' is missing, there's likely an error.
            # Look for an 'error' key or other diagnostic info.
            error_message = api_response_data.get('error', 'Unknown API error.')
            print(f"API Error Response: {error_message}")
            return f"An error occurred with the API: {error_message}"

    except requests.exceptions.RequestException as e:
        return f"An API error occurred: {e}. Please ensure LM Studio server is running."
    except Exception as e:
        return f"An unexpected error occurred: {e}"

# UI configuration for the demo, kept separate from the constructor call
# so the options read as a single literal.
_ui_options = dict(
    chatbot=gr.Chatbot(height=500),
    theme=gr.themes.Soft(),
    title="InsightGenie Live Demo",
    description="Your AI-powered qualitative research assistant.",
    examples=[
        ["I had a great experience with a new online clothing store."],
        ["The delivery was slow, and the product was damaged."],
    ],
    multimodal=False,
)

# Chat UI wired to the LM Studio-backed responder.
demo = gr.ChatInterface(fn=chat_with_lm_studio, **_ui_options)

# Launch only when executed as a script (opens the default browser tab).
if __name__ == "__main__":
    demo.launch(inbrowser=True)