Spaces: Sleeping
# app.py
import os

import gradio as gr
from dotenv import load_dotenv
from huggingface_hub import InferenceClient

# Load environment variables from a .env file for local development;
# on Hugging Face Spaces the key is injected as a repository secret instead.
load_dotenv()

# Read the API key. InferenceClient accepts token=None, but every call
# would then fail with an auth error, so warn early to aid debugging.
HF_TOKEN = os.getenv("HUGGINGFACE_API_KEY")
if not HF_TOKEN:
    print("Warning: HUGGINGFACE_API_KEY is not set; Inference API calls will fail.")

# Shared client for the Hugging Face Inference API.
client = InferenceClient(token=HF_TOKEN)
def analyze_feedback(user_feedback: str) -> str:
    """Analyze raw user feedback via the Hugging Face Inference API.

    Wraps the feedback in a structured product-management prompt, sends it
    to an instruction-tuned model, and returns the model's Markdown reply.

    Args:
        user_feedback: Raw feedback text pasted by the user.

    Returns:
        The Markdown-formatted analysis, or a human-readable error message
        if the input is empty or the API call fails.
    """
    # Guard clause: don't spend an API call on empty/whitespace-only input.
    if not user_feedback or not user_feedback.strip():
        return "Please paste some user feedback to analyze."

    # This high-quality prompt is well-suited for an instruction-tuned model.
    prompt = f"""
You are a world-class Senior Product Manager, an expert in qualitative data analysis.
Your mission is to analyze the following user feedback text.
Provide a response structured in Markdown format. The response must include:
1. **Executive Summary (max 3 sentences):** The main idea that emerges.
2. **Key Positive Themes (3 points):** The most appreciated aspects, with a short quote for each.
3. **Key Negative Themes / Friction Points (3 points):** The most recurring problems, with a short quote for each.
4. **Actionable Recommendations (2 suggestions):** Propose two concrete actions the product team could consider.
---
User Feedback to Analyze:
{user_feedback}
"""
    # The 'conversational' task requires the chat message structure.
    messages = [{"role": "user", "content": prompt}]

    try:
        # chat_completion is the endpoint this model's provider requires.
        response = client.chat_completion(
            messages=messages,
            model="mistralai/Mistral-7B-Instruct-v0.2",
            max_tokens=1024,  # this endpoint's name for max_new_tokens
        )
        # chat_completion returns a structured object; extract the text.
        return response.choices[0].message.content
    except Exception as e:
        # Broad catch at the UI boundary: surface the failure to the user
        # in the output pane instead of crashing the Gradio app.
        return f"An error occurred: {e}"
# Create the Gradio interface wiring the analyzer to a simple text-in /
# markdown-out UI.
iface = gr.Interface(
    fn=analyze_feedback,
    inputs=gr.Textbox(lines=15, placeholder="Paste your raw user feedback here..."),
    outputs=gr.Markdown(),
    title="💡 Insight Synthesizer",
    description="An AI-powered tool for Product Managers to quickly synthesize raw user feedback into actionable insights. This is an MVP built for a portfolio project.",
    theme=gr.themes.Soft(),
    allow_flagging="never",
)

# Launch only when executed as a script so the module can be imported
# (e.g. for testing) without starting the web server. Spaces run app.py
# as __main__, so deployment behavior is unchanged.
if __name__ == "__main__":
    iface.launch()