# Chat_Analyzer / app.py
# Hugging Face Space by TarSh8654 — commit b46567f (verified)
import asyncio
import json
import os

import gradio as gr
import requests
# --- Gemini API Configuration ---
# Read the API key from the environment (the standard mechanism for
# Hugging Face Spaces secrets). Falls back to an empty string, in which
# case the Canvas runtime is expected to inject credentials at request
# time. DO NOT hardcode your API key here.
API_KEY = os.environ.get("GEMINI_API_KEY", "")
API_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
# --- Function to call the Gemini API ---
async def call_gemini_api(prompt: str) -> str:
    """
    Call the Gemini API with *prompt* and return the generated text.

    The blocking ``requests.post`` call is run in a worker thread via
    ``asyncio.to_thread`` so it does not freeze the event loop (the
    original called it directly inside an ``async def``, blocking every
    other request while waiting on the network). A request timeout is
    applied so a hung connection cannot stall the app indefinitely.

    On any failure this returns a human-readable error string rather
    than raising, so the Gradio UI always has something to display.
    """
    payload = {
        "contents": [{"role": "user", "parts": [{"text": prompt}]}],
    }
    # Append the API key as a query parameter when one is configured;
    # otherwise credentials are handled by the Canvas runtime.
    full_api_url = f"{API_URL}?key={API_KEY}" if API_KEY else API_URL
    try:
        # json= serializes the payload and sets the Content-Type header;
        # to_thread keeps the async event loop responsive.
        response = await asyncio.to_thread(
            requests.post, full_api_url, json=payload, timeout=60
        )
        response.raise_for_status()  # surface HTTP 4xx/5xx as exceptions
        result = response.json()
        # Expected shape: candidates[0].content.parts[0].text
        return result["candidates"][0]["content"]["parts"][0]["text"]
    except (KeyError, IndexError, TypeError):
        # Well-formed JSON but not the shape we expect
        # (e.g. an empty candidate list or a safety block).
        return "No content generated by the model."
    except requests.exceptions.RequestException as e:
        return f"API Call Error: {e}"
    except json.JSONDecodeError:
        return f"API Response Error: Could not decode JSON. Response: {response.text}"
    except Exception as e:
        return f"An unexpected error occurred: {e}"
# --- Gradio Interface Function ---
async def analyze_chat_conversation(chat_text: str, analysis_task: str) -> str:
    """
    Run the selected analysis task over a pasted chat conversation.

    Builds a task-specific prompt, forwards it to the Gemini API, and
    returns the model's answer — or a validation message when the input
    is empty or the task name is not recognized.
    """
    if not chat_text.strip():
        return "Please enter a chat conversation to analyze."

    # Map each UI task name to its prompt template ({chat} is the
    # placeholder for the pasted conversation).
    prompt_templates = {
        "Summarize": (
            "Summarize the following chat conversation:\n\n{chat}\n\nSummary:"
        ),
        "Sentiment Analysis": (
            "Analyze the overall sentiment of the following chat conversation "
            "(e.g., positive, negative, neutral, mixed). Explain your reasoning "
            "briefly:\n\n{chat}\n\nSentiment Analysis:"
        ),
        "Extract Key Points & Action Items": (
            "Extract the main discussion points and any explicit action items "
            "from the following chat conversation. Present them as a bulleted "
            "list:\n\n{chat}\n\nKey Points and Action Items:"
        ),
    }

    template = prompt_templates.get(analysis_task)
    if template is None:
        return "Invalid analysis task selected."

    # Delegate the actual generation to the Gemini API helper.
    return await call_gemini_api(template.format(chat=chat_text))
# --- Gradio Interface Definition ---
# Components are built separately and then assembled into the Interface,
# so each widget's configuration is easy to find and tweak.
chat_input = gr.Textbox(
    lines=10,
    label="Paste Chat Conversation Here",
    placeholder="e.g., 'Alice: Let's meet tomorrow. Bob: Sure, 10 AM? Alice: Yes, and please bring the report. Bob: Will do.'",
)
task_input = gr.Dropdown(
    ["Summarize", "Sentiment Analysis", "Extract Key Points & Action Items"],
    label="Select Analysis Task",
    value="Summarize",
)
result_output = gr.Textbox(label="Analysis Result", lines=15)

demo = gr.Interface(
    fn=analyze_chat_conversation,
    inputs=[chat_input, task_input],
    outputs=result_output,
    title="💬 Chat Conversation Analyzer (Powered by Gemini)",
    description="Paste your chat conversation, select an analysis task, and get insights from an AI.",
)
# --- Launch the Gradio App ---
# Entry point: starts the Gradio server when run as a script.
if __name__ == "__main__":
    # For local testing, use demo.launch()
    # For Hugging Face Spaces, ensure `gradio` and `requests` are in requirements.txt
    demo.launch()