# py-learn-backend / chat.py
# (Hugging Face Space header: "Update chat.py", commit 43c3b55, verified)
# Web framework + CORS support for cross-origin requests from the frontend.
from flask import Flask, jsonify, send_file, abort, make_response, request, Blueprint, current_app
from flask_cors import CORS
import os
# Startup diagnostic: shows whether Google credentials are configured.
# NOTE(review): this prints the credentials file *path* to stdout on import —
# consider removing it or downgrading to a debug log in production.
print(f"GOOGLE_APPLICATION_CREDENTIALS: {os.getenv('GOOGLE_APPLICATION_CREDENTIALS')}")
import io
import uuid
import requests
import re
import tempfile # needed by validate-pronounce
app = Flask(__name__)
# Enable CORS for all routes/origins (flask_cors default behavior).
CORS(app)
def _cohere_headers():
    """Build the HTTP headers for a Cohere API call.

    The API key is taken from the Flask app config when present, otherwise
    from the module-level COHERE_API_KEY (environment) value.
    """
    key = current_app.config.get("COHERE_API_KEY") or COHERE_API_KEY
    headers = {"Content-Type": "application/json"}
    headers["Authorization"] = f"Bearer {key}"
    return headers
@app.route('/')
def home():
    """Health-check endpoint confirming the server is running."""
    greeting = "Welcome to the Flask app! The server is running."
    return greeting
# API configuration for AI-based question generation
# API key is read from the environment; empty string when unset (checked later).
COHERE_API_KEY = os.getenv("COHERE_API_KEY", "")
# (1) UPDATED URL: v2 endpoint on api.cohere.com
COHERE_API_URL = 'https://api.cohere.com/v2/chat'
# Dictionary to store user conversations
# NOTE(review): in-memory and unbounded — entries are never evicted and all
# state is lost on restart; fine for a demo, not for production scale.
user_sessions = {}
# Endpoint to explain grammar topics
# Blueprint hosting the /explain-grammar route (registered under __main__).
movie_bp = Blueprint("movie", __name__)
def _extract_text_v2(resp_json: dict) -> str:
"""
v2 /chat returns:
{ "message": { "content": [ { "type": "text", "text": "..." } ] } }
"""
msg = resp_json.get("message", {})
content = msg.get("content", [])
if isinstance(content, list) and content:
block = content[0]
if isinstance(block, dict):
return (block.get("text") or "").strip()
return ""
def _cohere_generate(prompt: str, max_tokens: int = 1000, temperature: float = 0.7):
    """Send a single-turn prompt to the Cohere v2 /chat endpoint.

    Parameters:
        prompt: user message sent as the sole conversation turn.
        max_tokens: generation cap forwarded to the API.
        temperature: sampling temperature forwarded to the API.

    Returns:
        (text, None) on success, or (None, (error_message, http_status)) on
        failure, so callers can build an HTTP error response directly.
    """
    api_key = current_app.config.get("COHERE_API_KEY") or COHERE_API_KEY
    if not api_key:
        return None, ("COHERE_API_KEY not set on the server", 500)
    # Reuse the shared header builder instead of duplicating it here.
    headers = _cohere_headers()
    # (2) UPDATED PAYLOAD: use messages instead of prompt
    payload = {
        "model": "command-r-08-2024",
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "max_tokens": max_tokens,
        "temperature": temperature
    }
    try:
        r = requests.post(COHERE_API_URL, headers=headers, json=payload, timeout=30)
        if r.status_code != 200:
            # Surface the upstream body for debugging; 502 = bad gateway.
            return None, (f"Cohere API error: {r.text}", 502)
        # (3) UPDATED PARSING: read message.content[*].text
        text = _extract_text_v2(r.json())
        return text, None
    except Exception as e:
        current_app.logger.exception("Cohere request failed: %s", e)
        return None, ("Upstream request failed", 502)
@movie_bp.post("/explain-grammar")
def explain_grammar():
    """Answer a grammar question, keeping per-session conversation context.

    Expects JSON: {"topic": str, "session_id": optional str}.
    Returns JSON {"response": str, "session_id": str}, or an error payload
    with HTTP 400/500.
    """
    try:
        # silent=True returns None (instead of raising) on a non-JSON body;
        # "or {}" keeps the .get() calls below from crashing on None.
        data = request.get_json(silent=True) or {}
        print("Received Data:", data)
        topic = data.get('topic', '').strip()
        session_id = data.get('session_id', str(uuid.uuid4()))  # Use provided session_id or create a new one
        if not topic:
            return jsonify({'error': 'Topic is required'}), 400
        # Retrieve previous conversation history
        conversation_history = user_sessions.get(session_id, [])
        # Keep the last 10 messages to maintain better context (adjustable)
        if len(conversation_history) > 10:
            conversation_history = conversation_history[-10:]
        # Generate a more **adaptive** prompt
        context = "\n".join(conversation_history) if conversation_history else ""
        prompt = f"""
You are a highly skilled grammar assistant. Your job is to maintain a **dynamic conversation** and respond intelligently based on user input, If the user asks something **unrelated to grammar**, respond with: "Please send a grammar-related question..
- Your answers must always **relate to the conversation history** and **extend naturally** based on what was previously asked.
- Your answers must be **concise, clear, and to the point**
- If the user asks for **examples**, explanations, or clarifications, **automatically infer** which topic they are referring to.
- If the user's question is **vague**, determine the most **logical continuation** based on prior questions.
- If the user asks something **unrelated to grammar**, respond with: "Please send a grammar-related question."
**Conversation so far:**
{context}
**User's new question:** {topic}
Please provide a **coherent and relevant answer** that continues the conversation naturally.
"""
        # Make the API call to Cohere
        headers = {
            'Authorization': f'Bearer {COHERE_API_KEY}',
            'Content-Type': 'application/json'
        }
        # (2) UPDATED PAYLOAD: messages array
        payload = {
            'model': 'command-r-08-2024',
            'messages': [
                {'role': 'user', 'content': prompt}
            ],
            'max_tokens': 1000
        }
        # timeout added: without it a hung upstream call blocks the worker forever
        response = requests.post(COHERE_API_URL, headers=headers, json=payload, timeout=30)
        if response.status_code == 200:
            # (3) UPDATED PARSING
            ai_response = _extract_text_v2(response.json())
            # Store conversation history to maintain context
            conversation_history.append(f"User: {topic}\nAI: {ai_response}")
            user_sessions[session_id] = conversation_history  # Update session history
            return jsonify({'response': ai_response, 'session_id': session_id})
        else:
            return jsonify({'error': 'Failed to fetch data from Cohere API'}), 500
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/suggest-grammar-questions', methods=['POST'])
def suggest_grammar_questions():
    """Suggest 3 grammar questions completing the user's partial input.

    Expects JSON: {"input": str}.
    Returns JSON {"suggestions": [str, str, str]} or an error payload.
    """
    try:
        # silent=True returns None (instead of raising) on a non-JSON body;
        # "or {}" keeps the .get() call below from crashing on None.
        data = request.get_json(silent=True) or {}
        user_input = data.get('input', '').strip()  # User's partial input (e.g., "What is v")
        if not user_input:
            return jsonify({'error': 'Input is required'}), 400
        prompt = f"""
You are a grammar expert. Given the user's input "{user_input}", generate **3 natural grammar-related questions** that people might ask.
- The user's input is a **partial or full grammar-related query**.
- AI must **infer the most likely grammar topic** based on the input.
- AI must **ensure all suggestions are strictly related to English grammar**.
- **If the input is incomplete, intelligently complete it** with the most likely grammar concept.
- Ensure all **questions are fully formed and relevant**.
**User input:** "{user_input}"
Provide exactly 3 well-structured, grammar-related questions:
"""
        # Call Cohere API
        headers = {
            'Authorization': f'Bearer {COHERE_API_KEY}',
            'Content-Type': 'application/json'
        }
        # (2) UPDATED PAYLOAD: messages array
        payload = {
            'model': 'command-r-08-2024',
            'messages': [
                {'role': 'user', 'content': prompt}
            ],
            'max_tokens': 100,
            'temperature': 0.9
        }
        # timeout added: without it a hung upstream call blocks the worker forever
        response = requests.post(COHERE_API_URL, headers=headers, json=payload, timeout=30)
        if response.status_code == 200:
            # (3) UPDATED PARSING
            text = _extract_text_v2(response.json())
            suggestions = [s for s in (text or "").split("\n") if s.strip()]
            # keep exactly 3 if more lines present
            return jsonify({'suggestions': suggestions[:3]})
        else:
            return jsonify({'error': 'Failed to fetch suggestions', 'details': response.text}), 500
    except Exception as e:
        return jsonify({'error': str(e)}), 500
def validate_topic(topic):
    """Classify whether *topic* is an English-grammar topic via Cohere.

    Returns exactly "Grammar", "Not Grammar", or "ask grammar topics".
    Any unexpected model output or upstream HTTP error falls back to
    "Not Grammar"; a transport-level failure returns an "Error: ..." string.
    """
    validation_prompt = f"""
You are an AI grammar expert. Your task is to determine if a given topic is related to **English grammar** or not.
**Input:** "{topic}"
### **Rules:**
- If the input is **in the form of a question** (e.g., it asks for an explanation or definition), return `"ask grammar topics"`, even if the topic is related to grammar.
- If the topic is **related to English grammar concepts** such as **parts of speech**, **verb tenses**, **sentence structure**, etc., return `"Grammar"`.
- If the topic is **not related to grammar**, such as general knowledge, science, math, history, or topics from other fields, return `"Not Grammar"`.
- Your response must be based purely on whether the topic relates to grammar, and **not** based on specific words, phrases, or examples.
**Your response must be exactly either "Grammar", "Not Grammar", or "ask grammar topics". No extra text.**
"""
    headers = {
        'Authorization': f'Bearer {COHERE_API_KEY}',
        'Content-Type': 'application/json'
    }
    # (2) UPDATED PAYLOAD: messages array
    payload = {
        'model': 'command-r-08-2024',
        'messages': [
            {'role': 'user', 'content': validation_prompt}
        ],
        'max_tokens': 5
    }
    try:
        # timeout added: without it a hung upstream call blocks indefinitely
        response = requests.post(COHERE_API_URL, json=payload, headers=headers, timeout=30)
        # Don't parse error bodies as chat responses; use the same
        # conservative fallback this function already applies below.
        if response.status_code != 200:
            return "Not Grammar"
        # (3) UPDATED PARSING
        validation_result = _extract_text_v2(response.json())
        # Ensure the response is strictly "Grammar" or "Not Grammar" or "ask grammar topics"
        if validation_result not in ["Grammar", "Not Grammar", "ask grammar topics"]:
            return "Not Grammar"  # Fallback to avoid incorrect responses
        return validation_result
    except Exception as e:
        return f"Error: {str(e)}"
# Local development entry point.
if __name__ == '__main__':
    # app.run(debug=True)
    app.register_blueprint(movie_bp, url_prefix='') # expose /explain-grammar locally
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # never run with this setting in production.
    app.run(host='0.0.0.0', port=5012, debug=True)