Oviya committed on
Commit
f5fe108
·
1 Parent(s): f91c8cc
Files changed (2) hide show
  1. chat.py +226 -0
  2. test_moviepy.py +0 -1021
chat.py ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, jsonify, send_file, abort, make_response, request, Blueprint, current_app
2
+ from flask_cors import CORS
3
+ import os
4
+ print(f"GOOGLE_APPLICATION_CREDENTIALS: {os.getenv('GOOGLE_APPLICATION_CREDENTIALS')}")
5
+ import io
6
+ import uuid
7
+ import requests
8
+ import re
9
+ import tempfile # needed by validate-pronounce
10
+
11
# Application object and CORS setup.
app = Flask(__name__)
# Allow cross-origin requests from the frontend.
CORS(app)
13
+
14
# Shared helper for endpoints that call the Cohere API directly.
def _cohere_headers():
    """Build the auth headers for a Cohere API request.

    Prefers a key from the Flask app config, falling back to the
    module-level COHERE_API_KEY (defined later in this module; the
    name is resolved at call time, not import time).
    """
    key = current_app.config.get("COHERE_API_KEY") or COHERE_API_KEY
    return {
        "Authorization": f"Bearer {key}",
        "Content-Type": "application/json",
    }
21
+
22
@app.route('/')
def home():
    """Health-check endpoint confirming the server is up."""
    return "Welcome to the Flask app! The server is running."
25
+
26
# API configuration for AI-based question generation.
# SECURITY FIX: a hard-coded Cohere API key previously lived here in a
# comment. It has been removed — that key is in the repo history and
# must be rotated.
COHERE_API_KEY = os.getenv("COHERE_API_KEY", "")
COHERE_API_URL = 'https://api.cohere.ai/v1/generate'

# In-memory per-session conversation history (session_id -> list[str]).
# NOTE(review): not persistent and not shared across workers — confirm
# single-process deployment or move to a real session store.
user_sessions = {}

# Blueprint carrying the grammar-explanation endpoint.
movie_bp = Blueprint("movie", __name__)
37
+
38
def _cohere_generate(prompt: str, max_tokens: int = 1000, temperature: float = 0.7):
    """Call the Cohere generate endpoint with sane defaults.

    Returns (text, None) on success, or (None, (message, http_status))
    on failure so callers can surface the error directly.
    """
    key = current_app.config.get("COHERE_API_KEY") or COHERE_API_KEY
    if not key:
        return None, ("COHERE_API_KEY not set on the server", 500)

    auth_headers = {
        "Authorization": f"Bearer {key}",
        "Content-Type": "application/json",
    }
    request_body = {
        "model": "command-r-08-2024",
        "prompt": prompt,
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    try:
        resp = requests.post(COHERE_API_URL, headers=auth_headers, json=request_body, timeout=30)
        if resp.status_code != 200:
            return None, (f"Cohere API error: {resp.text}", 502)
        generated = resp.json().get("generations", [{}])[0].get("text", "").strip()
        return generated, None
    except Exception as exc:
        # Covers connection errors, timeouts, and malformed JSON bodies.
        current_app.logger.exception("Cohere request failed: %s", exc)
        return None, ("Upstream request failed", 502)
54
+
55
@movie_bp.post("/explain-grammar")
def explain_grammar():
    """Answer a grammar question, keeping per-session conversation context.

    Expects JSON: {"topic": str, "session_id": optional str}.
    Returns {"response": str, "session_id": str}, or {"error": str} with
    400 (missing topic) / 500 (upstream or unexpected failure).
    """
    try:
        # FIX: get_json() returns None for a non-JSON body; guard before .get().
        data = request.get_json(silent=True) or {}
        print("Received Data:", data)
        topic = data.get('topic', '').strip()
        # Reuse the caller's session id when provided; otherwise mint one.
        session_id = data.get('session_id', str(uuid.uuid4()))

        if not topic:
            return jsonify({'error': 'Topic is required'}), 400

        # Retrieve prior history, keeping only the 10 most recent
        # exchanges to bound prompt size.
        conversation_history = user_sessions.get(session_id, [])
        if len(conversation_history) > 10:
            conversation_history = conversation_history[-10:]

        context = "\n".join(conversation_history) if conversation_history else ""

        prompt = f"""
You are a highly skilled grammar assistant. Your job is to maintain a **dynamic conversation** and respond intelligently based on user input, If the user asks something **unrelated to grammar**, respond with: "Please send a grammar-related question..

- Your answers must always **relate to the conversation history** and **extend naturally** based on what was previously asked.
- Your answers must be **concise, clear, and to the point**
- If the user asks for **examples**, explanations, or clarifications, **automatically infer** which topic they are referring to.
- If the user's question is **vague**, determine the most **logical continuation** based on prior questions.
- If the user asks something **unrelated to grammar**, respond with: "Please send a grammar-related question."

**Conversation so far:**
{context}

**User's new question:** {topic}
Please provide a **coherent and relevant answer** that continues the conversation naturally.
"""

        # CONSISTENCY FIX: delegate the HTTP call to the shared
        # _cohere_generate helper (adds the missing timeout and the
        # missing-API-key check) instead of duplicating requests.post here.
        ai_response, err = _cohere_generate(prompt, max_tokens=1000)
        if err is not None:
            # Keep the error payload callers already expect.
            return jsonify({'error': 'Failed to fetch data from Cohere API'}), 500

        # Persist the exchange so follow-up questions keep their context.
        conversation_history.append(f"User: {topic}\nAI: {ai_response}")
        user_sessions[session_id] = conversation_history

        return jsonify({'response': ai_response, 'session_id': session_id})

    except Exception as e:
        return jsonify({'error': str(e)}), 500
119
+
120
+
121
+
122
@app.route('/suggest-grammar-questions', methods=['POST'])
def suggest_grammar_questions():
    """Suggest 3 grammar questions completing the user's partial input.

    Expects JSON: {"input": str} (e.g. "What is v").
    Returns {"suggestions": [str, ...]}, or {"error": ...} with 400/500.
    """
    try:
        # FIX: get_json() returns None for a non-JSON body; guard before .get().
        data = request.get_json(silent=True) or {}
        user_input = data.get('input', '').strip()

        if not user_input:
            return jsonify({'error': 'Input is required'}), 400

        prompt = f"""
You are a grammar expert. Given the user's input "{user_input}", generate **3 natural grammar-related questions** that people might ask.

- The user's input is a **partial or full grammar-related query**.
- AI must **infer the most likely grammar topic** based on the input.
- AI must **ensure all suggestions are strictly related to English grammar**.
- **If the input is incomplete, intelligently complete it** with the most likely grammar concept.
- Ensure all **questions are fully formed and relevant**.

**User input:** "{user_input}"
Provide exactly 3 well-structured, grammar-related questions:
"""

        headers = {
            'Authorization': f'Bearer {COHERE_API_KEY}',
            'Content-Type': 'application/json'
        }
        # Calls requests directly (not _cohere_generate) because the
        # penalty parameters below aren't supported by the shared helper.
        payload = {
            'model': 'command-r-08-2024',
            'prompt': prompt,
            'max_tokens': 100,          # only short responses needed
            'temperature': 0.9,         # some randomness for variety
            'frequency_penalty': 0.8,   # reduce repeated words
            'presence_penalty': 0.8     # encourage diverse questions
        }

        # FIX: bound the upstream call so a hung request can't pin a worker.
        response = requests.post(COHERE_API_URL, headers=headers, json=payload, timeout=30)

        if response.status_code == 200:
            suggestions = response.json().get('generations', [{}])[0].get('text', '').strip().split("\n")
            return jsonify({'suggestions': suggestions})
        else:
            return jsonify({'error': 'Failed to fetch suggestions', 'details': response.text}), 500

    except Exception as e:
        return jsonify({'error': str(e)}), 500
171
+
172
+
173
def validate_topic(topic):
    """Classify *topic* via Cohere as one of the three documented labels.

    Returns "Grammar", "Not Grammar", or "ask grammar topics"; falls back
    to "Not Grammar" for unexpected model output, or returns an
    "Error: ..." string if the upstream request fails.
    """
    validation_prompt = f"""
You are an AI grammar expert. Your task is to determine if a given topic is related to **English grammar** or not.

**Input:** "{topic}"

### **Rules:**
- If the input is **in the form of a question** (e.g., it asks for an explanation or definition), return `"ask grammar topics"`, even if the topic is related to grammar.
- If the topic is **related to English grammar concepts** such as **parts of speech**, **verb tenses**, **sentence structure**, etc., return `"Grammar"`.
- If the topic is **not related to grammar**, such as general knowledge, science, math, history, or topics from other fields, return `"Not Grammar"`.
- Your response must be based purely on whether the topic relates to grammar, and **not** based on specific words, phrases, or examples.

**Your response must be exactly either "Grammar", "Not Grammar", or "ask grammar topics". No extra text.**
"""

    headers = {
        'Authorization': f'Bearer {COHERE_API_KEY}',
        'Content-Type': 'application/json'
    }
    payload = {
        'model': 'command-r-08-2024',
        'prompt': validation_prompt,
        'max_tokens': 5  # minimal token usage for classification
    }

    try:
        # FIX: add a timeout so a hung upstream call can't block the worker.
        response = requests.post(COHERE_API_URL, json=payload, headers=headers, timeout=30)
        validation_result = response.json().get('generations', [{}])[0].get('text', '').strip()

        # BUG FIX: the prompt defines three valid labels, but the old
        # whitelist accepted only two, silently remapping
        # "ask grammar topics" to "Not Grammar". Accept all three.
        if validation_result not in ("Grammar", "Not Grammar", "ask grammar topics"):
            return "Not Grammar"  # fallback for unexpected model output

        return validation_result

    except Exception as e:
        return f"Error: {str(e)}"
218
+
219
+
220
+
221
+
222
+
223
if __name__ == '__main__':
    # Expose the blueprint routes (e.g. /explain-grammar) when run directly.
    app.register_blueprint(movie_bp, url_prefix='')
    # NOTE(review): debug=True must not reach production — confirm this
    # entry point is dev-only.
    app.run(host='0.0.0.0', port=5012, debug=True)
test_moviepy.py DELETED
@@ -1,1021 +0,0 @@
1
- from flask import Flask, jsonify, send_file, abort, make_response, request, Blueprint, current_app
2
- from flask_cors import CORS
3
- from moviepy.editor import VideoFileClip
4
- from google.cloud import speech
5
- import os
6
- print(f"GOOGLE_APPLICATION_CREDENTIALS: {os.getenv('GOOGLE_APPLICATION_CREDENTIALS')}")
7
- import io
8
- import uuid
9
- import requests
10
- from pydub import AudioSegment
11
- import ffmpeg
12
- import re
13
- import tempfile # needed by validate-pronounce
14
-
15
- app = Flask(__name__)
16
- CORS(app)
17
-
18
- # 👇 Add the helper right here
19
- def _cohere_headers():
20
- api_key = current_app.config.get("COHERE_API_KEY") or COHERE_API_KEY
21
- return {
22
- "Authorization": f"Bearer {api_key}",
23
- "Content-Type": "application/json",
24
- }
25
-
26
- @app.route('/')
27
- def home():
28
- return "Welcome to the Flask app! The server is running."
29
-
30
# Directories for video, audio, and transcripts.
VIDEO_FOLDER = 'static/videos'
AUDIO_FOLDER = 'static/audio'
TRANSCRIPT_FOLDER = 'static/transcripts'

# Create them up front so later file writes never fail on a missing parent.
for _media_dir in (VIDEO_FOLDER, AUDIO_FOLDER, TRANSCRIPT_FOLDER):
    os.makedirs(_media_dir, exist_ok=True)
39
-
40
# API configuration for AI-based question generation.
# SECURITY FIX: a hard-coded Cohere API key previously lived here in a
# comment. Removed — the key is in repo history and must be rotated.
COHERE_API_KEY = os.getenv("COHERE_API_KEY", "")
COHERE_API_URL = 'https://api.cohere.ai/v1/generate'

# Google Cloud Speech-to-Text client; reads GOOGLE_APPLICATION_CREDENTIALS
# from the environment at construction time.
speech_client = speech.SpeechClient()
47
-
48
- # Predefined video metadata
49
- # VIDEOS = [
50
- # {
51
- # "id": "1",
52
- # "title": "Introduction to AI",
53
- # "filename": "ai_intro.mp4"
54
- # },
55
- # {
56
- # "id": "2",
57
- # "title": "Machine Learning Basics",
58
- # "filename": "ml_basics.mp4"
59
- # }
60
- # ]
61
-
62
-
63
-
64
@app.route('/videos', methods=['GET'])
def list_videos():
    """List available videos for users to watch.

    BUG FIX: the VIDEOS metadata list above is commented out, so the old
    body raised NameError on every request. Fall back to an empty list
    until that metadata is restored.
    """
    videos = globals().get('VIDEOS', [])
    return jsonify(videos), 200
70
-
71
@app.route('/videos/<filename>')
def serve_video(filename):
    """Serve a video file from VIDEO_FOLDER, or 404 if it does not exist.

    Flask's default <filename> converter rejects '/', which prevents
    path traversal out of VIDEO_FOLDER.
    """
    video_path = os.path.join(VIDEO_FOLDER, filename)
    if not os.path.exists(video_path):
        # FIX: log the actual filename (the old message printed a
        # literal placeholder instead of interpolating it).
        print(f"Video file not found: {filename}")
        abort(404)

    mime_type = 'video/mp4'
    print(f"Serving video: {filename}, MIME type: {mime_type}")
    return send_file(video_path, mimetype=mime_type)
84
-
85
-
86
# Convert video to audio.
def convert_video_to_audio(video_path, audio_path):
    """Extract the audio track of *video_path* into *audio_path* as mp3.

    Returns audio_path on success, None on failure.
    """
    video = None
    try:
        video = VideoFileClip(video_path)
        video.audio.write_audiofile(audio_path, codec='mp3')
        return audio_path
    except Exception as e:
        print(f"Error converting video to audio: {str(e)}")
        return None
    finally:
        # LEAK FIX: close the clip to release the underlying ffmpeg
        # reader/file handles; the old code never closed it.
        if video is not None:
            video.close()
96
-
97
# Re-encode MP3 to ensure proper format.
def reencode_mp3(input_audio_path, output_audio_path):
    """Re-encode an MP3 at highest VBR quality (-q:a 0).

    Returns the output path on success, None on failure.
    """
    try:
        segment = AudioSegment.from_mp3(input_audio_path)
        segment.export(
            output_audio_path,
            format="mp3",
            codec="libmp3lame",
            parameters=["-q:a", "0"],
        )
    except Exception as e:
        print(f"Error re-encoding MP3: {str(e)}")
        return None
    return output_audio_path
107
# Helper function to convert audio to the proper MP3 encoding if necessary.
def convert_audio_to_mp3(input_file_path, output_file_path):
    """Convert an audio file to a 128k MP3 via ffmpeg.

    Returns True on success, False on failure.
    """
    try:
        stream = ffmpeg.input(input_file_path)
        stream.output(output_file_path, acodec='libmp3lame', audio_bitrate='128k').run()
    except Exception as e:
        print(f"Error during audio conversion: {e}")
        return False
    return True
118
-
119
-
120
# Function to compress audio dynamically.
def compress_audio(input_file_path, output_file_path, target_bitrate="128k"):
    """Export *input_file_path* as MP3 at *target_bitrate*; return the output path."""
    AudioSegment.from_file(input_file_path).export(
        output_file_path, format="mp3", bitrate=target_bitrate
    )
    return output_file_path
125
-
126
-
127
@app.route('/upload-video', methods=['POST'])
def upload_video():
    """Accept a multipart video upload and store it under VIDEO_FOLDER.

    Returns {"message", "filename"} on success. The stored name is a fresh
    UUID with a .mp4 extension; the client's original name is discarded.
    """
    print("Received upload request.")

    if 'video' not in request.files:
        print("No video file provided in the request.")
        return jsonify({'error': 'No video file provided'}), 400

    video = request.files['video']
    if video.filename == '':
        print("Empty filename detected.")
        return jsonify({'error': 'No selected file'}), 400

    try:
        filename = str(uuid.uuid4()) + ".mp4"
        video_path = os.path.join(VIDEO_FOLDER, filename)

        # FIX: log the real target path (the old message printed a
        # literal placeholder instead of interpolating it).
        print(f"Saving video: {video_path}")
        video.save(video_path)

        print(f"Video saved successfully at {video_path}")
        return jsonify({'message': 'Video uploaded successfully!', 'filename': filename}), 200

    except Exception as e:
        print(f"Error saving video: {str(e)}")
        return jsonify({'error': 'Failed to save video'}), 500
153
-
154
-
155
-
156
-
157
@app.route('/generate-questions-dynamicvideo', methods=['POST'])
def generate_questions():
    """Generate three MCQs from a previously uploaded video.

    Body: {"filename": "<video in VIDEO_FOLDER>"}.
    Pipeline: video -> mp3 -> Google speech-to-text -> Cohere MCQ prompt
    -> parse_questions(). Returns {"questions": [...]} or {"error": ...}.
    """
    audio_path = None
    try:
        data = request.json
        video_filename = data.get('filename')

        if not video_filename:
            print("Error: No filename provided in request.")
            return jsonify({"error": "Filename is required"}), 400

        video_path = os.path.join(VIDEO_FOLDER, video_filename)
        if not os.path.exists(video_path):
            print(f"Error: Video file {video_filename} not found at {video_path}")
            return jsonify({"error": "Video file not found"}), 404

        print(f"Processing video: {video_filename}")

        # Intermediate mp3 gets a unique name to avoid collisions.
        audio_filename = f"{uuid.uuid4()}.mp3"
        audio_path = os.path.join(AUDIO_FOLDER, audio_filename)

        if not convert_video_to_audio(video_path, audio_path):
            print("Error: Video to audio conversion failed.")
            return jsonify({"error": "Failed to convert video to audio"}), 500

        # Transcribe audio using Google Cloud Speech-to-Text.
        with open(audio_path, 'rb') as audio_file:
            audio_content = audio_file.read()

        audio = speech.RecognitionAudio(content=audio_content)
        # NOTE(review): assumes 16 kHz MP3 output from the converter —
        # confirm against the actual encoder settings.
        config = speech.RecognitionConfig(
            encoding=speech.RecognitionConfig.AudioEncoding.MP3,
            sample_rate_hertz=16000,
            language_code="en-US",
        )

        response = speech_client.recognize(config=config, audio=audio)
        transcripts = [result.alternatives[0].transcript for result in response.results]

        if not transcripts:
            print("Error: No transcription results found.")
            return jsonify({"error": "No transcription results found"}), 500

        transcription_text = " ".join(transcripts)
        print(f"Transcription successful: {transcription_text[:200]}...")  # first 200 chars

        cohere_payload = {
            "model": "command-r-08-2024",
            "prompt": f"Generate exactly three multiple-choice questions based on this text:\n{transcription_text}\n"
                      f"Each question should start with a number followed by a period (e.g., 1.) and should have four options labeled A., B., C., and D. After the options, indicate the correct answer with 'Correct answer: <option letter>' (e.g., Correct answer: A).\n"
                      f"Please make sure to generate exactly four options per question, and only three questions in total.",
            "max_tokens": 300,
            "temperature": 0.9,
        }

        headers = _cohere_headers()
        # FIX: bound the upstream call so a hung request can't pin a worker.
        cohere_response = requests.post(COHERE_API_URL, json=cohere_payload, headers=headers, timeout=30)

        if cohere_response.status_code != 200:
            print(f"Error: Cohere API response failed: {cohere_response.text}")
            return jsonify({"error": "Failed to generate questions"}), 500

        questions = cohere_response.json().get("generations", [])
        if not questions:
            print("Error: No questions generated from Cohere API.")
            return jsonify({"error": "No questions generated"}), 500

        structured_questions = parse_questions(questions[0]["text"])
        return jsonify({"questions": structured_questions}), 200

    except Exception as e:
        print(f"Critical Error: {e}")
        return jsonify({"error": "An error occurred while generating questions"}), 500
    finally:
        # LEAK FIX: delete the intermediate mp3 so repeated requests don't
        # accumulate files in AUDIO_FOLDER (the old code never removed it).
        if audio_path and os.path.exists(audio_path):
            try:
                os.remove(audio_path)
            except OSError:
                pass
238
-
239
-
240
-
241
-
242
def parse_questions(response_text):
    """Parse Cohere's MCQ text into [{"question", "options", "answer"}, ...].

    Expected format per question block (blocks separated by blank lines):
    a numbered question line ("1. ..."), option lines ("A. .." .. "D. .."),
    and a trailing "Correct answer: <letter>" line. The answer letter is
    mapped back to the full option text.
    """
    questions = []
    for block in response_text.split("\n\n"):
        lines = block.strip().split("\n")
        # A valid block needs at least a question plus one more line.
        if len(lines) < 2:
            continue

        # Strip the leading "N. " number from the question line if present.
        question_line = lines[0]
        question_text = question_line.split(". ", 1)[1] if ". " in question_line else question_line

        options = []
        correct_answer_letter = None
        for line in lines[1:]:
            line = line.strip()
            # Strip an option prefix such as "a)", "A.", "b " when present.
            match = re.match(r"^(?:[a-dA-D][).]?\s)?(.+)$", line)
            if not match:
                continue  # blank line
            option_text = match.group(1).strip()
            if option_text.startswith("Correct answer:"):
                correct_answer_letter = option_text.split(":")[-1].strip()
            else:
                options.append(option_text)

        # Map the reported letter (A=0, B=1, ...) to the option text.
        correct_answer_text = ""
        if correct_answer_letter:
            # BUG FIX: the model sometimes emits "A." or similar; the old
            # code passed the whole multi-character string to ord() and
            # crashed with TypeError. Use only the first character.
            letter = correct_answer_letter.strip().upper()[:1]
            if letter:
                option_index = ord(letter) - ord('A')
                if 0 <= option_index < len(options):
                    correct_answer_text = options[option_index]

        questions.append({
            "question": question_text,
            "options": options,
            "answer": correct_answer_text,  # full answer text, not the letter
        })

    return questions
298
-
299
-
300
# Topics already handed out; not referenced in the visible code —
# presumably intended for de-duplication of writing topics.
generated_topics = set()
301
-
302
-
303
@app.route('/generate-writing-topics', methods=['POST'])
def generate_writing_topics():
    """Generate writing topics based on the user's grade level.

    Body: {"grade_level": "lower" | "middle" | "upper"} (default "lower").
    Returns {"topics": [str, ...]} or {"error": ...} with 400/500.
    """
    try:
        data = request.json
        grade_level = data.get("grade_level", "lower").lower()

        valid_levels = {"lower", "middle", "upper"}
        if grade_level not in valid_levels:
            return jsonify({"error": f"Invalid grade level. Choose from: {', '.join(valid_levels)}"}), 400

        # Tailor the prompt difficulty to the grade band.
        if grade_level == "lower":
            prompt = (
                "Generate one simple and short writing topic suitable for a lower-grade student. "
                "The topic should be one sentence, easy to understand, and fun to write about. "
                "Focus on familiar and relatable themes like family, school, or favorite activities."
            )
        elif grade_level == "middle":
            prompt = (
                "Generate one creative writing topic suitable for a middle-grade student. "
                "The topic should encourage imagination and exploration while still being age-appropriate. "
                "Include themes like adventures, problem-solving, or hypothetical scenarios."
            )
        else:  # "upper" — only remaining valid level
            prompt = (
                "Generate one challenging and thought-provoking writing topic suitable for an upper-grade student. "
                "The topic should require critical thinking, creativity, or self-reflection. "
                "Include themes like ethical dilemmas, futuristic ideas, or personal experiences."
            )

        payload = {
            "model": "command-r-08-2024",
            "prompt": prompt,
            "max_tokens": 150,
            "temperature": 0.9
        }
        headers = _cohere_headers()
        # FIX: bound the upstream call with a timeout.
        response = requests.post(COHERE_API_URL, json=payload, headers=headers, timeout=30)

        if response.status_code != 200:
            return jsonify({"error": "Failed to generate topics"}), 500

        # FIX: guard against an empty "generations" list (old code
        # indexed [0] unconditionally and could raise IndexError).
        generations = response.json().get("generations", [])
        if not generations:
            return jsonify({"error": "Failed to generate topics"}), 500

        raw_text = generations[0].get("text", "")
        topics = [topic.strip() for topic in raw_text.split("\n") if topic.strip()]

        return jsonify({"topics": topics}), 200

    except Exception as e:
        print(f"Error generating writing topics: {e}")
        return jsonify({"error": "An error occurred while generating topics"}), 500
361
-
362
-
363
-
364
-
365
@app.route('/validate-response', methods=['POST'])
def validate_response():
    """Evaluate a user's written response for grammar and sentence quality.

    Body: {"topic": str, "response": str}.
    Returns {"feedback": str} or {"error": ...} with 400/500.
    """
    try:
        data = request.json
        topic = data.get("topic")
        response = data.get("response")

        if not topic or not response:
            return jsonify({"error": "Both 'topic' and 'response' fields are required."}), 400

        prompt = (
            f"You are a writing teacher. Evaluate the sentence formation, grammar, and overall writing quality of the following response. "
            f"Provide constructive feedback highlighting any errors in grammar, spelling, punctuation, and sentence structure. "
            f"If the response is well-written, acknowledge its strengths. "
            f"Topic: '{topic}'. Response: '{response}'."
        )

        payload = {
            "model": "command-r-08-2024",
            "prompt": prompt,
            "max_tokens": 500,
            "temperature": 0.7
        }
        headers = _cohere_headers()
        # FIX: bound the upstream call with a timeout.
        api_response = requests.post(COHERE_API_URL, json=payload, headers=headers, timeout=30)

        if api_response.status_code != 200:
            return jsonify({"error": "Failed to validate response"}), 500

        # FIX: guard against an empty "generations" list (old code indexed
        # [0] unconditionally and could raise IndexError).
        generations = api_response.json().get("generations", [])
        if not generations:
            return jsonify({"error": "Failed to validate response"}), 500

        feedback = generations[0].get("text", "")
        return jsonify({"feedback": feedback}), 200

    except Exception as e:
        print(f"Error validating response: {e}")
        return jsonify({"error": "An error occurred while validating the response."}), 500
415
-
416
- # Pronounciation
417
-
418
@app.route('/validate-pronounce', methods=['POST'])
def validate():
    """Check a recorded pronunciation of a target word.

    Form fields: "word" (str) and "audio" (file). Transcribes the audio
    and compares it to the target via validate_pronunciation().
    """
    # FIX: missing form fields used to raise KeyError -> an HTML 500 page;
    # return a JSON 400 instead.
    target_word = request.form.get('word')
    audio_file = request.files.get('audio')
    if not target_word or audio_file is None:
        return jsonify({"success": False,
                        "message": "Both 'word' and 'audio' are required."}), 400

    # Save the audio to a temp file for the transcriber.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        audio_path = tmp_file.name
        audio_file.save(audio_path)

    try:
        # transcribe_audio / validate_pronunciation are defined elsewhere
        # in this module (outside this view).
        transcribed_word = transcribe_audio(audio_path)
    finally:
        # FIX: remove the temp file even if transcription raises.
        os.remove(audio_path)

    if not transcribed_word:
        return jsonify({
            "success": False,
            "message": "Could not understand the pronunciation."
        })

    is_correct, message = validate_pronunciation(target_word, transcribed_word)
    return jsonify({
        "success": is_correct,
        "message": message,
        "transcribed_word": transcribed_word
    })
445
-
446
-
447
@app.route('/teach', methods=['GET'])
def teach():
    """Return syllable and silent-letter details for ?word=<word>."""
    word = request.args.get('word', default='', type=str)
    if not word:
        return jsonify({"success": False, "message": "No word provided."})

    # get_syllables / detect_silent_letters are defined elsewhere in
    # this module (outside this view).
    return jsonify({
        "success": True,
        "syllables": get_syllables(word),
        "silent_letters": detect_silent_letters(word),
    })
463
-
464
-
465
-
466
- # Dictionary to store user conversations
467
- user_sessions = {}
468
- # Endpoint to explain grammar topics
469
- movie_bp = Blueprint("movie", __name__)
470
-
471
- def _cohere_generate(prompt: str, max_tokens: int = 1000, temperature: float = 0.7):
472
- api_key = current_app.config.get("COHERE_API_KEY") or COHERE_API_KEY
473
- if not api_key:
474
- return None, ("COHERE_API_KEY not set on the server", 500)
475
-
476
- headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
477
- payload = {"model": "command-r-08-2024", "prompt": prompt, "max_tokens": max_tokens, "temperature": temperature}
478
- try:
479
- r = requests.post(COHERE_API_URL, headers=headers, json=payload, timeout=30)
480
- if r.status_code != 200:
481
- return None, (f"Cohere API error: {r.text}", 502)
482
- text = r.json().get("generations", [{}])[0].get("text", "").strip()
483
- return text, None
484
- except Exception as e:
485
- current_app.logger.exception("Cohere request failed: %s", e)
486
- return None, ("Upstream request failed", 502)
487
-
488
- @movie_bp.post("/explain-grammar")
489
- def explain_grammar():
490
- try:
491
- data = request.get_json()
492
- print("Received Data:", data)
493
- topic = data.get('topic', '').strip()
494
- session_id = data.get('session_id', str(uuid.uuid4())) # Use provided session_id or create a new one
495
-
496
- if not topic:
497
- return jsonify({'error': 'Topic is required'}), 400
498
-
499
- # Retrieve previous conversation history
500
- conversation_history = user_sessions.get(session_id, [])
501
-
502
- # Keep the last 10 messages to maintain better context (adjustable)
503
- if len(conversation_history) > 10:
504
- conversation_history = conversation_history[-10:]
505
-
506
- # Generate a more **adaptive** prompt
507
- context = "\n".join(conversation_history) if conversation_history else ""
508
-
509
- prompt = f"""
510
- You are a highly skilled grammar assistant. Your job is to maintain a **dynamic conversation** and respond intelligently based on user input, If the user asks something **unrelated to grammar**, respond with: "Please send a grammar-related question..
511
-
512
- - Your answers must always **relate to the conversation history** and **extend naturally** based on what was previously asked.
513
- - Your answers must be **concise, clear, and to the point**
514
- - If the user asks for **examples**, explanations, or clarifications, **automatically infer** which topic they are referring to.
515
- - If the user's question is **vague**, determine the most **logical continuation** based on prior questions.
516
- - If the user asks something **unrelated to grammar**, respond with: "Please send a grammar-related question."
517
-
518
- **Conversation so far:**
519
- {context}
520
-
521
- **User's new question:** {topic}
522
- Please provide a **coherent and relevant answer** that continues the conversation naturally.
523
- """
524
-
525
- # Make the API call to Cohere
526
- headers = {
527
- 'Authorization': f'Bearer {COHERE_API_KEY}',
528
- 'Content-Type': 'application/json'
529
- }
530
-
531
- payload = {
532
- 'model': 'command-r-08-2024',
533
- 'prompt': prompt,
534
- 'max_tokens': 1000 # Increased to allow better explanations
535
- }
536
-
537
- response = requests.post(COHERE_API_URL, headers=headers, json=payload)
538
-
539
- if response.status_code == 200:
540
- ai_response = response.json().get('generations', [{}])[0].get('text', '').strip()
541
-
542
- # Store conversation history to maintain context
543
- conversation_history.append(f"User: {topic}\nAI: {ai_response}")
544
- user_sessions[session_id] = conversation_history # Update session history
545
-
546
- return jsonify({'response': ai_response, 'session_id': session_id})
547
- else:
548
- return jsonify({'error': 'Failed to fetch data from Cohere API'}), 500
549
-
550
- except Exception as e:
551
- return jsonify({'error': str(e)}), 500
552
-
553
-
554
-
555
@app.route('/suggest-grammar-questions', methods=['POST'])
def suggest_grammar_questions():
    """Suggest 3 grammar-related questions completing the user's partial input.

    Expects JSON: {"input": "<partial or full grammar query>"} (e.g. "What is v").
    Returns JSON: {"suggestions": [<question>, ...]} on success, or
    {"error": ...} with status 400 (missing input) / 500 (API or server failure).
    """
    try:
        # silent=True keeps a missing/invalid JSON body from raising inside
        # get_json(); treat it as an empty payload so we answer a clean 400
        # instead of a generic 500.
        data = request.get_json(silent=True) or {}
        user_input = data.get('input', '').strip()

        if not user_input:
            return jsonify({'error': 'Input is required'}), 400

        prompt = f"""
        You are a grammar expert. Given the user's input "{user_input}", generate **3 natural grammar-related questions** that people might ask.

        - The user's input is a **partial or full grammar-related query**.
        - AI must **infer the most likely grammar topic** based on the input.
        - AI must **ensure all suggestions are strictly related to English grammar**.
        - **If the input is incomplete, intelligently complete it** with the most likely grammar concept.
        - Ensure all **questions are fully formed and relevant**.

        **User input:** "{user_input}"
        Provide exactly 3 well-structured, grammar-related questions:
        """

        payload = {
            'model': 'command-r-08-2024',
            'prompt': prompt,
            'max_tokens': 100,         # Only short responses needed
            'temperature': 0.9,        # Some randomness for variety
            'frequency_penalty': 0.8,  # Reduce repeated words
            'presence_penalty': 0.8    # Encourage diverse questions
        }

        # Use the shared helper so auth headers stay consistent across endpoints.
        response = requests.post(COHERE_API_URL, headers=_cohere_headers(), json=payload)

        if response.status_code == 200:
            raw_text = response.json().get('generations', [{}])[0].get('text', '')
            # Drop blank lines so the client never receives empty suggestions.
            suggestions = [line.strip() for line in raw_text.strip().split("\n") if line.strip()]
            return jsonify({'suggestions': suggestions})
        else:
            return jsonify({'error': 'Failed to fetch suggestions', 'details': response.text}), 500

    except Exception as e:
        return jsonify({'error': str(e)}), 500
604
-
605
-
606
def validate_topic(topic):
    """Classify *topic* with the Cohere API as grammar-related or not.

    Returns one of the labels the prompt allows: "Grammar", "Not Grammar",
    or "ask grammar topics". Any API failure, non-200 response, or
    unexpected model output falls back to "Not Grammar" — callers only
    accept the exact string "Grammar", so every fallback results in the
    topic being rejected.
    """
    validation_prompt = f"""
    You are an AI grammar expert. Your task is to determine if a given topic is related to **English grammar** or not.

    **Input:** "{topic}"

    ### **Rules:**
    - If the input is **in the form of a question** (e.g., it asks for an explanation or definition), return `"ask grammar topics"`, even if the topic is related to grammar.
    - If the topic is **related to English grammar concepts** such as **parts of speech**, **verb tenses**, **sentence structure**, etc., return `"Grammar"`.
    - If the topic is **not related to grammar**, such as general knowledge, science, math, history, or topics from other fields, return `"Not Grammar"`.
    - Your response must be based purely on whether the topic relates to grammar, and **not** based on specific words, phrases, or examples.

    **Your response must be exactly either "Grammar", "Not Grammar", or "ask grammar topics". No extra text.**
    """

    payload = {
        'model': 'command-r-08-2024',
        'prompt': validation_prompt,
        # Enough room for the longest allowed label ("ask grammar topics");
        # the previous limit of 5 tokens could truncate it.
        'max_tokens': 10
    }

    try:
        response = requests.post(COHERE_API_URL, json=payload, headers=_cohere_headers())

        # Check the status before parsing: a non-200 body may not be JSON
        # and would otherwise raise inside .json().
        if response.status_code != 200:
            return "Not Grammar"

        validation_result = response.json().get('generations', [{}])[0].get('text', '').strip()

        # Accept only the three labels the prompt allows; anything else is
        # treated as "Not Grammar" to avoid incorrect acceptance.
        if validation_result not in ("Grammar", "Not Grammar", "ask grammar topics"):
            return "Not Grammar"

        return validation_result

    except Exception:
        # Same effect as an unknown label: callers treat anything other
        # than "Grammar" as a rejection.
        return "Not Grammar"
651
-
652
-
653
# Per-difficulty prompt templates for /generate-questions; '{topic}' is
# substituted at request time. Keeping these as data removes a ~120-line
# if/elif chain and makes adding a level a one-entry change.
_DIFFICULTY_PROMPTS = {
    'basic': """
    Generate five **completely new and unique** very basic-level fill-in-the-blank grammar questions **every time** on the topic '{topic}'.

    ### Rules:
    - **Each question must have a different theme**, ensuring variety.
    - **Do not repeat** themes from previous generations.
    - Use **different sentence structures**, avoiding predictable patterns.
    - **Avoid long words or abstract concepts**
    - **Focus on the topic '{topic}'**, and ensure the blank is the key part of speech.
    - Each question should contain **one blank represented by '_______'**, with the correct answer provided in parentheses at the end.
    - The vocabulary should be **simple and suitable for beginners**.
    - Ensure that the questions are **always new and distinct**, avoiding previously used themes.

    Do not include any explanations or instructions in the response—only the five unique questions.
    """,

    'elementary': """
    Generate five **completely new and unique** elementary-level fill-in-the-blank grammar questions **every time** on the topic '{topic}'.

    ### Rules:
    - The questions should be **slightly more challenging than basic-level questions**, incorporating **a wider range of sentence structures and vocabulary**.
    - **Each question must have a different theme**, ensuring variety.
    - **Use varied sentence structures**, making them slightly more complex than basic-level questions.
    - **Ensure that the vocabulary is still simple but slightly broader** than basic-level questions.
    - **Focus on the topic '{topic}'**, and ensure the blank is the key part of speech.
    - Each question should contain **one blank represented by '_______'**, with the correct answer provided in parentheses at the end.
    - Ensure that the questions are **always new and distinct**, avoiding previously used themes.

    Do not include any explanations or instructions in the response—only the five unique questions.
    """,

    'pre-intermediate': """
    Generate five **completely new and unique** pre-intermediate-level fill-in-the-blank grammar questions **every time** on the topic '{topic}'.

    ### Rules:
    - The questions should be **more challenging than elementary-level questions**, incorporating **more varied sentence structures and an expanded vocabulary**.
    - **Each question must have a different theme**, ensuring variety.
    - **Ensure that the vocabulary is broader than elementary-level**, while keeping it understandable for learners.
    - **Sentences should be longer and more descriptive** but still clear.
    - **Focus on the topic '{topic}'**, and ensure the blank is the key part of speech.
    - Each question should contain **one blank represented by '_______'**, with the correct answer provided in parentheses at the end.
    - Ensure that the questions are **always new and distinct**, avoiding previously used themes.

    Do not include any explanations or instructions in the response—only the five unique questions.
    """,

    'intermediate': """
    Generate five **completely new and unique** intermediate-level fill-in-the-blank grammar questions **every time** on the topic '{topic}'.

    ### Rules:
    - The questions should be **more complex than pre-intermediate-level questions**, incorporating **a wider range of vocabulary and sentence structures**.
    - **Each question must have a different theme**, ensuring variety.
    - **Ensure that the vocabulary is more advanced than pre-intermediate-level** while keeping it natural for learners.
    - **Sentences should be longer and include more detail**, improving reading comprehension.
    - **Focus on the topic '{topic}'**, and ensure the blank is the key part of speech.
    - Each question should contain **one blank represented by '_______'**, with the correct answer provided in parentheses at the end.
    - Ensure that the questions are **always new and distinct**, avoiding previously used themes.

    Do not include any explanations or instructions in the response—only the five unique questions.
    """,

    'upper-intermediate': """
    Generate five **completely new and unique** upper-intermediate-level fill-in-the-blank grammar questions **every time** on the topic '{topic}'.

    ### Rules:
    - The questions should be **more complex than intermediate-level questions**, incorporating **more advanced sentence structures and vocabulary**.
    - **Each question must have a different theme**, ensuring variety.
    - **Ensure that the vocabulary is more refined and diverse** but still understandable for upper-intermediate learners.
    - **Sentences should be longer and may introduce more nuanced contexts**, requiring a deeper understanding.
    - **Focus on the topic '{topic}'**, and ensure the blank is the key part of speech.
    - Each question should contain **one blank represented by '_______'**, with the correct answer provided in parentheses at the end.
    - Ensure that the questions are **always new and distinct**, avoiding previously used themes.

    Do not include any explanations or instructions in the response—only the five unique questions.
    """,

    'advanced': """
    Generate five **completely new and unique** advanced-level (C1) fill-in-the-blank grammar questions **every time** on the topic '{topic}'.

    ### Rules:
    - The questions should be **more challenging than upper-intermediate (B2) level**, requiring a deep understanding of grammar, context, and vocabulary.
    - **Ensure varied and sophisticated vocabulary**, avoiding basic words.
    - **Each question should require nuanced comprehension**, testing knowledge of advanced grammar patterns.
    - **The blank must be the key part of the sentence** (not an obvious answer).
    - Each question should contain **one blank represented by '_______'**, with the correct answer provided in parentheses at the end.
    - Ensure that the questions are **always new and distinct**, avoiding repetition of themes.

    Do not include any explanations or instructions in the response—only the five unique questions.
    """,

    'hard': """
    Generate five **completely new and unique** hard-level (C2) fill-in-the-blank grammar questions **every time** on the topic '{topic}'.

    ### Rules:
    - The questions should be **more challenging than advanced(C1) level**, requiring a deep understanding of grammar, context, and vocabulary.
    - **Ensure varied and sophisticated vocabulary**, avoiding basic words.
    - **Each question should require nuanced comprehension**, testing knowledge of advanced grammar patterns.
    - **The blank must be the key part of the sentence** (not an obvious answer).
    - Each question should contain **one blank represented by '_______'**, with the correct answer provided in parentheses at the end.
    - Ensure that the questions are **always new and distinct**, avoiding repetition of themes.

    Do not include any explanations or instructions in the response—only the five unique questions.
    """,
}


@app.route('/generate-questions', methods=['POST'])
def generate_questions_test():
    """Generate five fill-in-the-blank grammar questions for a topic.

    Expects JSON: {"topic": <grammar topic>, "difficulty": <level>} where
    level is one of the keys in _DIFFICULTY_PROMPTS (default "basic").
    The topic is first screened by validate_topic(); non-grammar topics are
    rejected with 400. On success the raw Cohere generate response is
    forwarded to the client.
    """
    try:
        # silent=True: a missing/invalid JSON body becomes an empty payload
        # instead of raising and surfacing as a 500.
        data = request.get_json(silent=True) or {}
        topic = data.get('topic', '').strip()

        # Reject anything that is not strictly an English-grammar topic.
        validation_result = validate_topic(topic)
        if validation_result != "Grammar":
            return jsonify({"message": "Please enter a valid **grammar topic**, not a general word or unrelated question."}), 400

        difficulty = data.get('difficulty', 'basic')

        # Debugging output
        print(f"Generating {difficulty} questions for topic: {topic}")

        template = _DIFFICULTY_PROMPTS.get(difficulty)
        if template is None:
            return jsonify({"error": "Invalid difficulty level"}), 400
        prompt = template.format(topic=topic)

        payload = {
            'model': 'command-r-08-2024',
            'prompt': prompt,
            'max_tokens': 1000  # Five full questions need a generous limit
        }

        # Shared helper keeps auth headers consistent across endpoints.
        response = requests.post(COHERE_API_URL, json=payload, headers=_cohere_headers())

        # Log the full response for debugging
        print("Response status code:", response.status_code)
        print("Response content:", response.text)

        if response.status_code == 200:
            return jsonify(response.json())  # Forward the Cohere response as-is
        else:
            return jsonify({"error": "Failed to fetch questions", "details": response.text}), 500
    except Exception as e:
        return jsonify({"error": str(e)}), 500
813
-
814
- # Endpoint to validate answers
815
@app.route('/validate-answer', methods=['POST'])
def validate_answer():
    """Check a single fill-in-the-blank answer via the Cohere API.

    Expects JSON with "topic", "question" and "user_answer"; forwards the
    raw Cohere generate response on success, 400 when a field is missing,
    500 on API or server failure.
    """
    try:
        body = request.get_json()
        topic = body.get('topic', '')
        question = body.get('question', '')
        user_answer = body.get('user_answer', '')

        # All three fields are mandatory.
        if not (topic and question and user_answer):
            return jsonify({'error': 'Topic, question, and user answer are required'}), 400

        # Prompt asking the model to judge the answer and hint on mistakes.
        prompt = f"""
        You are a highly knowledgeable grammar assistant. Validate whether the user's answer to the following question is correct or not based on {topic}. If the answer is incorrect, provide a helpful hint.

        Topic: {topic}
        Question: "{question}"
        User's Answer: "{user_answer}"

        Is the answer correct? If not, please explain why and give a hint.
        """

        api_headers = {
            'Authorization': f'Bearer {COHERE_API_KEY}',
            'Content-Type': 'application/json'
        }
        api_payload = {
            'model': 'command-r-08-2024',
            'prompt': prompt,
            'max_tokens': 100,
            'temperature': 0.7
        }

        cohere_response = requests.post(COHERE_API_URL, headers=api_headers, json=api_payload)

        # Debug logging of the upstream call.
        print(f"Status Code: {cohere_response.status_code}")
        print(f"Response Body: {cohere_response.text}")

        if cohere_response.status_code != 200:
            return jsonify({'error': 'Failed to fetch data from Cohere API'}), 500
        return jsonify(cohere_response.json())

    except Exception as e:
        return jsonify({'error': str(e)}), 500
868
-
869
-
870
-
871
- # // for validating Multiple answer:
872
-
873
- # Function to validate an individual answer using Cohere API
874
def validate_answer_with_ai(topic, question, user_answer):
    """Ask the Cohere API whether *user_answer* is correct for *question*.

    Returns the model's verdict text on success; on an HTTP failure or an
    unexpected exception it returns a human-readable error string instead
    of raising, so batch callers can keep going.
    """
    try:
        # Prompt asking the model to judge the answer and hint on mistakes.
        prompt = f"""
        You are a highly knowledgeable grammar assistant. Validate whether the user's answer to the following question is correct or not based on {topic}. If the answer is incorrect, provide a helpful hint.

        Topic: {topic}
        Question: "{question}"
        User's Answer: "{user_answer}"

        Is the answer correct? If not, please explain why and give a hint.
        """

        request_headers = {
            'Authorization': f'Bearer {COHERE_API_KEY}',
            'Content-Type': 'application/json'
        }
        request_payload = {
            'model': 'command-r-08-2024',
            'prompt': prompt,
            'max_tokens': 200,
            'temperature': 0.7,
            'stop_sequences': ['\n']
        }

        api_response = requests.post('https://api.cohere.ai/v1/generate', headers=request_headers, json=request_payload)

        if api_response.status_code != 200:
            return f"Error: {api_response.status_code} - {api_response.text}"

        # Pull the generated verdict text out of the response payload.
        generations = api_response.json()['generations']
        return generations[0]['text'].strip()

    except Exception as e:
        return f"An error occurred: {str(e)}"
915
-
916
-
917
-
918
-
919
- # Endpoint to validate multiple answers at once
920
@app.route('/validate-all-answers', methods=['POST'])
def validate_all_answers():
    """Validate a batch of question/answer pairs in a single request.

    Expects JSON: {"questions": [{"topic", "question", "user_answer"}, ...]}.
    Each entry is judged by validate_answer_with_ai(); answers flagged as
    incorrect additionally get a hint from generate_hint(). Returns
    {"results": [...]} mirroring the input order.
    """
    try:
        payload = request.get_json()
        submitted = payload.get('questions', [])

        if not submitted:
            return jsonify({'error': 'No questions provided'}), 400

        results = []

        for entry in submitted:
            topic = entry.get('topic', '')
            question = entry.get('question', '')
            user_answer = entry.get('user_answer', '')

            # Record incomplete entries for the client, then keep going.
            if not (topic and question and user_answer):
                results.append({
                    'question': question,
                    'error': 'Missing required fields (topic, question, or answer).'
                })
                continue

            verdict = validate_answer_with_ai(topic, question, user_answer)

            # Only wrong answers get a hint; the check is a heuristic on the
            # model's wording.
            lowered = verdict.lower()
            needs_hint = 'incorrect' in lowered or 'not correct' in lowered
            hint = generate_hint(topic, question, user_answer) if needs_hint else None

            results.append({
                'question': question,
                'user_answer': user_answer,
                'validation_response': verdict,
                'hint': hint
            })

        return jsonify({'results': results})

    except Exception as e:
        return jsonify({'error': str(e)}), 500
965
-
966
-
967
-
968
def generate_hint(topic, question, user_answer):
    """Generate a non-revealing hint for an incorrect answer via Cohere.

    Returns the hint text on success; on an HTTP failure or unexpected
    exception it returns a human-readable error string instead of raising.
    """
    try:
        # Prompt instructing the model to explain the mistake without
        # giving away the correct answer.
        hint_prompt = f"""
        You are a highly skilled grammar assistant. Your task is to generate a helpful hint for the user to improve their answer based on the following question.

        Topic: {topic}
        Question: "{question}"
        User's Answer: "{user_answer}"

        If the user's answer is incorrect, provide a specific, actionable hint to help the user correct their answer.
        The hint should include:
        - Explanation of the error made by the user.
        - A hint on the correct grammatical structure or word form.
        - A hint on how to structure the sentence correctly **without revealing the exact answer**.

        Please make sure the hint is **clear** and **helpful** for the user, **without revealing the correct answer**.
        """

        req_headers = {
            'Authorization': f'Bearer {COHERE_API_KEY}',
            'Content-Type': 'application/json'
        }
        req_payload = {
            'model': 'command-r-08-2024',
            'prompt': hint_prompt,
            'max_tokens': 250,   # Room for a multi-part hint
            'temperature': 0.7,  # Mild creativity
        }

        api_response = requests.post(COHERE_API_URL, headers=req_headers, json=req_payload)

        if api_response.status_code != 200:
            return f"Error: {api_response.status_code} - {api_response.text}"

        # Extract the hint text from the generate response.
        return api_response.json()['generations'][0]['text'].strip()

    except Exception as e:
        return f"An error occurred: {str(e)}"
1012
-
1013
-
1014
-
1015
-
1016
-
1017
-
1018
# Script entry point: run the Flask development server directly.
# (A production deployment should use a WSGI server instead.)
if __name__ == '__main__':
    # app.run(debug=True)
    app.register_blueprint(movie_bp, url_prefix='')  # expose /explain-grammar locally
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # do not ship this setting to production.
    app.run(host='0.0.0.0', port=5012, debug=True)