# -*- coding: utf-8 -*-
"""app
Automatically generated by Colab.
Original file is located at
    https://colab.research.google.com/drive/1pwwcBb5Zlw1DA3u5K8W8mjrwBTBWXc1L
"""

import gradio as gr
import numpy as np
from transformers import pipeline
import os
import uuid

# LangChain imports
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_groq import ChatGroq

# Other imports
import chardet
import fitz  # PyMuPDF for PDFs
import docx  # python-docx for Word files
import gtts  # Google Text-to-Speech library
from pptx import Presentation  # python-pptx for PowerPoint files
import re

import torch  # used to pick device/dtype for the Whisper ASR pipeline

# Set API Key (read from the GROQ_API_KEY environment variable)
groq_api_key = os.getenv("GROQ_API_KEY")

# Initialize Chat Model
chat_model = ChatGroq(model_name="llama-3.3-70b-versatile", api_key=groq_api_key)

# Initialize Embeddings and chromaDB
os.makedirs("chroma_db", exist_ok=True)
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectorstore = Chroma(
    embedding_function=embedding_model,
    persist_directory="chroma_db"
)
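# Documents added later (see process_document) are embedded with the MiniLM model above
# and persisted under chroma_db/ for similarity search.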

# Short-term memory for the LLM
chat_memory = []

# Prompt for quiz generation
quiz_prompt = """
You are an AI assistant specialized in education and assessment creation. Given an uploaded document or text, generate a quiz with a mix of multiple-choice questions (MCQs) and fill-in-the-blank questions. The quiz should be directly based on the key concepts, facts, and details from the provided material.
Generate 20 Questions.
Remove all unnecessary formatting generated by the LLM, including <think> tags, asterisks, markdown formatting, and any bold or italic text, as well as **, ###, ##, and # tags.
For each question:
- Provide 4 answer choices (for MCQs), with only one correct answer.
- Ensure fill-in-the-blank questions focus on key terms, phrases, or concepts from the document.
- Include an answer key for all questions.
- Ensure questions vary in difficulty and encourage comprehension rather than memorization.
- Additionally, implement an instant feedback mechanism:
    - When a user selects an answer, indicate whether it is correct or incorrect.
    - If incorrect, provide a brief explanation from the document to guide learning.
    - Ensure responses are concise and educational to enhance understanding.
Output Example:
1. Fill in the blank: The LLM Agent framework has a central decision-making unit called the _______________________.
Answer: Agent Core
Feedback: The Agent Core is the central component of the LLM Agent framework, responsible for managing goals, tool instructions, planning modules, memory integration, and agent persona.
2. What is the main limitation of LLM-based applications?
a) Limited token capacity
b) Lack of domain expertise
c) Prone to hallucination
d) All of the above
Answer: d) All of the above
Feedback: LLM-based applications have several limitations, including limited token capacity, lack of domain expertise, and being prone to hallucination, among others.
3. Given the following info, what is the value of P(jam|Rain)?
P(no Rain) = 0.8;
P(no Jam) = 0.2;
P(Rain|Jam) = 0.1
a) 0.016
b) 0.025
c) 0.1
d) 0.4
Answer: d) 0.4
Feedback: This question tests understanding of Bayes' Theorem by requiring the calculation of conditional probability using the given values.
"""

# Function to clean AI response by removing unwanted formatting
def clean_response(response):
    """Removes <think> tags, asterisks, and markdown formatting."""
    cleaned_text = re.sub(r"<think>.*?</think>", "", response, flags=re.DOTALL)
    cleaned_text = re.sub(r"(\*\*|\*|\[|\])", "", cleaned_text)
    cleaned_text = re.sub(r"^##+\s*", "", cleaned_text, flags=re.MULTILINE)
    cleaned_text = re.sub(r"\\", "", cleaned_text)
    cleaned_text = re.sub(r"---", "", cleaned_text)
    return cleaned_text.strip()

# Function to generate quiz based on content
def generate_quiz(content):
    prompt = f"{quiz_prompt}\n\nDocument content:\n{content}"
    response = chat_model.invoke([HumanMessage(content=prompt)])
    cleaned_response = clean_response(response.content)
    return cleaned_response

# Function to retrieve relevant documents from vectorstore based on user query
def retrieve_documents(query):
    results = vectorstore.similarity_search(query, k=3)
    return [doc.page_content for doc in results]

# Function to convert tuple format to message format
def convert_to_message_format(chat_history):
    message_format = []
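    # Expand each (user, bot) tuple into two role-tagged message dicts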
    for user_msg, bot_msg in chat_history:
        message_format.append({"role": "user", "content": user_msg})
        message_format.append({"role": "assistant", "content": bot_msg})
    return message_format

# Function to convert message format to tuple format for processing
def convert_to_tuple_format(chat_history):
    tuple_format = []
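    # Stored messages alternate user/assistant; walk them two at a time and re-pair into (user, bot) tuples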
    for i in range(0, len(chat_history), 2):
        if i+1 < len(chat_history):
            user_msg = chat_history[i]["content"]
            bot_msg = chat_history[i+1]["content"]
            tuple_format.append((user_msg, bot_msg))
    return tuple_format

# Function to handle chatbot interactions with short-term memory
def chat_with_groq(user_input, chat_history):
    try:
        # Convert message format to tuple format for processing
        tuple_history = convert_to_tuple_format(chat_history)
        
        # Retrieve relevant documents for additional context
        relevant_docs = retrieve_documents(user_input)
        context = "\n".join(relevant_docs) if relevant_docs else "No relevant documents found."

        # Construct proper prompting with conversation history
        system_prompt = "You are a helpful AI assistant. Answer questions accurately and concisely."
        conversation_history = "\n".join(chat_memory[-10:])
        prompt = f"{system_prompt}\n\nConversation History:\n{conversation_history}\n\nUser Input: {user_input}\n\nContext:\n{context}"

        # Call the chat model
        response = chat_model.invoke([HumanMessage(content=prompt)])

        # Clean response to remove any unwanted formatting
        cleaned_response_text = clean_response(response.content)

        # Append conversation history
        chat_memory.append(f"User: {user_input}")
        chat_memory.append(f"AI: {cleaned_response_text}")

        # Update chat history
        chat_history.append({"role": "user", "content": user_input})
        chat_history.append({"role": "assistant", "content": cleaned_response_text})

        # Convert response to speech
        audio_file = speech_playback(cleaned_response_text)

        return chat_history, "", audio_file
    except Exception as e:
        error_msg = f"Error: {str(e)}"
        chat_history.append({"role": "user", "content": user_input})
        chat_history.append({"role": "assistant", "content": error_msg})
        return chat_history, "", None

# Function to play response as speech using gTTS
def speech_playback(text):
    try:
        # Generate a unique filename for each audio file
        unique_id = str(uuid.uuid4())
        audio_file = f"output_audio_{unique_id}.mp3"

        # Convert text to speech
        tts = gtts.gTTS(text, lang='en')
        tts.save(audio_file)

        # Return the path to the audio file
        return audio_file
    except Exception as e:
        print(f"Error in speech_playback: {e}")
        return None

# Function to detect encoding safely
def detect_encoding(file_path):
    try:
        with open(file_path, "rb") as f:
            raw_data = f.read(4096)
            detected = chardet.detect(raw_data)
            encoding = detected["encoding"]
        return encoding if encoding else "utf-8"
    except Exception:
        return "utf-8"

# Function to extract text from PDF
def extract_text_from_pdf(pdf_path):
    try:
        doc = fitz.open(pdf_path)
        text = "\n".join([page.get_text("text") for page in doc])
        return text if text.strip() else "No extractable text found."
    except Exception as e:
        return f"Error extracting text from PDF: {str(e)}"

# Function to extract text from Word files (.docx)
def extract_text_from_docx(docx_path):
    try:
        doc = docx.Document(docx_path)
        text = "\n".join([para.text for para in doc.paragraphs])
        return text if text.strip() else "No extractable text found."
    except Exception as e:
        return f"Error extracting text from Word document: {str(e)}"

# Function to extract text from PowerPoint files (.pptx)
def extract_text_from_pptx(pptx_path):
    try:
        presentation = Presentation(pptx_path)
        text = ""
        for slide in presentation.slides:
            for shape in slide.shapes:
                if hasattr(shape, "text"):
                    text += shape.text + "\n"
        return text if text.strip() else "No extractable text found."
    except Exception as e:
        return f"Error extracting text from PowerPoint: {str(e)}"

# Function to process documents safely
def process_document(file):
    try:
        file_extension = os.path.splitext(file.name)[-1].lower()
        if file_extension in [".png", ".jpg", ".jpeg"]:
            return "Error: Images cannot be processed for text extraction."
        if file_extension == ".pdf":
            content = extract_text_from_pdf(file.name)
        elif file_extension == ".docx":
            content = extract_text_from_docx(file.name)
        elif file_extension == ".pptx":
            content = extract_text_from_pptx(file.name)
        else:
            encoding = detect_encoding(file.name)
            with open(file.name, "r", encoding=encoding, errors="replace") as f:
                content = f.read()
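        # Split the extracted text into 500-character chunks (50-char overlap) and index them for retrieval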
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
        documents = [Document(page_content=chunk) for chunk in text_splitter.split_text(content)]
        vectorstore.add_documents(documents)

        quiz = generate_quiz(content)
        return f"Document processed successfully (File Type: {file_extension}). Quiz generated:\n{quiz}"
    except Exception as e:
        return f"Error processing document: {str(e)}"



# Function to handle speech-to-text conversion

# Initialize Whisper model globally to avoid reloading
def initialize_whisper_model():
    """Initialize Whisper model once to improve performance"""
    try:
        # Use larger model for better accuracy
        model_name = "openai/whisper-small.en"  # or "openai/whisper-medium.en" for even better accuracy
        transcriber = pipeline(
            "automatic-speech-recognition",
            model=model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device="cuda" if torch.cuda.is_available() else "cpu"
        )
        return transcriber
    except Exception as e:
        print(f"Error initializing Whisper model: {e}")
        # Fallback to base model
        return pipeline("automatic-speech-recognition", model="openai/whisper-base.en")

# Initialize model once
whisper_model = initialize_whisper_model()

def transcribe_audio(audio):
    """Enhanced speech-to-text transcription with better preprocessing"""
    if audio is None:
        return "Please record audio first"
    
    try:
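        # Gradio's numpy audio input arrives as a (sample_rate, samples) tuple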
        sr, y = audio
        
        # Enhanced audio preprocessing
        if y.ndim > 1:
            y = y.mean(axis=1)  # Convert to mono
        
        # Convert to proper data type
        y = y.astype(np.float32)
        
        # Normalize audio
        max_val = np.max(np.abs(y))
        if max_val > 0:
            y = y / max_val
        
        # Remove silence (simple threshold-based)
        silence_threshold = 0.01
        non_silent_indices = np.where(np.abs(y) > silence_threshold)[0]
        
        if len(non_silent_indices) == 0:
            return "No speech detected. Please speak louder or check your microphone."
        
        # Trim silence from beginning and end
        start_idx = non_silent_indices[0]
        end_idx = non_silent_indices[-1]
        y_trimmed = y[start_idx:end_idx+1]
        
        # Check if audio is too short
        if len(y_trimmed) / sr < 0.5:  # Less than 0.5 seconds
            return "Audio too short. Please speak for at least 1-2 seconds."
        
        # Run transcription. English-only Whisper checkpoints (*.en) reject
        # explicit "task"/"language" generate kwargs, so none are passed here.
        result = whisper_model(
            {"sampling_rate": sr, "raw": y_trimmed},
            return_timestamps=False
        )
        
        text = result["text"].strip()
        
        if not text or text.lower() in ["", "you", "thank you"]:
            return "No clear speech detected. Try speaking more clearly or in a quieter environment."
        
        return text
        
    except Exception as e:
        error_msg = f"Transcription error: {str(e)}"
        print(error_msg)
        return f"Sorry, I couldn't process the audio. Please try again or type your message instead. Error: {str(e)}"

# Clear chat history function
def clear_chat_history():
    chat_memory.clear()
    return [], None

def tutor_ai_chatbot():
    """Main Gradio interface for the Tutor AI Chatbot."""
    with gr.Blocks() as app:
        gr.Markdown("# AI Tutor - We.(POC)")
        gr.Markdown("An interactive Personal AI Tutor chatbot to help with your learning needs.")

        # Chatbot Tab
        with gr.Tab("AI Chatbot"):
            with gr.Row():
                with gr.Column(scale=3):
                    chatbot = gr.Chatbot(height=500, type="messages")
                    
                with gr.Column(scale=1):
                    audio_playback = gr.Audio(label="Audio Response", type="filepath")
            
            # Move the input controls here to span full width
            with gr.Row():
                msg = gr.Textbox(
                    label="Ask a question", 
                    placeholder="Type your question here...",
                    container=False
                )
                submit = gr.Button("Send")
            
            with gr.Row():
                with gr.Column(scale=1):
                    audio_input = gr.Audio(type="numpy", label="Record or Upload Audio")
            
            # Voice recording tips - ONLY in AI Chatbot tab
            with gr.Accordion("Voice Recording Tips", open=False):
                gr.Markdown("""
                **For better speech recognition accuracy:**
                - Speak clearly and at a moderate pace
                - Record in a quiet environment  
                - Keep the microphone close to your mouth (10-15 cm)
                - Use a good quality microphone if possible
                - Review the transcribed text before sending
                - If transcription is poor, try recording again or type manually
                """)
            
            # Clear chat history button
            clear_btn = gr.Button("Clear Chat")

            # Handle chat interaction
            submit.click(
                chat_with_groq,
                inputs=[msg, chatbot],
                outputs=[chatbot, msg, audio_playback]
            )

            # Clear both the visible chat and the model's short-term memory
            clear_btn.click(
                clear_chat_history,
                inputs=None,
                outputs=[chatbot, audio_playback]
            )

            # Also allow Enter key to submit
            msg.submit(
                chat_with_groq,
                inputs=[msg, chatbot],
                outputs=[chatbot, msg, audio_playback]
            )

            # Add some examples of questions students might ask
            with gr.Accordion("Example Questions", open=False):
                gr.Examples(
                    examples=[
                        "Can you explain the concept of RLHF AI?",
                        "What are AI transformers?",
                        "What is MoE AI?",
                        "What's gate networks AI?",
                        "I am making a switch, please generating baking recipe?"
                    ],
                    inputs=msg
                )

            # Connect audio input to transcription
            audio_input.change(fn=transcribe_audio, inputs=audio_input, outputs=msg)

        # Upload Notes & Generate Quiz Tab
        with gr.Tab("Upload Notes & Generate Quiz"):
            with gr.Row():
                with gr.Column(scale=2):
                    file_input = gr.File(label="Upload Lecture Notes (PDF, DOCX, PPTX)")
                with gr.Column(scale=3):
                    quiz_output = gr.Textbox(label="Generated Quiz", lines=10)

            # Connect file input to document processing
            file_input.change(process_document, inputs=file_input, outputs=quiz_output)

        # Introduction Video Tab - Now with the working video
        with gr.Tab("Introduction Video"):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Welcome to the Introduction Video")
                    gr.Markdown("Music from Xu Mengyuan - China-O, musician Xu Mengyuan YUAN! | 徐梦圆 - China-O 音乐人徐梦圆YUAN!")
                    # Use the local video file that's stored in your Space
                    gr.Video("We_not_me_video.mp4", label="Introduction Video")

        # Launch the application
        app.launch(share=False)

# Launch the AI chatbot
if __name__ == "__main__":
    tutor_ai_chatbot()