Upload app.py
app.py CHANGED

@@ -28,6 +28,7 @@ class CurriculumChatbot:
         self.llm = None
         self.qa_chain = None
         self.slide_selection_chain = None
+        self.focused_qa_chain = None
         self.response_cache = {} # Simple cache for responses
         self.fast_mode = fast_mode # Skip LLM for faster responses
         self._process_pdfs(slides_dir)
@@ -69,6 +70,12 @@ class CurriculumChatbot:
         )

     def _setup_llm(self):
+        # Initialize all LLM-related attributes to None first
+        self.llm = None
+        self.qa_chain = None
+        self.slide_selection_chain = None
+        self.focused_qa_chain = None
+
         try:
             # Use a smaller, faster model for better performance
             # Llama 3.1 8B is quite large and slow - let's use a smaller model
@@ -143,6 +150,7 @@ Answer:"""
             self.llm = None
             self.qa_chain = None
             self.slide_selection_chain = None
+            self.focused_qa_chain = None

     def get_pdf_page_image(self, pdf_path, page_num):
         try:
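The pattern in this commit is worth a note: _setup_llm now assigns every chain attribute (including the new focused_qa_chain) to None before the try: block, and again in the failure path, so the object never ends up with missing attributes if model loading raises partway through. Below is a minimal, self-contained sketch of the same pattern; ChatbotSketch, _load_model, and answer are illustrative stand-ins, not code from this Space.

class ChatbotSketch:
    def __init__(self):
        self._setup_llm()

    def _setup_llm(self):
        # Assign every LLM-related attribute up front so they all exist
        # even if loading raises partway through and the except branch runs.
        self.llm = None
        self.qa_chain = None
        self.slide_selection_chain = None
        self.focused_qa_chain = None
        try:
            self.llm = self._load_model()                  # may raise
            self.qa_chain = ("qa", self.llm)               # stand-in for real chain setup
            self.focused_qa_chain = ("focused", self.llm)
        except Exception:
            # Reset to a consistent "no LLM" state on any failure.
            self.llm = None
            self.qa_chain = None
            self.slide_selection_chain = None
            self.focused_qa_chain = None

    def _load_model(self):
        # Simulate a failed model download/load.
        raise RuntimeError("model unavailable")

    def answer(self, question):
        # Callers test for None instead of catching AttributeError.
        if self.qa_chain is None:
            return f"[fast mode] keyword lookup for: {question}"
        return "LLM answer"

bot = ChatbotSketch()
print(bot.answer("What does slide 3 cover?"))
# -> [fast mode] keyword lookup for: What does slide 3 cover?

Without the up-front assignments, any caller that touched self.focused_qa_chain after a failed setup would hit AttributeError rather than the None check that the fast-mode fallback relies on.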