Update app.py
app.py CHANGED
@@ -15,9 +15,6 @@ from concurrent.futures import ThreadPoolExecutor
 import threading
 from html import escape
 
-# Import OpenAI library
-import openai
-
 # Suppress specific warnings
 import urllib3
 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -73,19 +70,35 @@ CATEGORIES = [
     "Uncategorized",
 ]
 
-#
-
+# Remove OpenAI dependencies
+# Define a function to generate responses using llama-3.1-70b-versatile
+def generate_llama_response(prompt):
+    """
+    Generate a response using the llama-3.1-70b-versatile model.
 
-
-
+    Replace the implementation below with the actual method to interact with your LLM.
+    This could be an API call to a local server, a cloud service, or any interface you have set up.
 
-
-
-
+    Example (pseudo-code):
+    response = requests.post("http://localhost:5000/generate", json={"prompt": prompt})
+    return response.json().get("response", "")
 
-
-
-
+    For demonstration purposes, we'll return a placeholder response.
+    """
+    try:
+        # Example implementation; replace with actual API call or method
+        # For instance, if using a local API:
+        # response = requests.post("http://localhost:5000/generate", json={"prompt": prompt})
+        # return response.json().get("response", "")
+
+        # Placeholder response
+        logger.info("Generating response using llama-3.1-70b-versatile")
+        # Simulate processing time
+        time.sleep(1)
+        return "This is a placeholder response from llama-3.1-70b-versatile."
+    except Exception as e:
+        logger.error(f"Error generating response from llama: {e}", exc_info=True)
+        return "Error generating response."
 
 def extract_main_content(soup):
     """
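The new generate_llama_response is deliberately left as a placeholder. One way to wire it up is against an OpenAI-compatible chat-completions endpoint; Groq, for example, serves llama-3.1-70b-versatile. The sketch below assumes that setup, and the URL, environment variable, and parameter values are illustrative, not part of this commit:

import logging
import os
import requests

logger = logging.getLogger(__name__)

# Assumed configuration; point these at whatever serves the model for you.
LLAMA_API_URL = "https://api.groq.com/openai/v1/chat/completions"  # assumption
LLAMA_API_KEY = os.environ.get("LLAMA_API_KEY", "")  # assumption

def generate_llama_response(prompt):
    """Generate a response from llama-3.1-70b-versatile via an OpenAI-compatible API."""
    try:
        resp = requests.post(
            LLAMA_API_URL,
            headers={"Authorization": f"Bearer {LLAMA_API_KEY}"},
            json={
                "model": "llama-3.1-70b-versatile",
                "messages": [{"role": "user", "content": prompt}],
                "max_tokens": 150,
                "temperature": 0.5,
            },
            timeout=30,
        )
        resp.raise_for_status()
        return resp.json()["choices"][0]["message"]["content"]
    except Exception as e:
        logger.error(f"Error generating response from llama: {e}", exc_info=True)
        return ""  # empty string lets callers' `if not response:` checks fire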
@@ -158,7 +171,7 @@ def get_page_metadata(soup):
 
 def generate_summary_and_assign_category(bookmark):
     """
-    Generate a concise summary and assign a category using OpenAI's GPT-4 model.
+    Generate a concise summary and assign a category using the llama-3.1-70b-versatile model.
     """
     logger.info(f"Generating summary and assigning category for bookmark: {bookmark.get('url')}")
 
@@ -167,8 +180,8 @@ def generate_summary_and_assign_category(bookmark):
 
     while retry_count < max_retries:
         try:
-            # Rate Limiting Logic
-            with
+            # Rate Limiting Logic (if necessary)
+            with lock:
                 global last_api_call_time
                 current_time = time.time()
                 elapsed = current_time - last_api_call_time
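Both this function and chatbot_response guard the model call with the same lock-based spacing logic. Pieced together from the context lines, the pattern is roughly the following sketch; lock and last_api_call_time appear in the diff, while MIN_API_CALL_INTERVAL is a hypothetical name for the spacing constant:

import threading
import time

lock = threading.Lock()
last_api_call_time = 0.0
MIN_API_CALL_INTERVAL = 1.0  # assumed: minimum seconds between model calls

def wait_for_rate_limit():
    """Block until at least MIN_API_CALL_INTERVAL has passed since the last call."""
    global last_api_call_time
    with lock:
        elapsed = time.time() - last_api_call_time
        if elapsed < MIN_API_CALL_INTERVAL:
            time.sleep(MIN_API_CALL_INTERVAL - elapsed)
        last_api_call_time = time.time()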
@@ -237,23 +250,15 @@ Summary: [Your summary]
 Category: [One category]
 """
 
-        # Call the OpenAI API
-        response = openai.ChatCompletion.create(
-            model='gpt-4',  # Ensure you're using a valid and accessible model
-            messages=[
-                {"role": "user", "content": prompt}
-            ],
-            max_tokens=150,
-            temperature=0.5,
-        )
+        # Generate response using llama-3.1-70b-versatile
+        response = generate_llama_response(prompt)
 
-        content = response['choices'][0]['message']['content'].strip()
-        if not content:
+        if not response:
             raise ValueError("Empty response received from the model.")
 
         # Parse the response
-        summary_match = re.search(r"Summary:\s*(.*)", content)
-        category_match = re.search(r"Category:\s*(.*)", content)
+        summary_match = re.search(r"Summary:\s*(.*)", response)
+        category_match = re.search(r"Category:\s*(.*)", response)
 
         if summary_match:
             bookmark['summary'] = summary_match.group(1).strip()
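To make the parsing step concrete, here is a hypothetical model reply in the prompt's "Summary: ... / Category: ..." format and what the two regexes extract (since `.` does not match newlines, each pattern captures a single line):

import re

response = "Summary: A tutorial on CSS grid layouts.\nCategory: Web Development"  # illustrative

summary_match = re.search(r"Summary:\s*(.*)", response)
category_match = re.search(r"Category:\s*(.*)", response)

print(summary_match.group(1).strip())   # A tutorial on CSS grid layouts.
print(category_match.group(1).strip())  # Web Development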
@@ -280,16 +285,11 @@ Category: [One category]
             logger.info("Successfully generated summary and assigned category")
             break  # Exit the retry loop upon success
 
-        except openai.error.RateLimitError as e:
-            retry_count += 1
-            wait_time = int(e.headers.get("Retry-After", 5))
-            logger.warning(f"Rate limit reached. Waiting for {wait_time} seconds before retrying... (Attempt {retry_count}/{max_retries})")
-            time.sleep(wait_time)
         except Exception as e:
             logger.error(f"Error generating summary and assigning category: {e}", exc_info=True)
             bookmark['summary'] = 'No summary available.'
             bookmark['category'] = 'Uncategorized'
-            break  # Exit the retry loop on
+            break  # Exit the retry loop on exceptions
 
 def parse_bookmarks(file_content):
     """
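Dropping the openai.error.RateLimitError handler means retries now happen only through the generic except branch, which breaks out immediately. If the llama backend can also throttle requests, a model-agnostic backoff along these lines could restore the old behavior; this is a sketch, and the exception type and timings are assumptions:

import logging
import time

logger = logging.getLogger(__name__)

def call_with_backoff(fn, prompt, max_retries=3, base_wait=5):
    """Call fn(prompt), retrying with exponential backoff on failure."""
    for attempt in range(1, max_retries + 1):
        try:
            return fn(prompt)
        except Exception as e:  # narrow to the backend's throttle error if it defines one
            if attempt == max_retries:
                raise
            wait = base_wait * 2 ** (attempt - 1)
            logger.warning(f"Attempt {attempt}/{max_retries} failed ({e}); retrying in {wait}s")
            time.sleep(wait)

# Usage: response = call_with_backoff(generate_llama_response, prompt)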
@@ -624,8 +624,8 @@ def chatbot_response(user_query, chat_history):
         # Append user's message to chat history
         chat_history.append({"role": "user", "content": user_query})
 
-        # Rate Limiting Logic
-        with
+        # Rate Limiting Logic (if necessary)
+        with lock:
             global last_api_call_time
             current_time = time.time()
             elapsed = current_time - last_api_call_time
@@ -665,28 +665,19 @@ Bookmarks:
 Provide a concise and helpful response.
 """
 
-        # Call the OpenAI API
-        response = openai.ChatCompletion.create(
-
-
-
-
-
-            temperature=0.7,
-        )
-
-        answer = response['choices'][0]['message']['content'].strip()
+        # Generate response using llama-3.1-70b-versatile
+        response = generate_llama_response(prompt)
+
+        if not response:
+            raise ValueError("Empty response received from the model.")
+
+        answer = response.strip()
         logger.info("Chatbot response generated")
 
         # Append the assistant's response to chat history
         chat_history.append({"role": "assistant", "content": answer})
         return chat_history
 
-    except openai.error.RateLimitError as e:
-        wait_time = int(e.headers.get("Retry-After", 5))
-        logger.warning(f"Rate limit reached. Waiting for {wait_time} seconds before retrying...")
-        time.sleep(wait_time)
-        return chatbot_response(user_query, chat_history)  # Retry after waiting
     except Exception as e:
         error_message = f"⚠️ Error processing your query: {str(e)}"
         logger.error(error_message, exc_info=True)
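Since chatbot_response appends both sides of the exchange and returns the updated list, a caller's round trip looks like this (values illustrative):

chat_history = []
chat_history = chatbot_response("Which of my bookmarks cover Python?", chat_history)
# chat_history now ends with:
#   {"role": "user", "content": "Which of my bookmarks cover Python?"}
#   {"role": "assistant", "content": "<model answer>"}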