Solution for too large input
RAG.py CHANGED
@@ -15,6 +15,47 @@ from typing import Dict, Any, Optional, List, Tuple
 import json
 import logging
 
+
+import logging
+from datetime import datetime
+from io import StringIO
+
+class RunLogger:
+    def __init__(self, script_name='streamlit_script'):
+        # Create string buffer to store logs
+        self.log_buffer = StringIO()
+
+        # Create logger
+        self.logger = logging.getLogger(script_name)
+        self.logger.setLevel(logging.INFO)
+
+        # Create handler that writes to our string buffer
+        handler = logging.StreamHandler(self.log_buffer)
+        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+        handler.setFormatter(formatter)
+        self.logger.addHandler(handler)
+
+        self.logger.info("=== Starting new run ===")
+
+    def info(self, message):
+        self.logger.info(message)
+
+    def error(self, message):
+        self.logger.error(message)
+
+    def warning(self, message):
+        self.logger.warning(message)
+
+    def output_logs(self):
+        """Print all collected logs"""
+        print("\n=== Run Complete - All Logs ===")
+        print(self.log_buffer.getvalue())
+        print("=== End Logs ===\n")
+
+    def __del__(self):
+        """Ensure logs are output if logger is garbage collected"""
+        self.output_logs()
+
 def retrieve(query: str,vectorstore:PineconeVectorStore, k: int = 1000) -> Tuple[List[Document], List[float]]:
     start = time.time()
     # pinecone_api_key = os.getenv("PINECONE_API_KEY")
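The RunLogger added above buffers records in a StringIO and only prints them when output_logs() is called (or when the instance is garbage collected, via __del__). A minimal usage sketch; the surrounding Streamlit script is not part of this diff, so run_pipeline() is a hypothetical placeholder for the app's actual retrieve/RAG calls:

# Usage sketch for the RunLogger added above. run_pipeline() is a
# hypothetical placeholder; the real Streamlit app code is not shown
# in this diff.
logger = RunLogger(script_name='rag_app')
try:
    logger.info("Starting retrieval")
    # run_pipeline()  # placeholder for the app's real work
    logger.info("Run finished")
except Exception as exc:
    logger.error(f"Run failed: {exc}")
finally:
    logger.output_logs()  # flush the buffered records explicitly

Relying on __del__ to flush is fragile, since finalizer order at interpreter shutdown is undefined, so the explicit output_logs() in the finally block is the safer path. Note also that logging.getLogger(script_name) returns a shared logger instance, so constructing RunLogger twice with the same name attaches duplicate handlers and double-prints every record.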
@@ -182,7 +223,7 @@ def RAG(llm: Any, query: str,vectorstore:PineconeVectorStore, top: int = 10, k:
 <CONTEXT>Cars use gasoline for fuel. Some cars use electricity for fuel.Tesla stock has increased by 10 percent over the last quarter.</CONTEXT>
 <REASONING>Based on the context pineapples have not been explored as a fuel for cars. The context discusses gasoline, electricity, and tesla stock, therefore it is not relevant to the query about pineapples for fuel</REASONING>
 <VALID>NO</VALID>
-<RESPONSE>Pineapples are not a good fuel for cars, however with further
+<RESPONSE>Pineapples are not a good fuel for cars, however with further research they might be</RESPONSE>
 </EXAMPLE>
 Now it's your turn
 <QUERY>
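This fix matters beyond cosmetics: the old few-shot example broke off mid-sentence with no closing tag, which invites the model to emit unclosed <RESPONSE> tags that a tag-based parser cannot extract. The repository's actual "# Parse and return response" code sits outside this hunk, so the following is only a sketch of what tag extraction typically looks like, assuming the model mirrors the example format:

import re
from typing import Optional

# Hypothetical tag extractor; the repo's real parsing code is not shown
# in this diff.
def extract_tag(text: str, tag: str) -> Optional[str]:
    match = re.search(rf"<{tag}>(.*?)</{tag}>", text, re.DOTALL)
    return match.group(1).strip() if match else None

reply = "<VALID>NO</VALID>\n<RESPONSE>Pineapples are not a good fuel for cars</RESPONSE>"
extract_tag(reply, "VALID")     # "NO"
extract_tag(reply, "RESPONSE")  # the answer text; an unclosed tag would yield None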
@@ -192,6 +233,9 @@ def RAG(llm: Any, query: str,vectorstore:PineconeVectorStore, top: int = 10, k:
 
     # Generate response
     ans_prompt = answer_template.invoke({"context": context, "query": query})
+    # Max input tokens is 10,000 for 4o-mini. This is a quick and dirty solution
+    if len(ans_prompt) > 30000:
+        ans_prompt = ans_prompt[:30000]
     response = llm.invoke(ans_prompt)
 
     # Parse and return response
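Two caveats on the character slice above. First, if answer_template is a LangChain prompt template, invoke() returns a PromptValue rather than a str, so len(ans_prompt) and ans_prompt[:30000] would raise a TypeError; rendering with to_string() first avoids that. Second, characters are only a rough proxy for tokens (30,000 characters is roughly 7,500 tokens at ~4 characters per token), and a blind tail cut can delete the query itself if the template places it after the context. A token-aware sketch, assuming tiktoken is installed and using o200k_base, the encoding used by the GPT-4o model family:

import tiktoken

MAX_INPUT_TOKENS = 10_000  # the limit cited in the commit's comment

def truncate_to_tokens(text: str, limit: int = MAX_INPUT_TOKENS) -> str:
    # o200k_base is the tokenizer for the GPT-4o family
    enc = tiktoken.get_encoding("o200k_base")
    tokens = enc.encode(text)
    return text if len(tokens) <= limit else enc.decode(tokens[:limit])

prompt_text = ans_prompt.to_string()  # render the PromptValue to plain text
response = llm.invoke(truncate_to_tokens(prompt_text))

Truncating the retrieved context before formatting the template would be safer still, since it keeps the query and the instructions intact.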