Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -85,28 +85,26 @@ def retrieve_and_generate_app(query, top_k=3):
|
|
| 85 |
for i in I[0]:
|
| 86 |
sol_id = document_ids[i]
|
| 87 |
# Find the full content of the retrieved SOL
|
| 88 |
-
#
|
| 89 |
-
#
|
| 90 |
-
retrieved_content = next((doc["text"] for doc in documents if doc["id"] == sol_id), "Content not found.")
|
| 91 |
-
retrieved_docs.append({"id": sol_id, "content": retrieved_content})
|
| 92 |
|
| 93 |
-
#
|
| 94 |
-
|
|
|
|
|
|
|
| 95 |
|
| 96 |
-
#
|
| 97 |
prompt = f"""
|
| 98 |
Given the following information about Virginia Standards of Learning (SOLs):
|
| 99 |
-
|
| 100 |
{context}
|
| 101 |
-
|
| 102 |
Based on this information, answer the following question:
|
| 103 |
{query}
|
| 104 |
-
|
| 105 |
If the question is about a specific SOL number, provide a direct explanation for that SOL.
|
| 106 |
If asked for lesson plans, worksheets, or proofs, explain what the document generally entails and whether it provides such materials.
|
| 107 |
Be concise and to the point.
|
| 108 |
"""
|
| 109 |
-
# --- Start of the print statements for debugging (keep these for now!) ---
|
| 110 |
print(f"\n--- PROMPT SENT TO LLM ---\n{prompt}\n--------------------------\n")
|
| 111 |
|
| 112 |
response = llm_pipeline(prompt, max_new_tokens=500, num_return_sequences=1, do_sample=True, temperature=0.7)
|
|
@@ -123,8 +121,6 @@ Be concise and to the point.
|
|
| 123 |
answer = generated_text
|
| 124 |
|
| 125 |
print(f"\n--- FINAL ANSWER ---\n{answer}\n--------------------\n")
|
| 126 |
-
# --- End of the print statements for debugging ---
|
| 127 |
-
|
| 128 |
return answer
|
| 129 |
|
| 130 |
# Create Gradio interface
|
|
|
|
| 85 |
for i in I[0]:
|
| 86 |
sol_id = document_ids[i]
|
| 87 |
# Find the full content of the retrieved SOL
|
| 88 |
+
# --- CHANGE THIS LINE ---
|
| 89 |
+
# Original (incorrect): retrieved_content = next((doc["text"] for doc in documents if doc["id"] == sol_id), "Content not found.")
|
| 90 |
+
retrieved_content = next((doc["content"] for doc in documents if doc["id"] == sol_id), "Content not found.")
|
| 91 |
+
retrieved_docs.append({"id": sol_id, "content": retrieved_content})
|
| 92 |
|
| 93 |
+
# 3. Context Construction
|
| 94 |
+
# --- CHANGE THIS LINE ---
|
| 95 |
+
# Original (incorrect): context = "\n\n".join([f"SOL {doc['id']}: {doc['text']}" for doc in retrieved_docs])
|
| 96 |
+
context = "\n\n".join([f"SOL {doc['id']}: {doc['content']}" for doc in retrieved_docs])
|
| 97 |
|
| 98 |
+
# 4. LLM Generation
|
| 99 |
prompt = f"""
|
| 100 |
Given the following information about Virginia Standards of Learning (SOLs):
|
|
|
|
| 101 |
{context}
|
|
|
|
| 102 |
Based on this information, answer the following question:
|
| 103 |
{query}
|
|
|
|
| 104 |
If the question is about a specific SOL number, provide a direct explanation for that SOL.
|
| 105 |
If asked for lesson plans, worksheets, or proofs, explain what the document generally entails and whether it provides such materials.
|
| 106 |
Be concise and to the point.
|
| 107 |
"""
|
|
|
|
| 108 |
print(f"\n--- PROMPT SENT TO LLM ---\n{prompt}\n--------------------------\n")
|
| 109 |
|
| 110 |
response = llm_pipeline(prompt, max_new_tokens=500, num_return_sequences=1, do_sample=True, temperature=0.7)
|
|
|
|
| 121 |
answer = generated_text
|
| 122 |
|
| 123 |
print(f"\n--- FINAL ANSWER ---\n{answer}\n--------------------\n")
|
|
|
|
|
|
|
| 124 |
return answer
|
| 125 |
|
| 126 |
# Create Gradio interface
|