Spaces:
Runtime error
Runtime error
Commit
·
82b35ca
1
Parent(s):
732bb40
Add standalone RAG chain test notebook and update Milvus client initialization with API key
Browse files

- Created a new Jupyter notebook `test_ragas_llama_test.ipynb` for testing the standalone RAG chain.
- Implemented a function to set up the RAG chain without a Chainlit session.
- Added logic to retrieve relevant documents and format responses based on context.
- Included evaluation metrics for assessing the performance of the RAG chain.
- Updated `test_vector_search.py` to initialize the Milvus client with an API key from environment variables.
- .gitignore +2 -1
- app.py +39 -22
- delete_collection.py +1 -1
- modal_app.py +63 -0
- populate_db.py +1 -1
- tests/test_ragas.ipynb +113 -101
- tests/test_ragas_llama_test.ipynb +488 -0
- tests/test_vector_search.py +1 -1
.gitignore
CHANGED
|
@@ -227,4 +227,5 @@ volumes/
|
|
| 227 |
simple_analysis.py
|
| 228 |
# This file is used for simple analysis of the codebase, such as checking for unused imports or variables.
|
| 229 |
|
| 230 |
-
RAGAS_test_details/
|
|
|
|
|
|
| 227 |
simple_analysis.py
|
| 228 |
# This file is used for simple analysis of the codebase, such as checking for unused imports or variables.
|
| 229 |
|
| 230 |
+
RAGAS_test_details/
|
| 231 |
+
zilliz-cloud-Dissertation-username-password.txt
|
app.py
CHANGED
|
@@ -16,10 +16,10 @@ from langchain_classic.chains import create_retrieval_chain
|
|
| 16 |
from langchain_classic.chains.combine_documents import create_stuff_documents_chain
|
| 17 |
from langchain_nebius import ChatNebius
|
| 18 |
|
| 19 |
-
from
|
| 20 |
-
from
|
| 21 |
-
from
|
| 22 |
-
from
|
| 23 |
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
|
| 24 |
|
| 25 |
from pymilvus import MilvusClient
|
|
@@ -36,7 +36,7 @@ from populate_db import main
|
|
| 36 |
|
| 37 |
# Initialize Milvus client and embedding model
|
| 38 |
MILVUS_URI = os.getenv("MILVUS_URI", "http://localhost:19530")
|
| 39 |
-
milvus_client = MilvusClient(uri=MILVUS_URI)
|
| 40 |
collection_name = "my_rag_collection"
|
| 41 |
|
| 42 |
# Initialize collection once at startup
|
|
@@ -60,8 +60,8 @@ embedding_model = NebiusEmbeddings(
|
|
| 60 |
|
| 61 |
# Initialize LLM
|
| 62 |
model = ChatNebius(
|
| 63 |
-
model="
|
| 64 |
-
streaming=
|
| 65 |
temperature=0.2,
|
| 66 |
max_tokens=8192,
|
| 67 |
top_p=0.95,
|
|
@@ -159,20 +159,21 @@ Rules:
|
|
| 159 |
7. Do not assume the user's prior knowledge; maintain a neutral, professional, and respectful tone.
|
| 160 |
|
| 161 |
Format requirements:
|
| 162 |
-
- Structure all responses using the RESPONSE TEMPLATE provided
|
| 163 |
-
-
|
|
|
|
| 164 |
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
\n\nResponse template:\n\n
|
| 168 |
|
| 169 |
**Summary**
|
| 170 |
-
|
|
|
|
| 171 |
|
| 172 |
**Key Guidance**
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
|
|
|
| 176 |
|
| 177 |
"""
|
| 178 |
|
|
@@ -299,31 +300,47 @@ async def on_chat_resume(thread: ThreadDict):
|
|
| 299 |
|
| 300 |
@cl.on_message
|
| 301 |
async def on_message(message: cl.Message):
|
| 302 |
-
|
| 303 |
"""Handle incoming messages with RAG and conversation history."""
|
| 304 |
chain = cl.user_session.get("chain")
|
| 305 |
messages = cl.user_session.get("messages", [])
|
|
|
|
|
|
|
|
|
|
| 306 |
cb = cl.AsyncLangchainCallbackHandler(
|
| 307 |
-
stream_final_answer=True,
|
| 308 |
)
|
| 309 |
|
| 310 |
try:
|
| 311 |
-
# Get
|
| 312 |
relevant_docs = retrieve_relevant_documents(message.content, limit=5)
|
| 313 |
citations = format_docs_with_id(relevant_docs)
|
| 314 |
|
| 315 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 316 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 317 |
async with cl.Step(name="References") as step:
|
| 318 |
if relevant_docs:
|
| 319 |
step.output = citations
|
| 320 |
else:
|
| 321 |
step.output = "No relevant documents found for this query."
|
| 322 |
|
| 323 |
-
# Update
|
| 324 |
messages.append(HumanMessage(content=message.content))
|
| 325 |
messages.append(AIMessage(content=answer))
|
| 326 |
-
|
| 327 |
cl.user_session.set("messages", messages)
|
| 328 |
|
| 329 |
except Exception as e:
|
|
|
|
| 16 |
from langchain_classic.chains.combine_documents import create_stuff_documents_chain
|
| 17 |
from langchain_nebius import ChatNebius
|
| 18 |
|
| 19 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
| 20 |
+
from langchain_core.output_parsers import StrOutputParser
|
| 21 |
+
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
|
| 22 |
+
from langchain_core.runnables.config import RunnableConfig
|
| 23 |
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
|
| 24 |
|
| 25 |
from pymilvus import MilvusClient
|
|
|
|
| 36 |
|
| 37 |
# Initialize Milvus client and embedding model
|
| 38 |
MILVUS_URI = os.getenv("MILVUS_URI", "http://localhost:19530")
|
| 39 |
+
milvus_client = MilvusClient(uri=MILVUS_URI, token=os.getenv("MILVUS_API_KEY"))
|
| 40 |
collection_name = "my_rag_collection"
|
| 41 |
|
| 42 |
# Initialize collection once at startup
|
|
|
|
| 60 |
|
| 61 |
# Initialize LLM
|
| 62 |
model = ChatNebius(
|
| 63 |
+
model="Qwen/Qwen3-235B-A22B-Instruct-2507",
|
| 64 |
+
streaming=True,
|
| 65 |
temperature=0.2,
|
| 66 |
max_tokens=8192,
|
| 67 |
top_p=0.95,
|
|
|
|
| 159 |
7. Do not assume the user's prior knowledge; maintain a neutral, professional, and respectful tone.
|
| 160 |
|
| 161 |
Format requirements:
|
| 162 |
+
- Structure all responses using the RESPONSE TEMPLATE provided below.
|
| 163 |
+
- Use bolding for headers (e.g. **Summary**)
|
| 164 |
+
- Ensure there is a blank line before and after lists.
|
| 165 |
|
| 166 |
+
Response template:
|
|
|
|
|
|
|
| 167 |
|
| 168 |
**Summary**
|
| 169 |
+
|
| 170 |
+
[Insert a concise 1-3 sentence answer here]
|
| 171 |
|
| 172 |
**Key Guidance**
|
| 173 |
+
|
| 174 |
+
* [Actionable point 1]
|
| 175 |
+
* [Actionable point 2]
|
| 176 |
+
* [Actionable point 3]
|
| 177 |
|
| 178 |
"""
|
| 179 |
|
|
|
|
| 300 |
|
| 301 |
@cl.on_message
|
| 302 |
async def on_message(message: cl.Message):
|
|
|
|
| 303 |
"""Handle incoming messages with RAG and conversation history."""
|
| 304 |
chain = cl.user_session.get("chain")
|
| 305 |
messages = cl.user_session.get("messages", [])
|
| 306 |
+
|
| 307 |
+
# 1. Initialize callback with stream_final_answer=True
|
| 308 |
+
# This automatically creates an empty message and streams tokens into it
|
| 309 |
cb = cl.AsyncLangchainCallbackHandler(
|
| 310 |
+
stream_final_answer=True,
|
| 311 |
)
|
| 312 |
|
| 313 |
try:
|
| 314 |
+
# Get relevant documents first (fast)
|
| 315 |
relevant_docs = retrieve_relevant_documents(message.content, limit=5)
|
| 316 |
citations = format_docs_with_id(relevant_docs)
|
| 317 |
|
| 318 |
+
# 2. Invoke the chain with the callback
|
| 319 |
+
# The chain will stream chunks to 'cb', which updates the UI in real-time
|
| 320 |
+
# We assign the final result to 'res' just to store it in history
|
| 321 |
+
answer = await chain.ainvoke(
|
| 322 |
+
{"question": message.content},
|
| 323 |
+
config=RunnableConfig(callbacks=[cb])
|
| 324 |
+
)
|
| 325 |
+
|
| 326 |
+
await cl.Message(answer).send()
|
| 327 |
|
| 328 |
+
# 'res' is usually a dict if the chain returns a dict, or a string.
|
| 329 |
+
# Based on your StrOutputParser usage, it should be a string.
|
| 330 |
+
# If your chain returns a dict, you might need to extract the text.
|
| 331 |
+
# answer = res if isinstance(res, str) else res.get("output", "") or res.get("text", "")
|
| 332 |
+
|
| 333 |
+
# 3. Add References as a Step (Collapsible element under the message)
|
| 334 |
+
# Note: Since the message is already sent by the callback, we just append a step.
|
| 335 |
async with cl.Step(name="References") as step:
|
| 336 |
if relevant_docs:
|
| 337 |
step.output = citations
|
| 338 |
else:
|
| 339 |
step.output = "No relevant documents found for this query."
|
| 340 |
|
| 341 |
+
# 4. Update History
|
| 342 |
messages.append(HumanMessage(content=message.content))
|
| 343 |
messages.append(AIMessage(content=answer))
|
|
|
|
| 344 |
cl.user_session.set("messages", messages)
|
| 345 |
|
| 346 |
except Exception as e:
|
delete_collection.py
CHANGED
|
@@ -2,7 +2,7 @@ from pymilvus import MilvusClient
|
|
| 2 |
import os
|
| 3 |
# Initialize client
|
| 4 |
MILVUS_URI = os.getenv("MILVUS_URI", "http://localhost:19530")
|
| 5 |
-
milvus_client = MilvusClient(uri=MILVUS_URI)
|
| 6 |
collection_name = "my_rag_collection"
|
| 7 |
|
| 8 |
# Delete the collection
|
|
|
|
| 2 |
import os
|
| 3 |
# Initialize client
|
| 4 |
MILVUS_URI = os.getenv("MILVUS_URI", "http://localhost:19530")
|
| 5 |
+
milvus_client = MilvusClient(uri=MILVUS_URI, token=os.getenv("MILVUS_API_KEY"))
|
| 6 |
collection_name = "my_rag_collection"
|
| 7 |
|
| 8 |
# Delete the collection
|
modal_app.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import modal
|
| 2 |
+
import subprocess
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
# 1. Replicating your Dockerfile
|
| 6 |
+
# We match Python 3.12 and install the specific system libraries you listed.
|
| 7 |
+
image = (
|
| 8 |
+
modal.Image.debian_slim(python_version="3.12")
|
| 9 |
+
.apt_install("libgl1", "libglib2.0-0") # From your Dockerfile RUN command
|
| 10 |
+
.pip_install_from_requirements("requirements.txt")
|
| 11 |
+
.add_local_dir(".", remote_path="/root/app", ignore=[
|
| 12 |
+
".venv", # Exclude virtual env
|
| 13 |
+
"__pycache__", # Exclude bytecode cache
|
| 14 |
+
".git", # Exclude git repo
|
| 15 |
+
"*.pyc", # Exclude loose bytecode files
|
| 16 |
+
"*.pyo", # Exclude optimized bytecode
|
| 17 |
+
"*.log", # Exclude logs if any
|
| 18 |
+
".env",
|
| 19 |
+
"simple_analysis.py",
|
| 20 |
+
"secrets/",
|
| 21 |
+
"volumes/",
|
| 22 |
+
"data/",
|
| 23 |
+
".vscode/",
|
| 24 |
+
"*.json",
|
| 25 |
+
"RAGAS_test_details/",
|
| 26 |
+
"zilliz-cloud-Dissertation-username-password.txt",
|
| 27 |
+
],) # Replaces COPY . .
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
app = modal.App("neatstandards-rag")
|
| 31 |
+
|
| 32 |
+
# 2. Define Secrets (Environment Variables)
|
| 33 |
+
# You must create a secret named "neatstandards-secrets" in the Modal dashboard
|
| 34 |
+
# containing: NEBIUS_API_KEY, OPENAI_API_KEY, CHAINLIT_AUTH_SECRET, MILVUS_URI, MILVUS_TOKEN
|
| 35 |
+
secrets = [modal.Secret.from_name("neatstandards-secrets")]
|
| 36 |
+
|
| 37 |
+
# 3. The Web Server
|
| 38 |
+
@app.function(
|
| 39 |
+
image=image,
|
| 40 |
+
secrets=secrets,
|
| 41 |
+
scaledown_window=300, # Keep container alive for 5 mins to reduce cold starts
|
| 42 |
+
timeout=600,
|
| 43 |
+
)
|
| 44 |
+
@modal.web_server(port=8000)
|
| 45 |
+
def run():
|
| 46 |
+
# Set the working directory (same as WORKDIR /app)
|
| 47 |
+
os.chdir("/root/app")
|
| 48 |
+
|
| 49 |
+
# NOTE: We skip 'entrypoint.sh' and run Chainlit directly.
|
| 50 |
+
# If your entrypoint.sh does complex setup, let me know!
|
| 51 |
+
cmd = "chainlit run app.py --host 0.0.0.0 --port 8000 --headless"
|
| 52 |
+
|
| 53 |
+
subprocess.Popen(cmd, shell=True).wait()
|
| 54 |
+
|
| 55 |
+
# 4. Database Population Helper
|
| 56 |
+
# Since you can't rely on 'compose.yml' to start Milvus, use this to populate
|
| 57 |
+
# your remote Zilliz/Milvus instance.
|
| 58 |
+
@app.function(image=image, secrets=secrets)
|
| 59 |
+
def populate_db():
|
| 60 |
+
os.chdir("/root/app")
|
| 61 |
+
print("Starting database population...")
|
| 62 |
+
subprocess.run(["python", "populate_db.py"], check=True)
|
| 63 |
+
print("Database population complete!")
|
populate_db.py
CHANGED
|
@@ -29,7 +29,7 @@ MAX_CHARACTERS = 1500
|
|
| 29 |
COMBINE_TEXT_UNDER_N_CHARS = 200
|
| 30 |
|
| 31 |
# Initialize clients
|
| 32 |
-
milvus_client = MilvusClient(uri=MILVUS_URI)
|
| 33 |
|
| 34 |
embedding_model = NebiusEmbeddings(
|
| 35 |
api_key=SecretStr(os.getenv("NEBIUS_API_KEY", os.getenv("OPENAI_API_KEY"))),
|
|
|
|
| 29 |
COMBINE_TEXT_UNDER_N_CHARS = 200
|
| 30 |
|
| 31 |
# Initialize clients
|
| 32 |
+
milvus_client = MilvusClient(uri=MILVUS_URI, token=os.getenv("MILVUS_API_KEY"))
|
| 33 |
|
| 34 |
embedding_model = NebiusEmbeddings(
|
| 35 |
api_key=SecretStr(os.getenv("NEBIUS_API_KEY", os.getenv("OPENAI_API_KEY"))),
|
tests/test_ragas.ipynb
CHANGED
|
@@ -2,7 +2,7 @@
|
|
| 2 |
"cells": [
|
| 3 |
{
|
| 4 |
"cell_type": "code",
|
| 5 |
-
"execution_count":
|
| 6 |
"id": "cb2ff14b",
|
| 7 |
"metadata": {},
|
| 8 |
"outputs": [],
|
|
@@ -13,22 +13,31 @@
|
|
| 13 |
},
|
| 14 |
{
|
| 15 |
"cell_type": "code",
|
| 16 |
-
"execution_count":
|
| 17 |
"id": "6bb3bb7d",
|
| 18 |
"metadata": {},
|
| 19 |
-
"outputs": [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
"source": [
|
| 21 |
"from datasets import Dataset\n",
|
| 22 |
"from app import retrieve_relevant_documents, emb_text, model, embedding_model\n",
|
| 23 |
"from langchain_classic.chains.combine_documents import create_stuff_documents_chain\n",
|
| 24 |
-
"from
|
| 25 |
-
"from
|
| 26 |
"from langchain_core.documents import Document"
|
| 27 |
]
|
| 28 |
},
|
| 29 |
{
|
| 30 |
"cell_type": "code",
|
| 31 |
-
"execution_count":
|
| 32 |
"id": "e572fb31",
|
| 33 |
"metadata": {},
|
| 34 |
"outputs": [],
|
|
@@ -100,7 +109,7 @@
|
|
| 100 |
},
|
| 101 |
{
|
| 102 |
"cell_type": "code",
|
| 103 |
-
"execution_count":
|
| 104 |
"id": "330ee35d",
|
| 105 |
"metadata": {},
|
| 106 |
"outputs": [],
|
|
@@ -144,7 +153,7 @@
|
|
| 144 |
},
|
| 145 |
{
|
| 146 |
"cell_type": "code",
|
| 147 |
-
"execution_count":
|
| 148 |
"id": "ba3810dd",
|
| 149 |
"metadata": {},
|
| 150 |
"outputs": [
|
|
@@ -152,18 +161,18 @@
|
|
| 152 |
"name": "stdout",
|
| 153 |
"output_type": "stream",
|
| 154 |
"text": [
|
| 155 |
-
"2025-
|
| 156 |
"Relevant documents: {'text': 'What is a provision, criterion or practice? The phrase “provision, criterion or practice” is not defined by the Act. These concepts should be construed widely so as to include, for example, any formal or informal policies, rules, practices, arrangements, criteria, procedures, activities or provisions. They can cover one-off decisions and actions. In simple terms, they are about the way an education provider does things. Example:', 'metadata': {'source': 'data\\\\technical-guidance-further-higher-education.docx', 'file_directory': 'data', 'filename': 'technical-guidance-further-higher-education.docx', 'last_modified': '2025-07-02T21:00:50', 'page_number': 95, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'CompositeElement', 'element_id': '3ae881ad6f81487213a9e234debf0921'}, 'score': 0.7807720899581909}\n",
|
| 157 |
-
"2025-
|
| 158 |
-
"2025-
|
| 159 |
-
"2025-
|
| 160 |
"Relevant documents: {'text': '“Reasonable” means having regard to all of the circumstances including the nature of the act and how obviously discriminatory it is, the authority of the person making the statement and the knowledge that the helper has or ought to have.', 'metadata': {'source': 'data\\\\technical-guidance-further-higher-education.docx', 'file_directory': 'data', 'filename': 'technical-guidance-further-higher-education.docx', 'last_modified': '2025-07-02T21:00:50', 'page_number': 36, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'CompositeElement', 'element_id': 'c5e3a60e2a6ccc88e0eff961f645a962'}, 'score': 0.7367081046104431}\n",
|
| 161 |
-
"2025-
|
| 162 |
-
"2025-
|
| 163 |
-
"2025-
|
| 164 |
"Relevant documents: {'text': 'The Act states that disadvantage must be substantial, which is defined as more than minor or trivial. Whether such a disadvantage exists in a particular case is a question of fact, and is assessed on an objective basis. s212(1)', 'metadata': {'source': 'data\\\\technical-guidance-further-higher-education.docx', 'file_directory': 'data', 'filename': 'technical-guidance-further-higher-education.docx', 'last_modified': '2025-07-02T21:00:50', 'page_number': 89, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'CompositeElement', 'element_id': 'b9e8ef04daf9150c9f7e32736b53df5b'}, 'score': 0.8376985788345337}\n",
|
| 165 |
-
"2025-
|
| 166 |
-
"2025-
|
| 167 |
]
|
| 168 |
}
|
| 169 |
],
|
|
@@ -204,9 +213,38 @@
|
|
| 204 |
"\n"
|
| 205 |
]
|
| 206 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 207 |
{
|
| 208 |
"cell_type": "code",
|
| 209 |
"execution_count": 7,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 210 |
"id": "3e016be2",
|
| 211 |
"metadata": {},
|
| 212 |
"outputs": [
|
|
@@ -221,169 +259,143 @@
|
|
| 221 |
"name": "stdout",
|
| 222 |
"output_type": "stream",
|
| 223 |
"text": [
|
| 224 |
-
"2025-
|
| 225 |
-
"2025-
|
| 226 |
-
"2025-
|
| 227 |
-
"2025-
|
| 228 |
-
"2025-
|
| 229 |
]
|
| 230 |
},
|
| 231 |
{
|
| 232 |
"name": "stderr",
|
| 233 |
"output_type": "stream",
|
| 234 |
"text": [
|
| 235 |
-
"Evaluating: 8%|β | 1/12 [00:
|
| 236 |
]
|
| 237 |
},
|
| 238 |
{
|
| 239 |
"name": "stdout",
|
| 240 |
"output_type": "stream",
|
| 241 |
"text": [
|
| 242 |
-
"2025-
|
| 243 |
-
"2025-
|
| 244 |
-
"2025-
|
| 245 |
-
"2025-
|
| 246 |
-
"2025-
|
| 247 |
-
"2025-
|
| 248 |
-
"2025-
|
| 249 |
-
"2025-
|
| 250 |
-
"2025-
|
| 251 |
-
"2025-
|
| 252 |
-
"2025-
|
| 253 |
-
"2025-
|
| 254 |
-
"2025-
|
|
|
|
|
|
|
| 255 |
]
|
| 256 |
},
|
| 257 |
{
|
| 258 |
"name": "stderr",
|
| 259 |
"output_type": "stream",
|
| 260 |
"text": [
|
| 261 |
-
"Evaluating: 17%|ββ | 2/12 [00:
|
| 262 |
]
|
| 263 |
},
|
| 264 |
{
|
| 265 |
"name": "stdout",
|
| 266 |
"output_type": "stream",
|
| 267 |
"text": [
|
| 268 |
-
"2025-
|
| 269 |
-
"2025-
|
| 270 |
-
"2025-
|
| 271 |
-
"2025-
|
| 272 |
-
"2025-11-27 02:11:38 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 273 |
-
"2025-11-27 02:11:38 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 274 |
]
|
| 275 |
},
|
| 276 |
{
|
| 277 |
"name": "stderr",
|
| 278 |
"output_type": "stream",
|
| 279 |
"text": [
|
| 280 |
-
"Evaluating:
|
| 281 |
]
|
| 282 |
},
|
| 283 |
{
|
| 284 |
"name": "stdout",
|
| 285 |
"output_type": "stream",
|
| 286 |
"text": [
|
| 287 |
-
"2025-
|
| 288 |
-
"2025-
|
| 289 |
]
|
| 290 |
},
|
| 291 |
{
|
| 292 |
"name": "stderr",
|
| 293 |
"output_type": "stream",
|
| 294 |
"text": [
|
| 295 |
-
"Evaluating: 50%|βββββ | 6/12 [00:
|
| 296 |
]
|
| 297 |
},
|
| 298 |
{
|
| 299 |
"name": "stdout",
|
| 300 |
"output_type": "stream",
|
| 301 |
"text": [
|
| 302 |
-
"2025-
|
| 303 |
-
"2025-
|
| 304 |
-
"2025-
|
| 305 |
-
"2025-
|
| 306 |
-
"2025-
|
| 307 |
-
"2025-11-27 02:12:06 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 308 |
-
"2025-11-27 02:12:08 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 309 |
-
"2025-11-27 02:12:09 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 310 |
]
|
| 311 |
},
|
| 312 |
{
|
| 313 |
"name": "stderr",
|
| 314 |
"output_type": "stream",
|
| 315 |
"text": [
|
| 316 |
-
"Evaluating: 58%|ββββββ | 7/12 [00:
|
| 317 |
]
|
| 318 |
},
|
| 319 |
{
|
| 320 |
"name": "stdout",
|
| 321 |
"output_type": "stream",
|
| 322 |
"text": [
|
| 323 |
-
"2025-
|
|
|
|
|
|
|
|
|
|
| 324 |
]
|
| 325 |
},
|
| 326 |
{
|
| 327 |
"name": "stderr",
|
| 328 |
"output_type": "stream",
|
| 329 |
"text": [
|
| 330 |
-
"Evaluating: 67%|βββββββ | 8/12 [00:
|
| 331 |
]
|
| 332 |
},
|
| 333 |
{
|
| 334 |
"name": "stdout",
|
| 335 |
"output_type": "stream",
|
| 336 |
"text": [
|
| 337 |
-
"2025-
|
|
|
|
|
|
|
| 338 |
]
|
| 339 |
},
|
| 340 |
{
|
| 341 |
"name": "stderr",
|
| 342 |
"output_type": "stream",
|
| 343 |
"text": [
|
| 344 |
-
"Evaluating: 75%|ββββββββ | 9/12 [
|
| 345 |
]
|
| 346 |
},
|
| 347 |
{
|
| 348 |
"name": "stdout",
|
| 349 |
"output_type": "stream",
|
| 350 |
"text": [
|
| 351 |
-
"2025-
|
| 352 |
]
|
| 353 |
},
|
| 354 |
{
|
| 355 |
"name": "stderr",
|
| 356 |
"output_type": "stream",
|
| 357 |
"text": [
|
| 358 |
-
"Evaluating:
|
| 359 |
-
]
|
| 360 |
-
},
|
| 361 |
-
{
|
| 362 |
-
"name": "stdout",
|
| 363 |
-
"output_type": "stream",
|
| 364 |
-
"text": [
|
| 365 |
-
"2025-11-27 02:12:49 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 366 |
-
]
|
| 367 |
-
},
|
| 368 |
-
{
|
| 369 |
-
"name": "stderr",
|
| 370 |
-
"output_type": "stream",
|
| 371 |
-
"text": [
|
| 372 |
-
"Evaluating: 92%|██████████| 11/12 [01:37<00:11, 11.16s/it]"
|
| 373 |
-
]
|
| 374 |
-
},
|
| 375 |
-
{
|
| 376 |
-
"name": "stdout",
|
| 377 |
-
"output_type": "stream",
|
| 378 |
-
"text": [
|
| 379 |
-
"2025-11-27 02:12:55 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 380 |
-
]
|
| 381 |
-
},
|
| 382 |
-
{
|
| 383 |
-
"name": "stderr",
|
| 384 |
-
"output_type": "stream",
|
| 385 |
-
"text": [
|
| 386 |
-
"Evaluating: 100%|██████████| 12/12 [01:42<00:00, 8.57s/it]\n"
|
| 387 |
]
|
| 388 |
}
|
| 389 |
],
|
|
@@ -397,7 +409,7 @@
|
|
| 397 |
")\n",
|
| 398 |
"\n",
|
| 399 |
"result = evaluate(\n",
|
| 400 |
-
" llm=
|
| 401 |
" embeddings=embedding_model,\n",
|
| 402 |
" dataset = dataset, \n",
|
| 403 |
" metrics=[\n",
|
|
@@ -420,7 +432,7 @@
|
|
| 420 |
},
|
| 421 |
{
|
| 422 |
"cell_type": "code",
|
| 423 |
-
"execution_count":
|
| 424 |
"id": "d8514ff3",
|
| 425 |
"metadata": {
|
| 426 |
"slideshow": {
|
|
@@ -443,19 +455,19 @@
|
|
| 443 |
"2 [The Act states that disadvantage must be subs... \n",
|
| 444 |
"\n",
|
| 445 |
" response \\\n",
|
| 446 |
-
"0
|
| 447 |
-
"1
|
| 448 |
-
"2
|
| 449 |
"\n",
|
| 450 |
" reference context_precision \\\n",
|
| 451 |
-
"0 The Equality and Human Rights Commission (EHRC...
|
| 452 |
-
"1 There are two key considerations of 'reasonabl...
|
| 453 |
-
"2 'Substantial' is defined in the Act as 'more t...
|
| 454 |
"\n",
|
| 455 |
" context_recall faithfulness answer_relevancy \n",
|
| 456 |
-
"0
|
| 457 |
-
"1
|
| 458 |
-
"2
|
| 459 |
]
|
| 460 |
}
|
| 461 |
],
|
|
|
|
| 2 |
"cells": [
|
| 3 |
{
|
| 4 |
"cell_type": "code",
|
| 5 |
+
"execution_count": 1,
|
| 6 |
"id": "cb2ff14b",
|
| 7 |
"metadata": {},
|
| 8 |
"outputs": [],
|
|
|
|
| 13 |
},
|
| 14 |
{
|
| 15 |
"cell_type": "code",
|
| 16 |
+
"execution_count": 2,
|
| 17 |
"id": "6bb3bb7d",
|
| 18 |
"metadata": {},
|
| 19 |
+
"outputs": [
|
| 20 |
+
{
|
| 21 |
+
"name": "stderr",
|
| 22 |
+
"output_type": "stream",
|
| 23 |
+
"text": [
|
| 24 |
+
"f:\\Dissertation\\prod-rag-chat\\.venv\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
| 25 |
+
" from .autonotebook import tqdm as notebook_tqdm\n"
|
| 26 |
+
]
|
| 27 |
+
}
|
| 28 |
+
],
|
| 29 |
"source": [
|
| 30 |
"from datasets import Dataset\n",
|
| 31 |
"from app import retrieve_relevant_documents, emb_text, model, embedding_model\n",
|
| 32 |
"from langchain_classic.chains.combine_documents import create_stuff_documents_chain\n",
|
| 33 |
+
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
| 34 |
+
"from langchain_core.runnables import RunnableLambda\n",
|
| 35 |
"from langchain_core.documents import Document"
|
| 36 |
]
|
| 37 |
},
|
| 38 |
{
|
| 39 |
"cell_type": "code",
|
| 40 |
+
"execution_count": 3,
|
| 41 |
"id": "e572fb31",
|
| 42 |
"metadata": {},
|
| 43 |
"outputs": [],
|
|
|
|
| 109 |
},
|
| 110 |
{
|
| 111 |
"cell_type": "code",
|
| 112 |
+
"execution_count": 4,
|
| 113 |
"id": "330ee35d",
|
| 114 |
"metadata": {},
|
| 115 |
"outputs": [],
|
|
|
|
| 153 |
},
|
| 154 |
{
|
| 155 |
"cell_type": "code",
|
| 156 |
+
"execution_count": 5,
|
| 157 |
"id": "ba3810dd",
|
| 158 |
"metadata": {},
|
| 159 |
"outputs": [
|
|
|
|
| 161 |
"name": "stdout",
|
| 162 |
"output_type": "stream",
|
| 163 |
"text": [
|
| 164 |
+
"2025-12-20 19:42:27 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 165 |
"Relevant documents: {'text': 'What is a provision, criterion or practice? The phrase “provision, criterion or practice” is not defined by the Act. These concepts should be construed widely so as to include, for example, any formal or informal policies, rules, practices, arrangements, criteria, procedures, activities or provisions. They can cover one-off decisions and actions. In simple terms, they are about the way an education provider does things. Example:', 'metadata': {'source': 'data\\\\technical-guidance-further-higher-education.docx', 'file_directory': 'data', 'filename': 'technical-guidance-further-higher-education.docx', 'last_modified': '2025-07-02T21:00:50', 'page_number': 95, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'CompositeElement', 'element_id': '3ae881ad6f81487213a9e234debf0921'}, 'score': 0.7807720899581909}\n",
|
| 166 |
+
"2025-12-20 19:42:29 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 167 |
+
"2025-12-20 19:42:32 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 168 |
+
"2025-12-20 19:42:32 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 169 |
"Relevant documents: {'text': '“Reasonable” means having regard to all of the circumstances including the nature of the act and how obviously discriminatory it is, the authority of the person making the statement and the knowledge that the helper has or ought to have.', 'metadata': {'source': 'data\\\\technical-guidance-further-higher-education.docx', 'file_directory': 'data', 'filename': 'technical-guidance-further-higher-education.docx', 'last_modified': '2025-07-02T21:00:50', 'page_number': 36, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'CompositeElement', 'element_id': 'c5e3a60e2a6ccc88e0eff961f645a962'}, 'score': 0.7367081046104431}\n",
|
| 170 |
+
"2025-12-20 19:42:33 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 171 |
+
"2025-12-20 19:42:36 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 172 |
+
"2025-12-20 19:42:37 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 173 |
"Relevant documents: {'text': 'The Act states that disadvantage must be substantial, which is defined as more than minor or trivial. Whether such a disadvantage exists in a particular case is a question of fact, and is assessed on an objective basis. s212(1)', 'metadata': {'source': 'data\\\\technical-guidance-further-higher-education.docx', 'file_directory': 'data', 'filename': 'technical-guidance-further-higher-education.docx', 'last_modified': '2025-07-02T21:00:50', 'page_number': 89, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'CompositeElement', 'element_id': 'b9e8ef04daf9150c9f7e32736b53df5b'}, 'score': 0.8376985788345337}\n",
|
| 174 |
+
"2025-12-20 19:42:38 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 175 |
+
"2025-12-20 19:42:40 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n"
|
| 176 |
]
|
| 177 |
}
|
| 178 |
],
|
|
|
|
| 213 |
"\n"
|
| 214 |
]
|
| 215 |
},
|
| 216 |
+
{
|
| 217 |
+
"cell_type": "code",
|
| 218 |
+
"execution_count": 6,
|
| 219 |
+
"id": "25c00c40",
|
| 220 |
+
"metadata": {},
|
| 221 |
+
"outputs": [],
|
| 222 |
+
"source": [
|
| 223 |
+
"from langchain_nebius import ChatNebius\n",
|
| 224 |
+
"from pydantic import SecretStr\n",
|
| 225 |
+
"import os"
|
| 226 |
+
]
|
| 227 |
+
},
|
| 228 |
{
|
| 229 |
"cell_type": "code",
|
| 230 |
"execution_count": 7,
|
| 231 |
+
"id": "2994b26e",
|
| 232 |
+
"metadata": {},
|
| 233 |
+
"outputs": [],
|
| 234 |
+
"source": [
|
| 235 |
+
"eval_model = ChatNebius(\n",
|
| 236 |
+
" model=\"Qwen/Qwen3-235B-A22B-Instruct-2507\",\n",
|
| 237 |
+
" streaming=False, # <--- CRITICAL: Must be False for Ragas\n",
|
| 238 |
+
" temperature=0, # Lower temperature for the judge is better\n",
|
| 239 |
+
" max_tokens=8192,\n",
|
| 240 |
+
" top_p=0.95,\n",
|
| 241 |
+
" api_key=SecretStr(os.getenv(\"OPENAI_API_KEY\")),\n",
|
| 242 |
+
")"
|
| 243 |
+
]
|
| 244 |
+
},
|
| 245 |
+
{
|
| 246 |
+
"cell_type": "code",
|
| 247 |
+
"execution_count": 8,
|
| 248 |
"id": "3e016be2",
|
| 249 |
"metadata": {},
|
| 250 |
"outputs": [
|
|
|
|
| 259 |
"name": "stdout",
|
| 260 |
"output_type": "stream",
|
| 261 |
"text": [
|
| 262 |
+
"2025-12-20 19:42:46 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 263 |
+
"2025-12-20 19:42:48 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 264 |
+
"2025-12-20 19:42:49 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 265 |
+
"2025-12-20 19:42:49 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 266 |
+
"2025-12-20 19:42:49 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n"
|
| 267 |
]
|
| 268 |
},
|
| 269 |
{
|
| 270 |
"name": "stderr",
|
| 271 |
"output_type": "stream",
|
| 272 |
"text": [
|
| 273 |
+
"Evaluating: 8%|█ | 1/12 [00:06<01:08, 6.19s/it]"
|
| 274 |
]
|
| 275 |
},
|
| 276 |
{
|
| 277 |
"name": "stdout",
|
| 278 |
"output_type": "stream",
|
| 279 |
"text": [
|
| 280 |
+
"2025-12-20 19:42:50 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 281 |
+
"2025-12-20 19:42:50 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 282 |
+
"2025-12-20 19:42:50 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 283 |
+
"2025-12-20 19:42:50 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 284 |
+
"2025-12-20 19:42:50 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 285 |
+
"2025-12-20 19:42:50 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 286 |
+
"2025-12-20 19:42:50 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 287 |
+
"2025-12-20 19:42:55 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 288 |
+
"2025-12-20 19:42:56 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 289 |
+
"2025-12-20 19:42:57 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 290 |
+
"2025-12-20 19:42:57 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 291 |
+
"2025-12-20 19:42:59 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 292 |
+
"2025-12-20 19:43:00 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 293 |
+
"2025-12-20 19:43:00 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 294 |
+
"2025-12-20 19:43:01 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n"
|
| 295 |
]
|
| 296 |
},
|
| 297 |
{
|
| 298 |
"name": "stderr",
|
| 299 |
"output_type": "stream",
|
| 300 |
"text": [
|
| 301 |
+
"Evaluating: 17%|ββ | 2/12 [00:20<01:49, 10.98s/it]"
|
| 302 |
]
|
| 303 |
},
|
| 304 |
{
|
| 305 |
"name": "stdout",
|
| 306 |
"output_type": "stream",
|
| 307 |
"text": [
|
| 308 |
+
"2025-12-20 19:43:05 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 309 |
+
"2025-12-20 19:43:05 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 310 |
+
"2025-12-20 19:43:05 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 311 |
+
"2025-12-20 19:43:05 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
|
|
|
|
|
|
| 312 |
]
|
| 313 |
},
|
| 314 |
{
|
| 315 |
"name": "stderr",
|
| 316 |
"output_type": "stream",
|
| 317 |
"text": [
|
| 318 |
+
"Evaluating: 42%|βββββ | 5/12 [00:26<00:32, 4.60s/it]"
|
| 319 |
]
|
| 320 |
},
|
| 321 |
{
|
| 322 |
"name": "stdout",
|
| 323 |
"output_type": "stream",
|
| 324 |
"text": [
|
| 325 |
+
"2025-12-20 19:43:11 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 326 |
+
"2025-12-20 19:43:11 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 327 |
]
|
| 328 |
},
|
| 329 |
{
|
| 330 |
"name": "stderr",
|
| 331 |
"output_type": "stream",
|
| 332 |
"text": [
|
| 333 |
+
"Evaluating: 50%|βββββ | 6/12 [00:28<00:22, 3.83s/it]"
|
| 334 |
]
|
| 335 |
},
|
| 336 |
{
|
| 337 |
"name": "stdout",
|
| 338 |
"output_type": "stream",
|
| 339 |
"text": [
|
| 340 |
+
"2025-12-20 19:43:17 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 341 |
+
"2025-12-20 19:43:18 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 342 |
+
"2025-12-20 19:43:18 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 343 |
+
"2025-12-20 19:43:22 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 344 |
+
"2025-12-20 19:43:22 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
|
|
|
|
|
|
|
|
|
| 345 |
]
|
| 346 |
},
|
| 347 |
{
|
| 348 |
"name": "stderr",
|
| 349 |
"output_type": "stream",
|
| 350 |
"text": [
|
| 351 |
+
"Evaluating: 58%|ββββββ | 7/12 [00:41<00:31, 6.21s/it]"
|
| 352 |
]
|
| 353 |
},
|
| 354 |
{
|
| 355 |
"name": "stdout",
|
| 356 |
"output_type": "stream",
|
| 357 |
"text": [
|
| 358 |
+
"2025-12-20 19:43:25 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 359 |
+
"2025-12-20 19:43:25 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 360 |
+
"2025-12-20 19:43:25 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 361 |
+
"2025-12-20 19:43:28 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 362 |
]
|
| 363 |
},
|
| 364 |
{
|
| 365 |
"name": "stderr",
|
| 366 |
"output_type": "stream",
|
| 367 |
"text": [
|
| 368 |
+
"Evaluating: 67%|βββββββ | 8/12 [00:45<00:23, 5.78s/it]"
|
| 369 |
]
|
| 370 |
},
|
| 371 |
{
|
| 372 |
"name": "stdout",
|
| 373 |
"output_type": "stream",
|
| 374 |
"text": [
|
| 375 |
+
"2025-12-20 19:43:31 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 376 |
+
"2025-12-20 19:43:31 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 377 |
+
"2025-12-20 19:43:31 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 378 |
]
|
| 379 |
},
|
| 380 |
{
|
| 381 |
"name": "stderr",
|
| 382 |
"output_type": "stream",
|
| 383 |
"text": [
|
| 384 |
+
"Evaluating: 75%|ββββββββ | 9/12 [00:52<00:17, 5.95s/it]"
|
| 385 |
]
|
| 386 |
},
|
| 387 |
{
|
| 388 |
"name": "stdout",
|
| 389 |
"output_type": "stream",
|
| 390 |
"text": [
|
| 391 |
+
"2025-12-20 19:43:36 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 392 |
]
|
| 393 |
},
|
| 394 |
{
|
| 395 |
"name": "stderr",
|
| 396 |
"output_type": "stream",
|
| 397 |
"text": [
|
| 398 |
+
"Evaluating: 100%|ββββββββββ| 12/12 [00:53<00:00, 4.47s/it]\n"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 399 |
]
|
| 400 |
}
|
| 401 |
],
|
|
|
|
| 409 |
")\n",
|
| 410 |
"\n",
|
| 411 |
"result = evaluate(\n",
|
| 412 |
+
" llm=eval_model,\n",
|
| 413 |
" embeddings=embedding_model,\n",
|
| 414 |
" dataset = dataset, \n",
|
| 415 |
" metrics=[\n",
|
|
|
|
| 432 |
},
|
| 433 |
{
|
| 434 |
"cell_type": "code",
|
| 435 |
+
"execution_count": 9,
|
| 436 |
"id": "d8514ff3",
|
| 437 |
"metadata": {
|
| 438 |
"slideshow": {
|
|
|
|
| 455 |
"2 [The Act states that disadvantage must be subs... \n",
|
| 456 |
"\n",
|
| 457 |
" response \\\n",
|
| 458 |
+
"0 **Provisions, Criteria and Practices (PCPs)** ... \n",
|
| 459 |
+
"1 The term **'reasonable'** in the context of di... \n",
|
| 460 |
+
"2 'Substantial disadvantage' refers to a disadva... \n",
|
| 461 |
"\n",
|
| 462 |
" reference context_precision \\\n",
|
| 463 |
+
"0 The Equality and Human Rights Commission (EHRC... 1.000000 \n",
|
| 464 |
+
"1 There are two key considerations of 'reasonabl... 0.533333 \n",
|
| 465 |
+
"2 'Substantial' is defined in the Act as 'more t... 0.866667 \n",
|
| 466 |
"\n",
|
| 467 |
" context_recall faithfulness answer_relevancy \n",
|
| 468 |
+
"0 0.857143 0.714286 0.704690 \n",
|
| 469 |
+
"1 0.333333 0.774194 0.716996 \n",
|
| 470 |
+
"2 0.400000 0.875000 0.834463 \n"
|
| 471 |
]
|
| 472 |
}
|
| 473 |
],
|
tests/test_ragas_llama_test.ipynb
ADDED
|
@@ -0,0 +1,488 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 2,
|
| 6 |
+
"id": "cb2ff14b",
|
| 7 |
+
"metadata": {},
|
| 8 |
+
"outputs": [],
|
| 9 |
+
"source": [
|
| 10 |
+
"import sys\n",
|
| 11 |
+
"sys.path.append('..')"
|
| 12 |
+
]
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"cell_type": "code",
|
| 16 |
+
"execution_count": 3,
|
| 17 |
+
"id": "6bb3bb7d",
|
| 18 |
+
"metadata": {},
|
| 19 |
+
"outputs": [],
|
| 20 |
+
"source": [
|
| 21 |
+
"from datasets import Dataset\n",
|
| 22 |
+
"from app import retrieve_relevant_documents, emb_text, model, embedding_model\n",
|
| 23 |
+
"from langchain_classic.chains.combine_documents import create_stuff_documents_chain\n",
|
| 24 |
+
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
| 25 |
+
"from langchain.schema.runnable import RunnableLambda\n",
|
| 26 |
+
"from langchain_core.documents import Document"
|
| 27 |
+
]
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"cell_type": "code",
|
| 31 |
+
"execution_count": 4,
|
| 32 |
+
"id": "e572fb31",
|
| 33 |
+
"metadata": {},
|
| 34 |
+
"outputs": [],
|
| 35 |
+
"source": [
|
| 36 |
+
"def setup_standalone_rag_chain():\n",
|
| 37 |
+
" \"\"\"Setup a standalone RAG chain for testing without Chainlit session.\"\"\"\n",
|
| 38 |
+
" \n",
|
| 39 |
+
" def get_context_and_history(inputs):\n",
|
| 40 |
+
" \"\"\"Retrieve context without session history.\"\"\"\n",
|
| 41 |
+
" query = inputs[\"question\"]\n",
|
| 42 |
+
" relevant_docs = retrieve_relevant_documents(query, limit=5)\n",
|
| 43 |
+
" print(\"Relevant documents:\", relevant_docs[0] if relevant_docs else \"No documents found\")\n",
|
| 44 |
+
" \n",
|
| 45 |
+
" # Convert dictionaries to Document objects for LangChain\n",
|
| 46 |
+
" doc_objects = []\n",
|
| 47 |
+
" for doc in relevant_docs:\n",
|
| 48 |
+
" doc_obj = Document(\n",
|
| 49 |
+
" page_content=doc.get('text', ''),\n",
|
| 50 |
+
" metadata=doc.get('metadata', {})\n",
|
| 51 |
+
" )\n",
|
| 52 |
+
" doc_objects.append(doc_obj)\n",
|
| 53 |
+
"\n",
|
| 54 |
+
" return {\n",
|
| 55 |
+
" \"question\": query,\n",
|
| 56 |
+
" \"context\": doc_objects,\n",
|
| 57 |
+
" \"history\": [] # Empty history for testing\n",
|
| 58 |
+
" }\n",
|
| 59 |
+
" \n",
|
| 60 |
+
" system_prompt = \"\"\"You are a helpful assistant specialising in developing non-discriminatory competence standards and disability support, reasonable adjustments, and equality legislation.\n",
|
| 61 |
+
"\n",
|
| 62 |
+
"When answering questions, you should:\n",
|
| 63 |
+
"1. Use the provided context documents to inform your response\n",
|
| 64 |
+
"2. Be accurate and helpful\n",
|
| 65 |
+
"3. If the context doesn't contain relevant information, say so clearly\n",
|
| 66 |
+
"4. Always reply in English\n",
|
| 67 |
+
"5. Provide clear recommendations and examples wherever applicable\n",
|
| 68 |
+
"6. Do not make assumptions about the user's knowledge or background\n",
|
| 69 |
+
"7. If the user asks for a specific law or regulation, provide a brief explanation and cite relevant documents if available.\n",
|
| 70 |
+
"8. Do not overemphasize disability in your responses, but rather focus on the support and adjustments that can be made to ensure equality and inclusivity.\n",
|
| 71 |
+
"9. If the user query explicitly asks for a disability-related topic, provide a well-informed response based on the context documents.\n",
|
| 72 |
+
"\n",
|
| 73 |
+
"Context documents:\n",
|
| 74 |
+
"{context} \n",
|
| 75 |
+
"\n",
|
| 76 |
+
"Please provide a clear response using the above context\n",
|
| 77 |
+
"\"\"\"\n",
|
| 78 |
+
"\n",
|
| 79 |
+
" prompt = ChatPromptTemplate.from_messages([\n",
|
| 80 |
+
" (\"system\", system_prompt),\n",
|
| 81 |
+
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
| 82 |
+
" (\"human\", \"{question}\"),\n",
|
| 83 |
+
" ])\n",
|
| 84 |
+
"\n",
|
| 85 |
+
" question_answer_chain = create_stuff_documents_chain(model, prompt)\n",
|
| 86 |
+
" \n",
|
| 87 |
+
" # Use a custom chain that properly handles our context and history\n",
|
| 88 |
+
" def process_input_and_format(inputs):\n",
|
| 89 |
+
" context_data = get_context_and_history(inputs)\n",
|
| 90 |
+
" return {\n",
|
| 91 |
+
" \"context\": context_data[\"context\"],\n",
|
| 92 |
+
" \"question\": context_data[\"question\"],\n",
|
| 93 |
+
" \"history\": context_data[\"history\"]\n",
|
| 94 |
+
" }\n",
|
| 95 |
+
" \n",
|
| 96 |
+
" chain = RunnableLambda(process_input_and_format) | question_answer_chain\n",
|
| 97 |
+
" \n",
|
| 98 |
+
" return chain"
|
| 99 |
+
]
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"cell_type": "code",
|
| 103 |
+
"execution_count": 5,
|
| 104 |
+
"id": "330ee35d",
|
| 105 |
+
"metadata": {},
|
| 106 |
+
"outputs": [],
|
| 107 |
+
"source": [
|
| 108 |
+
"\n",
|
| 109 |
+
"# Setup the RAG chain\n",
|
| 110 |
+
"rag_chain = setup_standalone_rag_chain()\n",
|
| 111 |
+
"\n",
|
| 112 |
+
"questions = [\"What are Provisions, Criteria and Practices?\", \n",
|
| 113 |
+
" \"What is 'reasonable'?\",\n",
|
| 114 |
+
" \"What is 'substantial disadvantage'?\",\n",
|
| 115 |
+
" ]\n",
|
| 116 |
+
"ground_truths = [\n",
|
| 117 |
+
" \"\"\"The Equality and Human Rights Commission (EHRC) interprets PCPs as including:3 \n",
|
| 118 |
+
"+ arrangements (for example, for deciding who to admit) \n",
|
| 119 |
+
"+ the way that education, or access to any benefit, service or facility is offered or provided \n",
|
| 120 |
+
"+ one-off or discretionary decisions \n",
|
| 121 |
+
"+ proposals or directions to do something in a particular way \n",
|
| 122 |
+
"+ formal and informal policies \n",
|
| 123 |
+
"+ rules\"\"\",\n",
|
| 124 |
+
"\n",
|
| 125 |
+
" \"\"\"There are two key considerations of 'reasonableness' which can help when thinking through \n",
|
| 126 |
+
"when an adjustment may be reasonable:4 \n",
|
| 127 |
+
"+ Could the adjustment be practicable in its application (is it possible)? \n",
|
| 128 |
+
"+ Could the adjustment be effective in achieving its aim (will it work)? \n",
|
| 129 |
+
"There is no need to prove that the adjustment is practicable and effective in advance, just \n",
|
| 130 |
+
"that it might be. An adjustment should not be considered unreasonable if it does not remove \n",
|
| 131 |
+
"the disadvantage fully; an adjustment which partially removes or reduces substantial \n",
|
| 132 |
+
"disadvantage is also likely to be reasonable.\"\"\",\n",
|
| 133 |
+
"\n",
|
| 134 |
+
" \"\"\"'Substantial' is defined in the Act as 'more than minor or trivial'. \n",
|
| 135 |
+
"Examples of disadvantage recognised by the EHRC include: \n",
|
| 136 |
+
"+ The additional time and effort expended by a disabled student \n",
|
| 137 |
+
"+ The inconvenience, indignity, discomfort, or perceived disadvantage suffered by a \n",
|
| 138 |
+
"disabled student \n",
|
| 139 |
+
"+ The loss of opportunity or diminished progress experienced by a disabled student. \"\"\"]\n",
|
| 140 |
+
"\n",
|
| 141 |
+
"\n",
|
| 142 |
+
"\n"
|
| 143 |
+
]
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"cell_type": "code",
|
| 147 |
+
"execution_count": 6,
|
| 148 |
+
"id": "ba3810dd",
|
| 149 |
+
"metadata": {},
|
| 150 |
+
"outputs": [
|
| 151 |
+
{
|
| 152 |
+
"name": "stdout",
|
| 153 |
+
"output_type": "stream",
|
| 154 |
+
"text": [
|
| 155 |
+
"2025-11-27 02:10:22 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 156 |
+
"Relevant documents: {'text': 'What is a provision, criterion or practice? The phrase βprovision, criterion or practiceβ is not defined by the Act. These concepts should be construed widely so as to include, for example, any formal or informal policies, rules, practices, arrangements, criteria, procedures, activities or provisions. They can cover one-off decisions and actions. In simple terms, they are about the way an education provider does things. Example:', 'metadata': {'source': 'data\\\\technical-guidance-further-higher-education.docx', 'file_directory': 'data', 'filename': 'technical-guidance-further-higher-education.docx', 'last_modified': '2025-07-02T21:00:50', 'page_number': 95, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'CompositeElement', 'element_id': '3ae881ad6f81487213a9e234debf0921'}, 'score': 0.7807720899581909}\n",
|
| 157 |
+
"2025-11-27 02:10:34 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 158 |
+
"2025-11-27 02:10:34 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 159 |
+
"2025-11-27 02:10:35 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 160 |
+
"Relevant documents: {'text': 'βReasonableβ means having regard to all of the circumstances including the nature of the act and how obviously discriminatory it is, the authority of the person making the statement and the knowledge that the helper has or ought to have.', 'metadata': {'source': 'data\\\\technical-guidance-further-higher-education.docx', 'file_directory': 'data', 'filename': 'technical-guidance-further-higher-education.docx', 'last_modified': '2025-07-02T21:00:50', 'page_number': 36, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'CompositeElement', 'element_id': 'c5e3a60e2a6ccc88e0eff961f645a962'}, 'score': 0.7367081046104431}\n",
|
| 161 |
+
"2025-11-27 02:10:51 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 162 |
+
"2025-11-27 02:10:52 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 163 |
+
"2025-11-27 02:10:52 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 164 |
+
"Relevant documents: {'text': 'The Act states that disadvantage must be substantial, which is defined as more than minor or trivial. Whether such a disadvantage exists in a particular case is a question of fact, and is assessed on an objective basis. s212(1)', 'metadata': {'source': 'data\\\\technical-guidance-further-higher-education.docx', 'file_directory': 'data', 'filename': 'technical-guidance-further-higher-education.docx', 'last_modified': '2025-07-02T21:00:50', 'page_number': 89, 'languages': ['eng'], 'filetype': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'category': 'CompositeElement', 'element_id': 'b9e8ef04daf9150c9f7e32736b53df5b'}, 'score': 0.8376985788345337}\n",
|
| 165 |
+
"2025-11-27 02:11:10 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 166 |
+
"2025-11-27 02:11:10 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n"
|
| 167 |
+
]
|
| 168 |
+
}
|
| 169 |
+
],
|
| 170 |
+
"source": [
|
| 171 |
+
"answers = []\n",
|
| 172 |
+
"contexts = []\n",
|
| 173 |
+
"\n",
|
| 174 |
+
"def clean_answer(answer):\n",
|
| 175 |
+
" \"\"\"Remove <think></think> tags and content from the answer.\"\"\"\n",
|
| 176 |
+
" import re\n",
|
| 177 |
+
" # Remove everything between <think> and </think> tags, including the tags themselves\n",
|
| 178 |
+
" cleaned = re.sub(r'<think>.*?</think>\\s*', '', answer, flags=re.DOTALL)\n",
|
| 179 |
+
" return cleaned.strip()\n",
|
| 180 |
+
"\n",
|
| 181 |
+
"# Inference\n",
|
| 182 |
+
"for query in questions:\n",
|
| 183 |
+
" # Get answer from the RAG chain\n",
|
| 184 |
+
" answer = rag_chain.invoke({\"question\": query})\n",
|
| 185 |
+
" # Clean the answer to remove thinking content\n",
|
| 186 |
+
" cleaned_answer = clean_answer(answer)\n",
|
| 187 |
+
" answers.append(cleaned_answer)\n",
|
| 188 |
+
" \n",
|
| 189 |
+
" # Get relevant documents for context\n",
|
| 190 |
+
" relevant_docs = retrieve_relevant_documents(query, limit=5)\n",
|
| 191 |
+
" context_texts = [doc['text'] for doc in relevant_docs]\n",
|
| 192 |
+
" contexts.append(context_texts)\n",
|
| 193 |
+
"\n",
|
| 194 |
+
"# To dict\n",
|
| 195 |
+
"data = {\n",
|
| 196 |
+
" \"question\": questions,\n",
|
| 197 |
+
" \"answer\": answers,\n",
|
| 198 |
+
" \"contexts\": contexts,\n",
|
| 199 |
+
" \"reference\": ground_truths\n",
|
| 200 |
+
"}\n",
|
| 201 |
+
"\n",
|
| 202 |
+
"# Convert dict to dataset\n",
|
| 203 |
+
"dataset = Dataset.from_dict(data)\n",
|
| 204 |
+
"\n"
|
| 205 |
+
]
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"cell_type": "code",
|
| 209 |
+
"execution_count": 7,
|
| 210 |
+
"id": "3e016be2",
|
| 211 |
+
"metadata": {},
|
| 212 |
+
"outputs": [
|
| 213 |
+
{
|
| 214 |
+
"name": "stderr",
|
| 215 |
+
"output_type": "stream",
|
| 216 |
+
"text": [
|
| 217 |
+
"Evaluating: 0%| | 0/12 [00:00<?, ?it/s]"
|
| 218 |
+
]
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"name": "stdout",
|
| 222 |
+
"output_type": "stream",
|
| 223 |
+
"text": [
|
| 224 |
+
"2025-11-27 02:11:16 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 225 |
+
"2025-11-27 02:11:22 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 226 |
+
"2025-11-27 02:11:23 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 227 |
+
"2025-11-27 02:11:23 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 228 |
+
"2025-11-27 02:11:23 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n"
|
| 229 |
+
]
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"name": "stderr",
|
| 233 |
+
"output_type": "stream",
|
| 234 |
+
"text": [
|
| 235 |
+
"Evaluating: 8%|β | 1/12 [00:10<01:50, 10.08s/it]"
|
| 236 |
+
]
|
| 237 |
+
},
|
| 238 |
+
{
|
| 239 |
+
"name": "stdout",
|
| 240 |
+
"output_type": "stream",
|
| 241 |
+
"text": [
|
| 242 |
+
"2025-11-27 02:11:24 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 243 |
+
"2025-11-27 02:11:24 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 244 |
+
"2025-11-27 02:11:24 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 245 |
+
"2025-11-27 02:11:24 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 246 |
+
"2025-11-27 02:11:24 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 247 |
+
"2025-11-27 02:11:26 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 248 |
+
"2025-11-27 02:11:27 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 249 |
+
"2025-11-27 02:11:27 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 250 |
+
"2025-11-27 02:11:27 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 251 |
+
"2025-11-27 02:11:34 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 252 |
+
"2025-11-27 02:11:34 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 253 |
+
"2025-11-27 02:11:35 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n",
|
| 254 |
+
"2025-11-27 02:11:35 - HTTP Request: POST https://api.studio.nebius.ai/v1/embeddings \"HTTP/1.1 200 OK\"\n"
|
| 255 |
+
]
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"name": "stderr",
|
| 259 |
+
"output_type": "stream",
|
| 260 |
+
"text": [
|
| 261 |
+
"Evaluating: 17%|ββ | 2/12 [00:23<02:01, 12.20s/it]"
|
| 262 |
+
]
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"name": "stdout",
|
| 266 |
+
"output_type": "stream",
|
| 267 |
+
"text": [
|
| 268 |
+
"2025-11-27 02:11:38 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 269 |
+
"2025-11-27 02:11:38 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 270 |
+
"2025-11-27 02:11:38 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 271 |
+
"2025-11-27 02:11:38 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 272 |
+
"2025-11-27 02:11:38 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 273 |
+
"2025-11-27 02:11:38 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 274 |
+
]
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"name": "stderr",
|
| 278 |
+
"output_type": "stream",
|
| 279 |
+
"text": [
|
| 280 |
+
"Evaluating: 33%|ββββ | 4/12 [00:34<01:02, 7.87s/it]"
|
| 281 |
+
]
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"name": "stdout",
|
| 285 |
+
"output_type": "stream",
|
| 286 |
+
"text": [
|
| 287 |
+
"2025-11-27 02:11:48 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 288 |
+
"2025-11-27 02:11:48 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 289 |
+
]
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"name": "stderr",
|
| 293 |
+
"output_type": "stream",
|
| 294 |
+
"text": [
|
| 295 |
+
"Evaluating: 50%|βββββ | 6/12 [00:36<00:27, 4.59s/it]"
|
| 296 |
+
]
|
| 297 |
+
},
|
| 298 |
+
{
|
| 299 |
+
"name": "stdout",
|
| 300 |
+
"output_type": "stream",
|
| 301 |
+
"text": [
|
| 302 |
+
"2025-11-27 02:11:54 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 303 |
+
"2025-11-27 02:11:57 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 304 |
+
"2025-11-27 02:11:59 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 305 |
+
"2025-11-27 02:12:03 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 306 |
+
"2025-11-27 02:12:04 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 307 |
+
"2025-11-27 02:12:06 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 308 |
+
"2025-11-27 02:12:08 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
|
| 309 |
+
"2025-11-27 02:12:09 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 310 |
+
]
|
| 311 |
+
},
|
| 312 |
+
{
|
| 313 |
+
"name": "stderr",
|
| 314 |
+
"output_type": "stream",
|
| 315 |
+
"text": [
|
| 316 |
+
"Evaluating: 58%|ββββββ | 7/12 [00:57<00:43, 8.69s/it]"
|
| 317 |
+
]
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"name": "stdout",
|
| 321 |
+
"output_type": "stream",
|
| 322 |
+
"text": [
|
| 323 |
+
"2025-11-27 02:12:11 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 324 |
+
]
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"name": "stderr",
|
| 328 |
+
"output_type": "stream",
|
| 329 |
+
"text": [
|
| 330 |
+
"Evaluating: 67%|βββββββ | 8/12 [00:58<00:27, 6.89s/it]"
|
| 331 |
+
]
|
| 332 |
+
},
|
| 333 |
+
{
|
| 334 |
+
"name": "stdout",
|
| 335 |
+
"output_type": "stream",
|
| 336 |
+
"text": [
|
| 337 |
+
"2025-11-27 02:12:14 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 338 |
+
]
|
| 339 |
+
},
|
| 340 |
+
{
|
| 341 |
+
"name": "stderr",
|
| 342 |
+
"output_type": "stream",
|
| 343 |
+
"text": [
|
| 344 |
+
"Evaluating: 75%|ββββββββ | 9/12 [01:01<00:17, 5.81s/it]"
|
| 345 |
+
]
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"name": "stdout",
|
| 349 |
+
"output_type": "stream",
|
| 350 |
+
"text": [
|
| 351 |
+
"2025-11-27 02:12:37 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 352 |
+
]
|
| 353 |
+
},
|
| 354 |
+
{
|
| 355 |
+
"name": "stderr",
|
| 356 |
+
"output_type": "stream",
|
| 357 |
+
"text": [
|
| 358 |
+
"Evaluating: 83%|βββββββββ | 10/12 [01:24<00:21, 10.59s/it]"
|
| 359 |
+
]
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"name": "stdout",
|
| 363 |
+
"output_type": "stream",
|
| 364 |
+
"text": [
|
| 365 |
+
"2025-11-27 02:12:49 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 366 |
+
]
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"name": "stderr",
|
| 370 |
+
"output_type": "stream",
|
| 371 |
+
"text": [
|
| 372 |
+
"Evaluating: 92%|ββββββββββ| 11/12 [01:37<00:11, 11.16s/it]"
|
| 373 |
+
]
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"name": "stdout",
|
| 377 |
+
"output_type": "stream",
|
| 378 |
+
"text": [
|
| 379 |
+
"2025-11-27 02:12:55 - HTTP Request: POST https://api.studio.nebius.ai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
|
| 380 |
+
]
|
| 381 |
+
},
|
| 382 |
+
{
|
| 383 |
+
"name": "stderr",
|
| 384 |
+
"output_type": "stream",
|
| 385 |
+
"text": [
|
| 386 |
+
"Evaluating: 100%|ββββββββββ| 12/12 [01:42<00:00, 8.57s/it]\n"
|
| 387 |
+
]
|
| 388 |
+
}
|
| 389 |
+
],
|
| 390 |
+
"source": [
|
| 391 |
+
"from ragas import evaluate\n",
|
| 392 |
+
"from ragas.metrics import (\n",
|
| 393 |
+
" faithfulness,\n",
|
| 394 |
+
" answer_relevancy,\n",
|
| 395 |
+
" context_recall,\n",
|
| 396 |
+
" context_precision,\n",
|
| 397 |
+
")\n",
|
| 398 |
+
"\n",
|
| 399 |
+
"result = evaluate(\n",
|
| 400 |
+
" llm=model,\n",
|
| 401 |
+
" embeddings=embedding_model,\n",
|
| 402 |
+
" dataset = dataset, \n",
|
| 403 |
+
" metrics=[\n",
|
| 404 |
+
" context_precision,\n",
|
| 405 |
+
" context_recall,\n",
|
| 406 |
+
" faithfulness,\n",
|
| 407 |
+
" answer_relevancy,\n",
|
| 408 |
+
" ],\n",
|
| 409 |
+
")\n",
|
| 410 |
+
"\n",
|
| 411 |
+
"df = result.to_pandas()\n",
|
| 412 |
+
"\n",
|
| 413 |
+
"# evaluation_results = result.to_pandas()\n",
|
| 414 |
+
"\n",
|
| 415 |
+
"# display_columns = ['user_input', 'answer_relevancy', 'faithfulness', 'context_precision', 'context_recall']\n",
|
| 416 |
+
"# formatted_results = evaluation_results[display_columns].to_markdown(index=False, numalign=\"left\", stralign=\"left\")\n",
|
| 417 |
+
"\n",
|
| 418 |
+
"# print(formatted_results)\n"
|
| 419 |
+
]
|
| 420 |
+
},
|
| 421 |
+
{
|
| 422 |
+
"cell_type": "code",
|
| 423 |
+
"execution_count": 8,
|
| 424 |
+
"id": "d8514ff3",
|
| 425 |
+
"metadata": {
|
| 426 |
+
"slideshow": {
|
| 427 |
+
"slide_type": "slide"
|
| 428 |
+
}
|
| 429 |
+
},
|
| 430 |
+
"outputs": [
|
| 431 |
+
{
|
| 432 |
+
"name": "stdout",
|
| 433 |
+
"output_type": "stream",
|
| 434 |
+
"text": [
|
| 435 |
+
" user_input \\\n",
|
| 436 |
+
"0 What are Provisions, Criteria and Practices? \n",
|
| 437 |
+
"1 What is 'reasonable'? \n",
|
| 438 |
+
"2 What is 'substantial disadvantage'? \n",
|
| 439 |
+
"\n",
|
| 440 |
+
" retrieved_contexts \\\n",
|
| 441 |
+
"0 [What is a provision, criterion or practice? T... \n",
|
| 442 |
+
"1 [βReasonableβ means having regard to all of th... \n",
|
| 443 |
+
"2 [The Act states that disadvantage must be subs... \n",
|
| 444 |
+
"\n",
|
| 445 |
+
" response \\\n",
|
| 446 |
+
"0 According to the context documents, the phrase... \n",
|
| 447 |
+
"1 According to the context documents, 'reasonabl... \n",
|
| 448 |
+
"2 According to the provided context documents, '... \n",
|
| 449 |
+
"\n",
|
| 450 |
+
" reference context_precision \\\n",
|
| 451 |
+
"0 The Equality and Human Rights Commission (EHRC... 0.9500 \n",
|
| 452 |
+
"1 There are two key considerations of 'reasonabl... 0.8875 \n",
|
| 453 |
+
"2 'Substantial' is defined in the Act as 'more t... 0.8875 \n",
|
| 454 |
+
"\n",
|
| 455 |
+
" context_recall faithfulness answer_relevancy \n",
|
| 456 |
+
"0 1.0 0.875000 0.736830 \n",
|
| 457 |
+
"1 1.0 0.850000 0.751613 \n",
|
| 458 |
+
"2 0.8 0.666667 0.878305 \n"
|
| 459 |
+
]
|
| 460 |
+
}
|
| 461 |
+
],
|
| 462 |
+
"source": [
|
| 463 |
+
"print(df)"
|
| 464 |
+
]
|
| 465 |
+
}
|
| 466 |
+
],
|
| 467 |
+
"metadata": {
|
| 468 |
+
"kernelspec": {
|
| 469 |
+
"display_name": ".venv",
|
| 470 |
+
"language": "python",
|
| 471 |
+
"name": "python3"
|
| 472 |
+
},
|
| 473 |
+
"language_info": {
|
| 474 |
+
"codemirror_mode": {
|
| 475 |
+
"name": "ipython",
|
| 476 |
+
"version": 3
|
| 477 |
+
},
|
| 478 |
+
"file_extension": ".py",
|
| 479 |
+
"mimetype": "text/x-python",
|
| 480 |
+
"name": "python",
|
| 481 |
+
"nbconvert_exporter": "python",
|
| 482 |
+
"pygments_lexer": "ipython3",
|
| 483 |
+
"version": "3.12.5"
|
| 484 |
+
}
|
| 485 |
+
},
|
| 486 |
+
"nbformat": 4,
|
| 487 |
+
"nbformat_minor": 5
|
| 488 |
+
}
|
tests/test_vector_search.py
CHANGED
|
@@ -9,7 +9,7 @@ COLLECTION_NAME = "my_rag_collection"
|
|
| 9 |
DOCUMENT_DIR = "data/"
|
| 10 |
EMBEDDING_DIMENSION = 4096
|
| 11 |
|
| 12 |
-
milvus_client = MilvusClient(uri=MILVUS_URI)
|
| 13 |
|
| 14 |
TEXT_MAX_LENGTH = 65000
|
| 15 |
CHUNK_SIZE = 100
|
|
|
|
| 9 |
DOCUMENT_DIR = "data/"
|
| 10 |
EMBEDDING_DIMENSION = 4096
|
| 11 |
|
| 12 |
+
milvus_client = MilvusClient(uri=MILVUS_URI, token=os.getenv("MILVUS_API_KEY"))
|
| 13 |
|
| 14 |
TEXT_MAX_LENGTH = 65000
|
| 15 |
CHUNK_SIZE = 100
|