# baselines-v2/scripts/data_processing/mldr_processing.py
# Uploaded by "manu" via huggingface_hub (commit 545c4d5, verified).
import json
from tqdm import tqdm
from langchain_text_splitters import RecursiveCharacterTextSplitter
from datasets import load_dataset, Dataset
import vertexai

# Vertex AI requires project/region initialisation before model use.
PROJECT_ID = "llm-testing"
vertexai.init(project=PROJECT_ID, location="us-central1")
# NOTE(review): import deliberately placed after vertexai.init() — presumably
# to guarantee init runs first; confirm whether it can move to the top.
from vertexai.generative_models import GenerativeModel

# Gemini model used to pick the most relevant chunk for each query.
model = GenerativeModel("gemini-1.5-flash-002")
# Prompt sent verbatim ahead of the chunked article. It asks the model to
# (1) pick the single most relevant chunk, (2) reformulate context-dependent
# queries to be self-contained, and (3) reply with a strict JSON object.
# An empty relevant_chunks_id list is an allowed answer ("no chunk relevant").
system_instruction = """
Given a query and a wikipedia article, that is composed of multiple chunks, output which chunk is the most relevant to answer the query.
If the query is not understandable without the context of the entire article, reformulate the query to be more specific.
For example, if the query is "What is the population of the country?", it should be reformulated to "What is the population of France?" if the article clearly is about France.
Similarly, "What is her occupation?" should be reformulated to "What is Marie Curie's occupation?" if the article is about Marie Curie.
If multiple chunks are relevant, output only the most relevant, it should contain at least partially the necessary information to answer the query.
If no chunk is relevant, output an empty list.
The output should be a JSON with format:
{"query": <original_query>, "reformulated_query": <reformulated_query>, "answer": <brief answer>, "relevant_chunks_id": [<relevant_chunk1>]}
"""
# MLDR English (query, positive, negative) triplets from the Hub.
dataset = load_dataset("sentence-transformers/mldr", "en-triplet", split="train")
# Split documents into ~1000-character chunks, no overlap, measuring length
# in raw characters (length_function=len) with plain-string separators.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=0,
    length_function=len,
    is_separator_regex=False,
)
# For each triplet: chunk the positive document, ask Gemini which chunk best
# answers the query (reformulating ambiguous queries), and collect the parsed
# results; finally push everything to the Hub.
outputs = []
for i in tqdm(range(2000)):  # len(dataset)):
    sample = dataset[i]
    # Rough size stats for cost estimation
    # (input cost is 0.00001875 per 1000 characters).
    num_words = len(sample["positive"].split(" "))
    num_chars = len(sample["positive"])
    print("The text has", num_words, "words and", num_chars, "characters.")
    texts = text_splitter.create_documents([sample["positive"]])
    # Tag every chunk with an id the model can reference in its JSON answer.
    # (Renamed the comprehension variable: it used to shadow the loop index i.)
    text = "\n\n".join(
        [f"<chunk_{chunk_id}> {chunk.page_content}" for chunk_id, chunk in enumerate(texts)]
    )
    output = None
    try:
        response = model.generate_content(
            f"{system_instruction}\n\n{text}\n\n The query is: {sample['anchor']}\n What is the most relevant chunk to answer the query?"
        )
        output = response.text
        # Strip optional markdown code fences, then parse the JSON payload.
        output = json.loads(output.replace("```json\n", "").replace("\n```", ""))
        output["positive"] = sample["positive"]
        output["negative"] = sample["negative"]
        output["chunks"] = [t.page_content for t in texts]
        # The model may legitimately return an empty relevant_chunks_id list
        # (see system_instruction). Previously this raised IndexError in the
        # debug print and the sample was silently dropped by a bare except
        # with a misleading "Error parsing output" message — skip explicitly.
        if not output["relevant_chunks_id"]:
            print("No relevant chunk for query:", output.get("query"))
            continue
        print(
            output["reformulated_query"],
            output["answer"],
            output["relevant_chunks_id"],
            output["chunks"][output["relevant_chunks_id"][0]],
        )
        outputs.append(output)
    except Exception as e:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; include the actual error.
        print(output)
        print("Error parsing output:", e)
Dataset.from_list(outputs).push_to_hub("manu/mldr-zoomed-1000char-2000")