# baselines-v2/scripts/data_processing/gutenberg_processing.py
# Uploaded by manu via huggingface_hub (commit 545c4d5, verified)
import json
from datetime import timezone
from langchain_text_splitters import RecursiveCharacterTextSplitter
from datasets import load_dataset
import datetime
from tqdm import tqdm
import vertexai
from vertexai.generative_models import Part
from vertexai.preview import caching
from vertexai.preview.generative_models import GenerativeModel
PROJECT_ID = "llm-testing"
vertexai.init(project=PROJECT_ID, location="us-central1")
# from vertexai.generative_models import GenerativeModel
# model = GenerativeModel("gemini-1.5-flash-002")
# Prompt sent to Gemini for every book: one-paragraph summary first, then one
# self-contained question/answer pair per requested chunk id, returned as a
# JSON object with a top-level 'chunks' list.
# Fixed grammar defects in the original prompt ("partially with" -> "partially
# answered with", "each questions" -> "each question") so the model receives
# well-formed instructions.
system_instruction = """
First generate a summary of the text that should be a paragraph long.
Then, for chunks with specified ids, ask 1 question that pertains to the chunk.
Make sure the questions contain the necessary context and can be at least partially answered with chunk information.
Questions can necessitate previous context from the book to be understood, but should not assume the person answering knows which part of the book they are referring to.
Do not mention the chunk id in the question.
Answer each question in a brief paragraph.
Format the output as a JSON consisting of a key 'chunks', associated with a list of chunk ids (ex: 13), each with 2 fields 'question' and 'answer' and their associated value.
If no relevant questions can be asked about the chunk, or the chunk contains noisy information, do not ask any questions.
Make sure the output is valid JSON, that in text quotes are preceded by a backslash.
"""
# Load a single English shard of the Project Gutenberg dump; checksum
# verification is disabled because the parquet shard was uploaded manually.
_EN_SHARD = "data/en-00001-of-00052-5c2b3fd5e60f0124.parquet"
ds = load_dataset(
    "manu/project_gutenberg",
    data_files=[_EN_SHARD],
    verification_mode="no_checks",
)
# Character-count splitter: ~3000-character chunks, no overlap, so every chunk
# can be addressed by a stable id when building the prompt.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=3000,
    chunk_overlap=0,
    length_function=len,
    is_separator_regex=False,
)
import os

# Resume cache: maps book_id -> processed output so already-processed books
# are skipped on re-runs.
ALL_BOOKS_PATH = "/home/manuel/Desktop/all_books.json"
if os.path.exists(ALL_BOOKS_PATH):
    with open(ALL_BOOKS_PATH, "r") as f:
        # JSON object keys are always strings; convert them back to int so the
        # `book_id in all_books` check in the main loop actually matches on
        # re-runs (with string keys the check silently never hit).
        all_books = {int(k): v for k, v in json.load(f).items()}
else:
    all_books = {}
# gemini-1.5-flash-002 pricing, USD per 1000 characters (storage is per hour).
INPUT_COST_PER_KCHAR = 0.00001875
CACHED_INPUT_COST_PER_KCHAR = 0.0000046875
OUTPUT_COST_PER_KCHAR = 0.000075
STORAGE_COST_PER_KCHAR_HOUR = 0.00025

for book_id in tqdm(range(10, 20)):
    # The resume cache may hold int keys (this run) or string keys (reloaded
    # from JSON, where keys are always strings) — accept either.
    if book_id in all_books or str(book_id) in all_books:
        print("Book already processed, skipping.")
        continue
    total_cost = 0
    sample = ds["train"][book_id]
    # Rough size check: skip short books that would yield too few chunks.
    num_words = len(sample["text"].split(" "))
    num_chars = len(sample["text"])
    print("The text has", num_words, "words and", num_chars, "characters.")
    if num_words < 26000:
        print("The text is too short, skipping.")
        continue
    texts = text_splitter.create_documents([sample["text"]])
    # Tag every chunk with a 1-based id so the model can reference it.
    # (Named book_text, not `text`, to avoid being clobbered by the parsed
    # response below — the original rebound `text` inside the inner loop.)
    book_text = "\n\n".join(
        f"<chunk_{i + 1}> {chunk.page_content}" for i, chunk in enumerate(texts)
    )
    contents = [
        Part.from_text(book_text),
        Part.from_text(system_instruction),
    ]
    print("Caching Input")
    cached_content = caching.CachedContent.create(
        model_name="gemini-1.5-flash-002",
        system_instruction=system_instruction,
        contents=contents,
        ttl=datetime.timedelta(minutes=20),
        display_name="example-cache",
    )
    print(cached_content)
    try:
        model = GenerativeModel.from_cached_content(cached_content=cached_content)
        print("The cost of processing this input is", INPUT_COST_PER_KCHAR * num_chars / 1000, "USD.")
        total_cost += INPUT_COST_PER_KCHAR * num_chars / 1000
        total_text = {"chunks": []}
        # Query the cached model about chunks in batches of 20 ids.
        for i in tqdm(range(0, len(texts), 20)):
            chunk_ids = list(range(i, min(i + 20, len(texts))))
            response = model.generate_content(
                f"The chunks ids are {chunk_ids}. Generate the output JSON."
            )
            total_cost += CACHED_INPUT_COST_PER_KCHAR * num_chars / 1000
            total_cost += OUTPUT_COST_PER_KCHAR * len(response.text) / 1000
            # Strip the markdown fence the model sometimes adds, then parse
            # with json.loads — never eval() model output: eval is arbitrary
            # code execution on untrusted text, and it also chokes on JSON
            # null/true/false literals.
            try:
                parsed = json.loads(
                    response.text.replace("```json\n", "").replace("\n```", "")
                )
                total_text["chunks"] += parsed["chunks"]
            except (json.JSONDecodeError, KeyError, TypeError) as e:
                print("Error parsing response:", e)
                print(response.text)
        # Storage cost accrues per hour the cache existed.
        cache_time = datetime.datetime.now(timezone.utc) - cached_content.create_time
        total_cost += (cache_time.total_seconds() / 3600) * STORAGE_COST_PER_KCHAR_HOUR * num_chars / 1000
    finally:
        # Always delete the cache, even if generation raised: cached-content
        # storage is billed per hour, so a leaked cache silently costs money.
        cached_content.delete()
    total_text["og_chunks"] = [c.page_content for c in texts]
    total_text["book_id"] = book_id
    total_text["cost"] = total_cost
    print("The total cost is", total_cost, "USD.")
    # Per-book output, then the running resume cache.
    with open(f"/home/manuel/Desktop/summary_{book_id}.json", "w") as f:
        json.dump(total_text, f)
    all_books[book_id] = total_text
    with open("/home/manuel/Desktop/all_books.json", "w") as f:
        json.dump(all_books, f)
# NOTE: the original message pointed at summary_all_books.json, but the file
# actually written above is all_books.json.
print(
    "All books have been processed and saved to /home/manuel/Desktop/all_books.json"
)
# Upload the processed books to the Hugging Face Hub.
from datasets import Dataset

# Dataset.from_list expects a list of row dicts; all_books is a dict keyed by
# book_id, so push its values (each row already carries its book_id field).
Dataset.from_list(list(all_books.values())).push_to_hub("manu/all_books_test")