| | import json |
| |
|
| | from datetime import timezone |
| |
|
| | from langchain_text_splitters import RecursiveCharacterTextSplitter |
| |
|
| | from datasets import load_dataset |
| | import datetime |
| | from tqdm import tqdm |
| |
|
| | import vertexai |
| | from vertexai.generative_models import Part |
| | from vertexai.preview import caching |
| | from vertexai.preview.generative_models import GenerativeModel |
| |
|
# Google Cloud configuration for Vertex AI (context caching requires a
# regional endpoint; us-central1 supports Gemini 1.5 Flash caching).
PROJECT_ID = "llm-testing"
LOCATION = "us-central1"

vertexai.init(project=PROJECT_ID, location=LOCATION)
| |
|
| | |
| | |
| |
|
# Prompt sent to Gemini: summarize the book, then emit one question/answer
# pair per requested chunk id as a JSON object under the key 'chunks'.
# (Fixed two typos in the instructions: a missing "answered" and
# "each questions" -> "each question" — garbled instructions degrade
# model compliance.)
system_instruction = """
First generate a summary of the text that should be a paragraph long.
Then, for chunks with specified ids, ask 1 question that pertains to the chunk.
Make sure the questions contain the necessary context and can be at least partially answered with chunk information.
Questions can necessitate previous context from the book to be understood, but should not assume the person answering knows which part of the book they are referring to.
Do not mention the chunk id in the question.
Answer each question in a brief paragraph.
Format the output as a JSON consisting of a key 'chunks', associated with a list of chunk ids (ex: 13), each with 2 fields 'question' and 'answer' and their associated value.
If no relevant questions can be asked about the chunk, or the chunk contains noisy information, do not ask any questions.
Make sure the output is valid JSON, that in text quotes are preceded by a backslash.
"""
| |
|
| |
|
| | |
# Load one English shard of the Project Gutenberg dump (one book per row,
# full text in the "text" column). Checksum verification is disabled to
# speed up loading.
ds = load_dataset(
    "manu/project_gutenberg",
    data_files=["data/en-00001-of-00052-5c2b3fd5e60f0124.parquet"],
    verification_mode="no_checks",
)

# Split each book into ~3000-character, non-overlapping chunks. Length is
# measured in raw characters (len); separators are matched literally, not
# as regular expressions.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=3000,
    chunk_overlap=0,
    length_function=len,
    is_separator_regex=False,
)
| |
|
import os

# Resume support: reload previously processed books from the checkpoint file
# if one exists. NOTE(review): json.load always returns *string* keys, so any
# membership test against an int book_id must convert with str(book_id) first.
ALL_BOOKS_PATH = "/home/manuel/Desktop/all_books.json"

if os.path.exists(ALL_BOOKS_PATH):
    with open(ALL_BOOKS_PATH, "r") as f:
        all_books = json.load(f)
else:
    all_books = {}
| |
|
| |
|
# Main loop: for each book, chunk the text, cache it with Gemini context
# caching, ask the model for per-chunk question/answer pairs in batches of
# 20 chunks, track the running API cost, and checkpoint results to disk.
for book_id in tqdm(range(10, 20)):
    # json.load returns string keys, so the resume check must compare against
    # str(book_id) — the original int-key check never matched after a reload,
    # silently re-processing (and re-billing) every book.
    if str(book_id) in all_books:
        print("Book already processed, skipping.")
        continue
    total_cost = 0

    sample = ds["train"][book_id]

    num_words = len(sample["text"].split(" "))
    num_chars = len(sample["text"])

    print("The text has", num_words, "words and", num_chars, "characters.")
    if num_words < 26000:
        # Context caching has a minimum input size; skip short books.
        print("The text is too short, skipping.")
        continue

    # Chunk the book and label each chunk 1..N inside the cached prompt text.
    texts = text_splitter.create_documents([sample["text"]])
    text = "\n\n".join(
        [f"<chunk_{i+1}> {chunk.page_content}" for i, chunk in enumerate(texts)]
    )

    contents = [
        Part.from_text(text),
        Part.from_text(system_instruction),
    ]

    print("Caching Input")

    cached_content = caching.CachedContent.create(
        model_name="gemini-1.5-flash-002",
        system_instruction=system_instruction,
        contents=contents,
        ttl=datetime.timedelta(minutes=20),
        display_name="example-cache",
    )

    print(cached_content)

    model = GenerativeModel.from_cached_content(cached_content=cached_content)

    # Cached-input pricing: $0.00001875 per 1k characters (Gemini 1.5 Flash).
    print("The cost of processing this input is", 0.00001875 * num_chars / 1000, "USD.")
    total_cost += 0.00001875 * num_chars / 1000

    total_text = {"chunks": []}
    try:
        # The prompt labels chunks starting at 1 (<chunk_1> ...), so iterate
        # 1-based ids here. The original 0-based range asked about a
        # nonexistent chunk 0 and never asked about the final chunk.
        for start in tqdm(range(1, len(texts) + 1, 20)):
            chunk_ids = list(range(start, min(start + 20, len(texts) + 1)))

            response = model.generate_content(
                f"The chunks ids are {chunk_ids}. Generate the output JSON."
            )
            # Cached-context reuse cost plus output-token cost (both per 1k chars).
            total_cost += 0.0000046875 * num_chars / 1000
            total_cost += 0.000075 * len(response.text) / 1000

            # Strip the markdown code fence and parse with json.loads: the
            # original eval() executed arbitrary model output (code-injection
            # risk) and also choked on valid JSON literals like true/null.
            try:
                parsed = json.loads(
                    response.text.replace("```json\n", "").replace("\n```", "")
                )
                total_text["chunks"] += parsed["chunks"]
            except (json.JSONDecodeError, KeyError) as e:
                print("Error parsing response:", e)
                print(response.text)
    finally:
        # Always bill for cache storage time and release the cache, even if a
        # generation call raised — otherwise the cache leaks until TTL expiry.
        cache_time = datetime.datetime.now(timezone.utc) - cached_content.create_time
        total_cost += (cache_time.total_seconds() / 3600) * 0.00025 * num_chars / 1000
        cached_content.delete()

    total_text["og_chunks"] = [c.page_content for c in texts]
    total_text["book_id"] = book_id
    total_text["cost"] = total_cost
    print("The total cost is", total_cost, "USD.")

    # Per-book result file.
    with open(f"/home/manuel/Desktop/summary_{book_id}.json", "w") as f:
        json.dump(total_text, f)

    # Store under a string key so it matches what json.load produces on resume.
    all_books[str(book_id)] = total_text

    # Checkpoint after every book so an interrupted run can resume.
    with open("/home/manuel/Desktop/all_books.json", "w") as f:
        json.dump(all_books, f)
| |
|
# Completion notice. (Fixed the path: the script writes all_books.json;
# no summary_all_books.json file is ever created.)
print(
    "All books have been processed and saved to /home/manuel/Desktop/all_books.json"
)
| |
|
| | |
from datasets import Dataset

# Dataset.from_list expects a list of row dicts; all_books is a dict keyed by
# book id, so passing it directly would fail — push one row per book instead.
Dataset.from_list(list(all_books.values())).push_to_hub("manu/all_books_test")
| |
|