import datetime
import json
import os
from datetime import timezone

from datasets import Dataset, load_dataset
from langchain_text_splitters import RecursiveCharacterTextSplitter
from tqdm import tqdm

import vertexai
from vertexai.generative_models import Part
from vertexai.preview import caching
from vertexai.preview.generative_models import GenerativeModel

PROJECT_ID = "llm-testing"
vertexai.init(project=PROJECT_ID, location="us-central1")
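# NOTE: PROJECT_ID must refer to a Google Cloud project with the Vertex AI API
# enabled; authentication follows standard application-default credentials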

# non-cached alternative: model = GenerativeModel("gemini-1.5-flash-002")

system_instruction = """
First generate a summary of the text that should be a paragraph long. 
Then, for chunks with specified ids, ask 1 question that pertains to the chunk. 
Make sure the questions contain the necessary context and can be at least partially with chunk information. 
Questions can necessitate previous context from the book to be understood, but should not assume the person answering knows which part of the book they are referring to.
Do not mention the chunk id in the question.
Answer each questions in a brief paragraph.
Format the output as a JSON consisting of a key 'chunks', associated with a list of chunk ids (ex: 13), each with 2 fields 'question' and 'answer' and their associated value.
If no relevant questions can be asked about the chunk, or the chunk contains noisy information, do not ask any questions. 
Make sure the output is valid JSON, that in text quotes are preceded by a backslash.
"""


# load one English shard of the Project Gutenberg dataset
ds = load_dataset(
    "manu/project_gutenberg",
    data_files=["data/en-00001-of-00052-5c2b3fd5e60f0124.parquet"],
    verification_mode="no_checks",
)

text_splitter = RecursiveCharacterTextSplitter(
    # ~3,000 characters per chunk with no overlap between consecutive chunks
    chunk_size=3000,
    chunk_overlap=0,
    length_function=len,
    is_separator_regex=False,
)
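
# rough scale check: a 600,000-character book yields about 200 chunks of
# at most 3,000 characters each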

# resume from a previous run if a partial output file already exists
if os.path.exists("/home/manuel/Desktop/all_books.json"):
    with open("/home/manuel/Desktop/all_books.json", "r") as f:
        all_books = json.load(f)
else:
    all_books = {}


for book_id in tqdm(range(10, 20)):
    # keys are strings after a JSON round-trip, so compare against str(book_id)
    if str(book_id) in all_books:
        print("Book already processed, skipping.")
        continue
    total_cost = 0
    assert total_cost < 10  # budget ceiling for a single book

    sample = ds["train"][book_id]

    # count words and characters
    num_words = len(sample["text"].split(" "))
    num_chars = len(sample["text"])

    print("The text has", num_words, "words and", num_chars, "characters.")
    if num_words < 26000:
        print("The text is too short, skipping.")
        continue
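    # (a plausible reason for this threshold: Vertex context caching enforces a
    # minimum cached-content token count, so very short books cannot be cached)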
    # input cost is 0.00001875 USD per 1,000 characters

    texts = text_splitter.create_documents([sample["text"]])
    text = "\n\n".join(
        [f"<chunk_{i+1}> {chunk.page_content}" for i, chunk in enumerate(texts)]
    )

    # the system instruction is passed separately to CachedContent.create below,
    # so the cached contents only need the chunked book text
    contents = [Part.from_text(text)]

    print("Caching Input")

    cached_content = caching.CachedContent.create(
        model_name="gemini-1.5-flash-002",
        system_instruction=system_instruction,
        contents=contents,
        ttl=datetime.timedelta(minutes=20),
        display_name="example-cache",
    )
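
    # a 20-minute TTL is assumed to cover all batched queries for one book;
    # storage is billed per character-hour, so a shorter TTL keeps costs down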

    # an existing cache can be reused by name instead of being re-created:
    # cached_content = caching.CachedContent(cached_content_name=cache_id)
    print(cached_content)

    model = GenerativeModel.from_cached_content(cached_content=cached_content)

    print("The cost of processing this input is", 0.00001875 * num_chars / 1000, "USD.")
    total_cost += 0.00001875 * num_chars / 1000
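    # worked example: a 600,000-character book costs
    # 0.00001875 * 600000 / 1000 = 0.01125 USD of (non-cached) input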

    total_text = {"chunks": []}
    for i in tqdm(range(0, len(texts), 20)):
        # choose 20 random chunk ids amongst all chunks
        chunk_ids = list(range(i, min(i + 20, len(texts))))
        # print("The chunk ids are", chunk_ids)

        response = model.generate_content(
            f"The chunks ids are {chunk_ids}. Generate the output JSON."
        )
        # print("The cost of using this cached input is", 0.0000046875 * num_chars / 1000, "USD.")
        total_cost += 0.0000046875 * num_chars / 1000
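        # note: the cached-input rate is a quarter of the standard input rate
        # (0.00001875 / 4 = 0.0000046875 USD per 1,000 characters)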

        # response = model.generate_content(f"Generate a summary of the text: {text}\n\n First generate a summary of the previous text that should be a paragraph long. Then, for chunks with ids {chunk_ids} of the book, ask 2 questions that pertain to the chunk. Make sure the questions contain the necessary context and can be at least partially with chunk information. "
        #                                   f"Questions can necessitate previous context from the book to be understood, but should not assume the person answering knows which part of the book they are referring to. Answer each questions in a brief paragraph. "
        #                                   f"Format the output as a JSON with a summary key, then a chunks key, consisting of a list of chunk ids, each with 2 question:answer pairs. "
        #                                   f"If no relevant questions can be asked about the chunk, or the chunk contains noisy information, do not ask any questions. Make sure the output is valid JSON and not markdown, that in text quotes are preceded by a backslash.",
        #                                   # labels={"project-name": "llm-testing"}
        #                                   )

        # print("The cost of processing this output is", 0.000075 * len(response.text) / 1000, "USD.")
        total_cost += 0.000075 * len(response.text) / 1000

        # 0.0000046875 / 1k characters (cached input) + 0.00025 / 1k characters / hr (storage)
        try:
            text = eval(response.text.replace("```json\n", "").replace("\n```", ""))
            total_text["chunks"] += text["chunks"]
        except SyntaxError as e:
            print("Error parsing response:", e)
            print(response.text)

    # charge storage for however long the cache existed, then delete it
    cache_time = datetime.datetime.now(timezone.utc) - cached_content.create_time
    # storage cost is 0.00025 USD per 1,000 characters per hour
    total_cost += (cache_time.total_seconds() / 3600) * 0.00025 * num_chars / 1000
    cached_content.delete()
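    # worked example: holding a 600,000-character cache for 20 minutes costs
    # (20 / 60) * 0.00025 * 600000 / 1000 = 0.05 USD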
    total_text["og_chunks"] = [c.page_content for c in texts]
    total_text["book_id"] = book_id
    total_text["cost"] = total_cost
    print("The total cost is", total_cost, "USD.")

    with open(f"/home/manuel/Desktop/summary_{book_id}.json", "w") as f:
        json.dump(total_text, f)

    # store under a string key to match the JSON round-trip above
    all_books[str(book_id)] = total_text

    with open("/home/manuel/Desktop/all_books.json", "w") as f:
        json.dump(all_books, f)
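    # all_books.json now reflects every book processed so far, so the script
    # can be interrupted and re-run without repeating completed books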

print(
    "All books have been processed and saved to /home/manuel/Desktop/all_books.json"
)

# upload to the Hugging Face Hub (assumes prior authentication, e.g. via
# `huggingface-cli login` or the HF_TOKEN environment variable)
# Dataset.from_list expects a list of row dicts, so take the dict's values
Dataset.from_list(list(all_books.values())).push_to_hub("manu/all_books_test")
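
# the pushed dataset can later be reloaded with, e.g.:
# from datasets import load_dataset
# qa = load_dataset("manu/all_books_test", split="train")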