import json
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize


# Run once to fetch the sentence tokenizer models: nltk.download('punkt')
# (recent NLTK releases load 'punkt_tab' instead)


def split_content_by_tokens(data, max_tokens=200):
    """
    Splits content into chunks of approximately max_tokens, respecting sentence boundaries.

    Args:
        data (list): List of dictionaries containing "page_number" and "content".
        max_tokens (int): Maximum number of tokens per chunk.

    Returns:
        list: A new list of dictionaries with split content.
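
    Example (illustrative; assumes the punkt tokenizer models are installed):
        >>> split_content_by_tokens(
        ...     [{"page_number": 1, "content": "First sentence. Second sentence."}])
        [{'id': 1, 'page_number': 1, 'content': 'First sentence. Second sentence.', 'type': 'slide'}]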
    """
    processed_data = []
    chunk_id = 1
    prev_page = -1
    current_chunk = []
    current_token_count = 0
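    # Two events flush the running chunk: (1) the page number changes, and
    # (2) adding the next sentence would push the chunk past max_tokens.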

    for record in data:
        page_number = record.get("page_number")
        content = record.get("content", "")

        if prev_page == -1:
            prev_page = page_number
        elif prev_page != page_number:  # page changed: flush the previous page's chunk
            if current_chunk:
                processed_data.append({
                    "id": chunk_id,
                    "page_number": prev_page,
                    "content": " ".join(current_chunk),
                    "type": "slide"
                })
                chunk_id += 1
                current_chunk = []
                current_token_count = 0
            # Track the new page even when there was nothing to flush
            prev_page = page_number

        # Tokenize content into sentences
        sentences = sent_tokenize(content)

        for sentence in sentences:
            sentence_tokens = word_tokenize(sentence)
            sentence_length = len(sentence_tokens)

            # Check if adding this sentence exceeds the token limit
            if current_token_count + sentence_length > max_tokens:
                # Save the current chunk
                if current_chunk:
                    processed_data.append({
                        "id": chunk_id,
                        "page_number": page_number,
                        "content": " ".join(current_chunk),
                        "type": "slide"
                    })
                    chunk_id += 1
                # Start a new chunk
                current_chunk = []
                current_token_count = 0

            # Add the sentence to the (possibly new) chunk; a single sentence
            # longer than max_tokens still becomes its own oversized chunk
            current_chunk.append(sentence)
            current_token_count += sentence_length

    # Save the last chunk
    if current_chunk:
        processed_data.append({
            "id": chunk_id+1,
            "page_number": prev_page,
            "content": " ".join(current_chunk),
            "type": "slide"
        })

    return processed_data


if __name__ == "__main__":
    # Load your JSON file
    input_file = "/Users/yuchenhua/Coding/pdf/1121ppt.json"
    output_file = "1121_ppt.json"
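    # Expected input shape (illustrative): a JSON array of page records, e.g.
    # [{"page_number": 1, "content": "Slide text ..."}, ...]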

    with open(input_file, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # Process the data
    split_data = split_content_by_tokens(data)

    # Save the processed data to a new JSON file
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(split_data, f, indent=4)

    print(f"Processed content has been saved to {output_file}")