from nano_graphrag._utils import encode_string_by_tiktoken
from nano_graphrag.base import QueryParam
from nano_graphrag.graphrag import GraphRAG
from nano_graphrag._op import chunking_by_seperators
def chunking_by_token_size(
    tokens_list: list[list[int]],  # nano-graphrag may pass a batch of docs' tokens
    doc_keys: list[str],  # nano-graphrag may pass a batch of docs' key ids
    tiktoken_model,  # a tiktoken model (anything exposing decode_batch)
    overlap_token_size=128,
    max_token_size=1024,
):
    """Split each document's token list into overlapping chunks.

    The window advances by ``max_token_size - overlap_token_size`` tokens,
    so consecutive chunks share ``overlap_token_size`` tokens of context.
    Each chunk is decoded back to text and emitted with its token count,
    its order index within its document, and the originating doc key.

    Args:
        tokens_list: one token-id list per document.
        doc_keys: document key ids, parallel to ``tokens_list``.
        tiktoken_model: tokenizer exposing ``decode_batch(list[list[int]])``.
        overlap_token_size: tokens shared between adjacent chunks.
        max_token_size: maximum tokens per chunk.

    Returns:
        Flat list of dicts with keys ``tokens``, ``content``,
        ``chunk_order_index`` and ``full_doc_id``.

    Raises:
        ValueError: if ``max_token_size <= overlap_token_size`` — the
            window would not advance (step <= 0), which previously either
            raised a confusing ``range()`` error or silently produced no
            chunks at all.
    """
    if max_token_size <= overlap_token_size:
        raise ValueError(
            "max_token_size must be greater than overlap_token_size"
        )
    step = max_token_size - overlap_token_size  # hoisted loop invariant
    results = []
    for index, tokens in enumerate(tokens_list):
        chunk_tokens = []
        lengths = []
        for start in range(0, len(tokens), step):
            chunk_tokens.append(tokens[start : start + max_token_size])
            # last window may be shorter than max_token_size
            lengths.append(min(max_token_size, len(tokens) - start))
        # decode every chunk of this document back to text in one batch call
        chunk_texts = tiktoken_model.decode_batch(chunk_tokens)
        for i, chunk in enumerate(chunk_texts):
            results.append(
                {
                    "tokens": lengths[i],
                    "content": chunk.strip(),
                    "chunk_order_index": i,
                    "full_doc_id": doc_keys[index],
                }
            )
    return results
# Working directory for this test run; GraphRAG receives it via
# working_dir below — presumably it caches/persists its index there,
# TODO confirm against nano_graphrag's GraphRAG docs.
WORKING_DIR = "./nano_graphrag_cache_local_embedding_TEST"
# Build a GraphRAG instance using separator-based chunking
# (chunking_by_seperators) instead of the token-size chunking
# defined above.
rag = GraphRAG(
    working_dir=WORKING_DIR,
    chunk_func=chunking_by_seperators,
)