mciancone committed on
Commit
56b2929
·
verified ·
1 Parent(s): d275266

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +353 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,355 @@
1
- import altair as alt
2
- import numpy as np
3
- import pandas as pd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- """
7
- # Welcome to Streamlit!
8
-
9
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
10
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
11
- forums](https://discuss.streamlit.io).
12
-
13
- In the meantime, below is an example of what you can do with just a few lines of code:
14
- """
15
-
16
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
17
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
18
-
19
- indices = np.linspace(0, 1, num_points)
20
- theta = 2 * np.pi * num_turns * indices
21
- radius = indices
22
-
23
- x = radius * np.cos(theta)
24
- y = radius * np.sin(theta)
25
-
26
- df = pd.DataFrame({
27
- "x": x,
28
- "y": y,
29
- "idx": indices,
30
- "rand": np.random.randn(num_points),
31
- })
32
-
33
- st.altair_chart(alt.Chart(df, height=700, width=700)
34
- .mark_point(filled=True)
35
- .encode(
36
- x=alt.X("x", axis=None),
37
- y=alt.Y("y", axis=None),
38
- color=alt.Color("idx", legend=None, scale=alt.Scale()),
39
- size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
40
- ))
 
1
+ import json
2
+ import time
3
+ from pathlib import Path
4
+ from typing import Literal
5
+
6
+ import requests
7
+ import tiktoken
8
+ from chunknorris.chunkers import MarkdownChunker
9
+ from chunknorris.parsers import (
10
+ AbstractParser,
11
+ CSVParser,
12
+ DocxParser,
13
+ ExcelParser,
14
+ HTMLParser,
15
+ MarkdownParser,
16
+ PdfParser,
17
+ )
18
  import streamlit as st
19
+ from streamlit import session_state as ss
20
+ from streamlit.runtime.uploaded_file_manager import UploadedFile, UploadedFileRec
21
+
22
# --- Page configuration must run before any other st.* call. ---
st.set_page_config(
    layout="wide",
    page_icon="🔪",
    page_title="ChunkNorris demo",
    menu_items={
        "Report a bug": "https://github.com/wikit-ai/chunknorris/issues",
        "About": "https://wikit-ai.github.io/chunknorris/",
    },
)

# Single shared placeholder used by log() so each new message replaces the previous one.
LOGGER = st.empty()

# Demo documents the user can pick instead of uploading their own file.
SAMPLE_FILE = {
    "sample PDF - 264 pages": "https://raw.githubusercontent.com/wikit-ai/chunknorris/refs/heads/main/docs/examples/example_data/sample.pdf",
    "sample PDF - 16 pages": "https://raw.githubusercontent.com/wikit-ai/chunknorris/refs/heads/main/docs/examples/example_data/sample2.pdf",
    "sample MD": "https://raw.githubusercontent.com/wikit-ai/chunknorris/refs/heads/main/README.md",
    "sample XLSX": "https://raw.githubusercontent.com/wikit-ai/chunknorris/refs/heads/main/docs/examples/example_data/sample.xlsx",
}

# --- Session-state defaults (survive streamlit reruns). ---
# Seconds spent in the last parse+chunk run.
if "parsing_time" not in ss:
    ss.parsing_time = 0

# Markdown string produced by the last parse.
if "parsed_md" not in ss:
    ss.parsed_md = ""

# Chunks produced by the last run.
if "chunks" not in ss:
    ss.chunks = []  # type: ignore | list[Chunk]

# keep track of the uploader to clear the file
if "uploader_key" not in ss:
    ss.uploader_key = 1
55
def get_parser(fileext: str) -> AbstractParser:
    """Return the chunknorris parser suited to a file extension.

    Args:
        fileext: lowercase extension including the leading dot (e.g. ".pdf").

    Raises:
        ValueError: if no parser handles the extension.
    """
    if fileext == ".md":
        return MarkdownParser()
    if fileext == ".html":
        return HTMLParser()
    if fileext == ".pdf":
        # OCR is disabled for this demo (see info box shown for pdf files).
        return PdfParser(use_ocr="never")
    if fileext == ".docx":
        return DocxParser()
    if fileext in (".xls", ".xlsx", ".xlsm", ".xlsb", ".odf", ".ods", ".odt"):
        return ExcelParser()
    if fileext == ".csv":
        return CSVParser()
    raise ValueError("File format not supported by ChunkNorris")
76
+
77
+
78
def get_md_chunker() -> MarkdownChunker:
    """Build a MarkdownChunker from the sidebar settings stored in session state."""
    settings = {
        "max_headers_to_use": ss.max_headers_to_use,
        "max_chunk_word_count": ss.max_chunk_word_count,
        "hard_max_chunk_word_count": ss.hard_max_chunk_word_count,
        "min_chunk_word_count": ss.min_chunk_word_count,
        "hard_max_chunk_token_count": ss.hard_max_chunk_token_count,
        # Token counts are measured with the ada-002 embedding tokenizer.
        "tokenizer": tiktoken.encoding_for_model("text-embedding-ada-002"),
    }
    return MarkdownChunker(**settings)
88
+
89
+
90
# Extensions whose content must be handed to the parser as raw bytes;
# everything else is text and is decoded to str first.
_BINARY_EXTENSIONS = (
    ".pdf",
    ".docx",
    ".xls",
    ".xlsx",
    ".xlsm",
    ".xlsb",
    ".odf",
    ".ods",
    ".odt",
)


def parse_and_chunk(uploaded_file: UploadedFile | None):
    """Parse the uploaded file to markdown, chunk it, and store the results.

    Side effects: writes ss.parsing_time, ss.parsed_md and ss.chunks, and
    reports progress/errors through log(). Returns nothing.

    Args:
        uploaded_file: the file to process, or None if nothing was uploaded.
    """
    if uploaded_file is None:
        log("Please upload a file.", "warning")
        return
    log("Parsing and chunking...", "info")

    try:
        fileext = Path(uploaded_file.name).suffix.lower()
        parser = get_parser(fileext)
        start_time = time.perf_counter()
        # BUGFIX: previously only ".pdf" and ".xlsx" took the bytes path, so
        # other binary formats (.docx, .xls, .xlsm, ...) crashed on
        # .decode("utf-8"). Route every binary format through raw bytes.
        if fileext in _BINARY_EXTENSIONS:
            md_doc = parser.parse_string(uploaded_file.getvalue())
        else:
            md_doc = parser.parse_string(uploaded_file.getvalue().decode("utf-8"))
        # Chunking is identical for all formats, so it is done once here
        # instead of being duplicated per branch.
        chunks = get_md_chunker().chunk(md_doc)

        ss.parsing_time = time.perf_counter() - start_time
        ss.parsed_md = md_doc.to_string()
        ss.chunks = chunks
        log(
            f"Parsing and chunking took {round(ss.parsing_time, 4)} seconds.", "success"
        )

    except Exception as e:
        # Keep the UI message short; the exception detail goes to the server log.
        log("Error when parsing file.", "warning")
        print(e)
        return
122
+
123
+
124
def save_parsed_md():
    """Return the parsed markdown document as UTF-8 bytes for download."""
    markdown_text = ss.parsed_md
    return markdown_text.encode("utf-8")
127
+
128
+
129
def save_chunks():
    """Serialize the current chunks to pretty-printed JSON bytes (UTF-8).

    Each chunk becomes one object holding its metadata (minus "headers" and
    "content") plus a "text" field rendered with the current header setting.
    """
    serialized = []
    for chunk in ss.chunks:
        payload = {
            key: value
            for key, value in chunk.model_dump().items()
            if key not in ["headers", "content"]
        }
        payload["text"] = chunk.get_text(prepend_headers=ss.prepend_headers_to_chunks)
        serialized.append(payload)
    # ensure_ascii=False keeps accented/non-latin characters readable.
    return json.dumps(serialized, indent=4, ensure_ascii=False).encode("utf-8")
144
+
145
+
146
def log(message: str, log_type: Literal["success", "warning", "info"] = "info"):
    """Display a message in the shared LOGGER placeholder with a matching icon."""
    if log_type == "warning":
        LOGGER.warning(message, icon="⚠️")
    elif log_type == "success":
        LOGGER.success(message, icon="✅")
    elif log_type == "info":
        LOGGER.info(message, icon="ℹ️")
155
+
156
+
157
def load_sample_file(url: str):
    """Download a sample document and wrap it as a streamlit UploadedFile.

    Args:
        url: direct download URL of the sample file.

    Returns:
        An UploadedFile mimicking a user upload, or None if the download failed
        (an error is shown in the UI and details are printed to the server log).
    """
    try:
        # BUGFIX: a timeout keeps the app from hanging forever on a dead host,
        # and network errors previously propagated as unhandled exceptions.
        response = requests.get(url, timeout=30)
    except requests.RequestException as exc:
        print(exc)
        st.error("Failed to get data.")
        return None
    if response.status_code == 200:
        return UploadedFile(
            record=UploadedFileRec(
                file_id="sample_file",
                name=url.split("/")[-1],
                data=response.content,
                type="application/octet-stream",
            ),
            file_urls=[url],
        )
    else:
        print(response.status_code, response.content)
        st.error("Failed to get data.")
        return None
174
+
175
+
176
st.title("ChunkNorris.")
st.subheader("*Fast, smart, lightweight document chunking.*")

# --- Sidebar: chunking settings. Each widget writes its value into session
# state under `key`; get_md_chunker() reads those keys back when chunking. ---
st.sidebar.header("Chunking settings")
st.sidebar.markdown(
    "| [Documentation](https://wikit-ai.github.io/chunknorris/) | [Tutorials](https://wikit-ai.github.io/chunknorris/examples/) | [Repo](https://github.com/wikit-ai/chunknorris) |"
)
# Deepest header level that may be used as a chunk boundary.
st.sidebar.select_slider(
    label="Max header level to consider for chunking",
    options=["h1", "h2", "h3", "h4", "h5", "h6"],
    value="h4",
    key="max_headers_to_use",
    help="Max section header level to consider for chunking. Lower level headers won't be used to split a chunk into smaller chunks.",
    label_visibility="visible",
)

# Soft word cap: oversized chunks are first split on subsection headers.
st.sidebar.slider(
    label="Maximum words (soft maximum) per chunk",
    value=250,
    min_value=0,
    max_value=3000,
    step=50,
    key="max_chunk_word_count",
    help="Maximum number of words per chunk. If a chunk is bigger than this, chunk is split using subsection headers if any are available.",
    label_visibility="visible",
)

# Hard word cap: beyond this, chunks are split on newlines.
st.sidebar.slider(
    label="Maximum words (hard maximum) per chunk",
    value=400,
    min_value=100,
    max_value=3000,
    step=50,
    key="hard_max_chunk_word_count",
    help="The hard maximum number of words per chunk. If a chunk is bigger than this, chunk is split using newlines, still trying to preverse code blocks or tables integrity.",
    label_visibility="visible",
)

# Hard token cap, applied after the word-based splitting.
st.sidebar.slider(
    label="Maximum token (hard maximum) per chunk",
    value=400,
    min_value=100,
    max_value=8000,
    step=100,
    key="hard_max_chunk_token_count",
    help="The hard maximum number of tokens per chunk. If a chunk is bigger than this, chunk is split using newlines. Applied after the word-based chunking",
    label_visibility="visible",
)

# Chunks shorter than this are discarded.
st.sidebar.slider(
    label="Minumum words per chunk",
    value=10,
    min_value=0,
    max_value=50,
    step=1,
    key="min_chunk_word_count",
    help="The minimum words a chunk must have to avoid being discarded.",
    label_visibility="visible",
)

# Read by save_chunks() and by the chunk preview below.
st.sidebar.checkbox(
    "Prepend headers to chunk's text",
    value=True,
    key="prepend_headers_to_chunks",
    label_visibility="visible",
    help="Whether or not all the parent headers should be prepended to the chunk's text content. Might improve retrieval performance of the chunk as it preserves context.",
)

# NOTE(review): "sheet_parsers_output_format" is never read anywhere in this
# file — confirm whether the CSV/Excel parsers are supposed to consume it.
st.sidebar.select_slider(
    label="Parse sheets (.csv, .xlsx) as :",
    options=["JSON lines", "auto", "Markdown table"],
    value="auto",
    key="sheet_parsers_output_format",
    label_visibility="visible",
    help="How the tables should be parsed. JSON lines is easier to understand for an LLM and ensures headers wont be lost at the top of the document. Markdown table produces less tokens and is more suitable for non-CSV-like Excel spreadsheets.",
)
252
+
253
# --- File input: either a user upload (left) or a sample document (right).
# Outer columns are narrow spacers for centering. ---
_, col1, col2, _ = st.columns([0.1, 0.5, 0.3, 0.1])
with col1:
    uploaded_file = st.file_uploader(
        "Upload your own file...",
        type=[
            "md",
            "html",
            "pdf",
            "docx",
            "xls",
            "xlsx",
            "xlsm",
            "xlsb",
            "odf",
            "ods",
            "odt",
            "csv",
        ],
        # Changing ss.uploader_key would remount the widget and clear the file.
        key=ss.uploader_key,
    )

with col2:
    sample_file = st.selectbox(
        "... Or choose a sample file from the list.",
        options=list(SAMPLE_FILE.keys()),
        index=None,
    )
    # A selected sample overrides any uploaded file.
    if sample_file is not None:
        st.markdown(f"[View file]({SAMPLE_FILE[sample_file]})")
        uploaded_file = load_sample_file(SAMPLE_FILE[sample_file])


if uploaded_file is not None:
    # Process immediately on upload/selection; the button allows re-running
    # after the sidebar settings change.
    parse_and_chunk(uploaded_file)
    st.sidebar.button(
        "Parse & Chunk",
        on_click=parse_and_chunk,
        args=(uploaded_file,),
        type="primary",
        use_container_width=True,
    )
else:
    # No file: the button only warns, and stale results are cleared.
    st.sidebar.button(
        "Parse & Chunk",
        on_click=log,
        args=(
            "You must upload a file first.",
            "warning",
        ),
        type="secondary",
        use_container_width=True,
    )
    ss.parsed_md = ""
    ss.chunks = []
307
+
308
+
309
# --- Results: parsed markdown on the left, chunks on the right. ---
col1, col2 = st.columns(2)
with col1:
    if uploaded_file and ss.parsed_md:
        file_parsed_md = save_parsed_md()
        cola, colb = st.columns([0.25, 0.75])
        with colb:
            st.subheader("⚙️ Parsed Document", divider="blue")
        with cola:
            # Blank markdown line nudges the button into vertical alignment
            # with the subheader.
            st.markdown("\n")
            st.download_button(
                label="⬇️ Download",
                data=file_parsed_md,
                file_name="chunknorris_parsed_document.md",
                mime="text/markdown",
                use_container_width=True,
            )
        # OCR is disabled in get_parser(); let pdf users know.
        if Path(uploaded_file.name).suffix.lower() == ".pdf":
            st.info(
                "For the purpose of this demo, OCR on pdf documents is deactivated.",
                icon="ℹ️",
            )
        with st.expander("Parsed document", expanded=True):
            with st.container(height=600, border=False):
                st.markdown(ss.parsed_md)

with col2:
    if uploaded_file and ss.chunks:  # type: ignore | list[Chunk]
        file_chunks = save_chunks()
        cola, colb = st.columns([0.25, 0.75])
        with colb:
            st.subheader("📦 Chunks", divider="blue")
        with cola:
            # Same vertical-alignment trick as the left column.
            st.markdown("\n")
            st.download_button(
                label="⬇️ Download",
                data=file_chunks,
                file_name="chunknorris_chunks.json",
                mime="application/json",
                use_container_width=True,
            )
        # One collapsible preview per chunk.
        with st.container(border=False):
            for i, chunk in enumerate(ss.chunks):  # type: ignore | list[Chunk]
                with st.expander(f"Chunk {i+1}", expanded=False):
                    with st.container(height=300, border=False):
                        st.markdown(
                            chunk.get_text(prepend_headers=ss.prepend_headers_to_chunks)  # type: ignore | Chunk.get_text()
                        )