Rageshhf committed on
Commit
84d6673
·
verified ·
1 Parent(s): d8717ee

Upload 5 files

Files changed (4)
  1. Dockerfile +28 -0
  2. app.py +476 -0
  3. compose.yaml +11 -0
  4. requirements.txt +15 -0
Dockerfile ADDED
@@ -0,0 +1,28 @@
+ # 1. Use an official Python runtime
+ FROM python:3.12-slim
+
+ # 2. Set working directory
+ WORKDIR /app
+
+ # 3. Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     build-essential \
+     git \
+     poppler-utils \
+     tesseract-ocr \
+     && rm -rf /var/lib/apt/lists/*
+
+ # 4. Copy requirements (create one if needed)
+ COPY requirements.txt .
+
+ # 5. Install dependencies
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # 6. Copy the app code
+ COPY . .
+
+ # 7. Expose Gradio default port
+ EXPOSE 7860
+
+ # 8. Run the Gradio app
+ CMD ["python", "app.py"]
app.py ADDED
@@ -0,0 +1,476 @@
+ import gradio as gr
+ import bs4
+ from langchain import hub
+ from langchain_unstructured import UnstructuredLoader
+ from langchain_core.documents import Document
+ from typing_extensions import List, TypedDict
+ from langchain_core.vectorstores import InMemoryVectorStore
+ from langgraph.graph import START, StateGraph, MessagesState
+ from langchain.chat_models import init_chat_model
+ from langgraph.graph import END
+ from langgraph.prebuilt import ToolNode, tools_condition
+ from langchain_core.tools import tool
+ from langchain_core.messages import SystemMessage, HumanMessage
+ import getpass
+ import os
+ from langchain_huggingface import HuggingFaceEmbeddings
+ import base64
+ import json
+ import re
+ import pytesseract
+ import cv2
+ import numpy as np
+ from pdf2image import convert_from_path
+
+ # ---------- SETUP ----------
+ if not os.environ.get("GOOGLE_API_KEY"):
+     os.environ["GOOGLE_API_KEY"] = getpass.getpass("Enter API key for Google Gemini: ")
+
+ llm = init_chat_model("gemini-1.5-flash", model_provider="google_genai")
+ embeddings = HuggingFaceEmbeddings(
+     model_name="intfloat/multilingual-e5-large-instruct",
+     model_kwargs={"device": 'cpu', "trust_remote_code": True}
+ )
+
+ vector_store = InMemoryVectorStore(embeddings)
+ prompt = hub.pull("rlm/rag-prompt")
+
+ # ---------- RETRIEVAL TOOL ----------
+ @tool(response_format="content_and_artifact")
+ def retrieve(query: str):
+     """Retrieve information related to a query."""
+     retrieved_docs = vector_store.similarity_search(query, k=3)
+     serialized = "\n\n".join(
+         (f"Source: {doc.metadata}\nContent: {doc.page_content}")
+         for doc in retrieved_docs
+     )
+     return serialized, retrieved_docs
+
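+ # With response_format="content_and_artifact", the tool returns a (content, artifact)
+ # pair: the serialized string is what the model sees, while the raw Documents are
+ # attached to the resulting ToolMessage as its artifact.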
+ # ---------- GRAPH FUNCTIONS FOR RAG ----------
+ def query_or_respond(state: MessagesState):
+     """Generate tool call for retrieval or respond."""
+     llm_with_tools = llm.bind_tools([retrieve])
+     response = llm_with_tools.invoke(state["messages"])
+     return {"messages": [response]}
+
+ tools = ToolNode([retrieve])
+
+ def generate(state: MessagesState):
+     """Generate answer."""
+     recent_tool_messages = []
+     for message in reversed(state["messages"]):
+         if message.type == "tool":
+             recent_tool_messages.append(message)
+         else:
+             break
+     tool_messages = recent_tool_messages[::-1]
+
+     docs_content = "\n\n".join(doc.content for doc in tool_messages)
+     print("retrieved docs:", docs_content)
+     system_message_content = (
+         "You are an assistant for question-answering tasks. "
+         "Use the following pieces of retrieved context to answer "
+         "the question. If you don't know the answer, say that you don't know. "
+         "Use three sentences maximum and keep the answer concise."
+         "\n\n"
+         f"{docs_content}"
+     )
+     conversation_messages = [
+         message
+         for message in state["messages"]
+         if message.type in ("human", "system")
+         or (message.type == "ai" and not message.tool_calls)
+     ]
+     prompt = [SystemMessage(system_message_content)] + conversation_messages
+
+     response = llm.invoke(prompt)
+     return {"messages": [response]}
+
+ # ---------- BUILD RAG GRAPH ----------
+ graph_builder = StateGraph(MessagesState)
+ graph_builder.add_node(query_or_respond)
+ graph_builder.add_node(tools)
+ graph_builder.add_node(generate)
+
+ graph_builder.set_entry_point("query_or_respond")
+ graph_builder.add_conditional_edges(
+     "query_or_respond",
+     tools_condition,
+     {END: END, "tools": "tools"},
+ )
+ graph_builder.add_edge("tools", "generate")
+ graph_builder.add_edge("generate", END)
+
+ rag_graph = graph_builder.compile()
+
+
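+ # Compiled RAG flow: query_or_respond -> (tools_condition) -> tools -> generate -> END.
+ # When the model makes no tool call, tools_condition routes straight to END and the
+ # direct reply from query_or_respond is returned.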
+ # ---------- FORM FILLING AGENTS ----------
+ class FormState(TypedDict):
+     template_path: str
+     source_path: str
+     schema: dict
+     filled_data: dict
+     filled_image: str
+
+ def find_bbox(file_path, prim_schema, alignment="down"):
+     keys = list(prim_schema.keys())
+     img = cv2.imread(file_path)
+     img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+     data = pytesseract.image_to_data(img_rgb, output_type=pytesseract.Output.DICT)
+
+     words = []
+     for i in range(len(data['text'])):
+         if int(data['conf'][i]) > -1 and data['text'][i].strip():
+             words.append({
+                 'text': data['text'][i],
+                 'left': data['left'][i],
+                 'top': data['top'][i],
+                 'width': data['width'][i],
+                 'height': data['height'][i],
+                 'conf': int(data['conf'][i])
+             })
+
+     words.sort(key=lambda w: (w['top'], w['left']))
+
+     lines = []
+     current_line = []
+     current_top = None if not words else words[0]['top']
+     for word in words:
+         if current_line and abs(word['top'] - current_top) > 20:
+             lines.append(current_line)
+             current_line = []
+         current_line.append(word)
+         current_top = word['top']
+     if current_line:
+         lines.append(current_line)
+
+     boxes = []
+     for line in lines:
+         if line:
+             full_text = ' '.join(w['text'] for w in line)
+             left = min(w['left'] for w in line)
+             top = min(w['top'] for w in line)
+             right = max(w['left'] + w['width'] for w in line)
+             bottom = max(w['top'] + w['height'] for w in line)
+             boxes.append({
+                 'text': full_text,
+                 'clean': full_text.lower().replace(" ", "").replace(":", ""),
+                 'left': left,
+                 'top': top,
+                 'width': right - left,
+                 'height': bottom - top
+             })
+
+     boxes.sort(key=lambda b: (b['top'], b['left']))
+
+     schema = {}
+     height, width = img.shape[:2]
+     threshold = 40
+
+     for idx, box in enumerate(boxes):
+         if box['clean'] in keys and box['clean'] not in schema:
+             key = box['clean']
+             label_bbox_norm = [box['left'] / width, box['top'] / height, (box['left'] + box['width']) / width, (box['top'] + box['height']) / height]
+             label_bbox_pixel = [box['left'], box['top'], box['left'] + box['width'], box['top'] + box['height']]
+             input_bbox_pixel = None
+             if alignment == "right":
+                 # Look for the next box to the right within the same line with tighter vertical alignment
+                 for next_idx in range(idx + 1, len(boxes)):
+                     next_box = boxes[next_idx]
+                     if (next_box['top'] >= box['top'] and next_box['top'] + next_box['height'] <= box['top'] + box['height'] + 10 and
+                             next_box['left'] > box['left'] + box['width'] and next_box['left'] < box['left'] + box['width'] + 300):
+                         input_bbox_norm = [next_box['left'] / width, next_box['top'] / height, (next_box['left'] + next_box['width']) / width, (next_box['top'] + next_box['height']) / height]
+                         input_bbox_pixel = [next_box['left'], next_box['top'], next_box['left'] + next_box['width'], next_box['top'] + next_box['height']]
+                         break
+             else:  # Default to "down"
+                 # Look for the next box below the key
+                 for next_idx in range(idx + 1, len(boxes)):
+                     next_box = boxes[next_idx]
+                     if next_box['top'] > box['top'] + box['height'] and abs(next_box['left'] - box['left']) < 50 and next_box['top'] - box['top'] < 100:
+                         input_bbox_norm = [next_box['left'] / width, next_box['top'] / height, (next_box['left'] + next_box['width']) / width, (next_box['top'] + next_box['height']) / height]
+                         input_bbox_pixel = [next_box['left'], next_box['top'], next_box['left'] + next_box['width'], next_box['top'] + next_box['height']]
+                         break
+
+             # Fall back to a synthetic box when no neighbouring OCR box was found
+             if input_bbox_pixel is None:
+                 if alignment == "right":
+                     input_x = box['left'] + box['width'] + threshold
+                     input_y = box['top']
+                     input_w = 200  # Adjusted to match typical input width in your image
+                     input_h = box['height']
+                 else:  # "down"
+                     input_x = box['left']
+                     input_y = box['top'] + box['height'] + threshold
+                     input_w = box['width']
+                     input_h = 20
+                 input_bbox_norm = [input_x / width, input_y / height, (input_x + input_w) / width, (input_y + input_h) / height]
+                 input_bbox_pixel = [input_x, input_y, input_x + input_w, input_y + input_h]
+
+             schema[key] = input_bbox_pixel
+     return schema
+
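+ # find_bbox heuristic: OCR words are grouped into lines, line text is normalised and
+ # matched against the schema keys, and each key gets the pixel bbox of the nearest
+ # candidate box (to the right or below, depending on `alignment`); if none is found,
+ # a fixed-size box offset by `threshold` pixels is used instead.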
+ def convert_template_file(file_path):
+     ext = os.path.splitext(file_path)[1].lower()
+     if ext == ".pdf":
+         images = convert_from_path(file_path, dpi=300)
+         out_path = "template_converted.png"
+         images[0].save(out_path, "PNG")
+         return out_path
+     else:
+         raise ValueError("Unsupported template file format")
+
+ def analyze_template(file_path: str) -> dict:
+     if file_path.endswith("pdf"):
+         file_path = convert_template_file(file_path)
+
+     with open(file_path, "rb") as image_file:
+         image_data = base64.b64encode(image_file.read()).decode("utf-8")
+
+     message = {
+         "role": "user",
+         "content": [
+             {
+                 "type": "text",
+                 "text": "Analyse the following form and return just the JSON containing keys and values present in the image. "
+                 "If the corresponding value is not present keep it as None. "
+                 "Keep in mind that the keys cannot contain any spaces and should be lowercase. The output should be json loadable. ",
+             },
+             {
+                 "type": "image",
+                 "source_type": "base64",
+                 "data": image_data,
+                 "mime_type": "image/jpeg",
+             },
+         ]
+     }
+
+     response = llm.invoke([message])
+     out = response.text()
+     match = re.search(r"\{.*\}", out, re.DOTALL)
+     if match:
+         json_string = match.group(0)  # clean JSON
+         schema = json.loads(json_string)
+     else:
+         schema = {}
+     updated_schema = find_bbox(file_path, schema)
+     position_schema = updated_schema
+
+     return position_schema
+
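+ # analyze_template trusts the model to emit JSON somewhere in its reply; the regex
+ # extracts the first {...} block, and anything unparseable falls back to an empty schema.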
+ def extract_values(file_path: str, schema: dict) -> dict:
+     filled_schema = {}
+     schema_keys = list(schema.keys())
+
+     if file_path.endswith("txt"):
+         with open(file_path, "r") as f:
+             text = f.read()
+         instr = f"Extract values from the source text for the following fields: {schema_keys}. Return a JSON with keys and extracted values.\n source text: {text}"
+         message = {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "text",
+                     "text": instr,
+                 }
+             ]
+         }
+
+     else:
+         if file_path.endswith("pdf"):
+             images = convert_from_path(file_path, dpi=300)
+             out_path = "source_converted.png"
+             images[0].save(out_path, "PNG")
+             with open(out_path, "rb") as image_file:
+                 image_data = base64.b64encode(image_file.read()).decode("utf-8")
+
+         if file_path.endswith("png") or file_path.endswith("jpg"):
+             with open(file_path, "rb") as image_file:
+                 image_data = base64.b64encode(image_file.read()).decode("utf-8")
+
+         text = f"Extract values from the image for the following fields: {schema_keys}. Return a JSON with keys and extracted values."
+
+         message = {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "text",
+                     "text": text,
+                 },
+                 {
+                     "type": "image",
+                     "source_type": "base64",
+                     "data": image_data,
+                     "mime_type": "image/jpeg",
+                 },
+             ]
+         }
+
+     response = llm.invoke([message])
+     out = response.text()
+
+     match = re.search(r"\{.*\}", out, re.DOTALL)
+     if match:
+         json_string = match.group(0)  # clean JSON
+         filled = json.loads(json_string)
+     else:
+         filled = {}
+
+     for key in schema:
+         if key in filled:
+             filled_schema[key] = filled[key]
+     return filled_schema
+
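+ # Note: only .txt, .pdf, .png and .jpg sources are handled above; any other extension
+ # would fall into the image branch with image_data never assigned.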
+ def fill_template(state: FormState):
+     template_path = state["template_path"]
+     position = state["schema"]
+     filled_data = state["filled_data"]
+
+     img = cv2.imread(template_path)
+
+     for key, bbox in position.items():
+         if key in filled_data:
+             x1, y1, x2, y2 = bbox
+             text = filled_data[key]
+
+             # Position text inside the box (slightly padded); cast to str since
+             # extracted values may be numbers or None
+             cv2.putText(img, str(text), (x1 + 5, y2 - 5),
+                         cv2.FONT_HERSHEY_SIMPLEX, 0.8,
+                         (0, 0, 0), 2)
+
+     filled_path = template_path.replace(".jpg", "_filled.jpg").replace(".png", "_filled.png")
+     cv2.imwrite(filled_path, img)
+     return filled_path
+
+ def analyze_node(state: FormState):
+     schema = analyze_template(state["template_path"])
+     return {"schema": schema}
+
+ def extract_node(state: FormState):
+     filled = extract_values(state["source_path"], state["schema"])
+     return {"filled_data": filled}
+
+ def fill_node(state: FormState):
+     filled_image = fill_template(state)
+     return {"filled_image": filled_image}
+
+ # ---------- BUILD FORM FILLING GRAPH ----------
+ form_graph_builder = StateGraph(FormState)
+ form_graph_builder.add_node("analyze", analyze_node)
+ form_graph_builder.add_node("extract", extract_node)
+ form_graph_builder.add_node("fill", fill_node)
+ form_graph_builder.add_edge(START, "analyze")
+ form_graph_builder.add_edge("analyze", "extract")
+ form_graph_builder.add_edge("extract", "fill")
+ form_graph_builder.add_edge("fill", END)
+ form_graph = form_graph_builder.compile()
+
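+ # Form-filling pipeline: analyze (locate fields on the template) -> extract (pull
+ # matching values from the source) -> fill (draw the values onto the template image).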
+ # ---------- GRADIO APP FUNCTIONS ----------
+ def process_doc(file):
+     loader = UnstructuredLoader(
+         file_path=file.name,
+         extract_images_in_pdf=True,
+         languages=['ml', 'en']
+     )
+     docs = loader.load()
+     vector_store.add_documents(documents=docs)
+     return "✅ Document processed successfully! You can now ask questions."
+
+ def ask_question(query, history):
+     state = {"messages": [HumanMessage(content=query)]}
+     response_text = ""
+     for step in rag_graph.stream(state, stream_mode="values"):
+         response_text = step["messages"][-1].content
+     history.append((query, response_text))
+     return history, ""
+
+ def process_form_filling(source, template):
+     if not source or not template:
+         return {"error": "Please upload both source and template files."}, None
+     state = {
+         "template_path": template.name,
+         "source_path": source.name,
+         "schema": {},
+         "filled_data": {},
+         "filled_image": ""
+     }
+     result = form_graph.invoke(state)
+     return result["filled_data"], result["filled_image"]
+
+ theme = gr.themes.Soft(
+     primary_hue="blue",
+     secondary_hue="gray",
+     neutral_hue="slate",
+     font=[gr.themes.GoogleFont("Inter"), gr.themes.GoogleFont("Source Sans Pro"), "ui-sans-serif", "system-ui"],
+     font_mono=[gr.themes.GoogleFont("IBM Plex Mono"), "ui-monospace", "Consolas", "monospace"],
+ ).set(
+     body_background_fill="*neutral_50",
+     body_background_fill_dark="*neutral_900",
+     block_background_fill="*neutral_100",
+     block_background_fill_dark="*neutral_800",
+     block_border_width="1px",
+     block_radius="8px",
+     block_shadow="0 1px 3px rgba(0,0,0,0.1)",
+     block_shadow_dark="0 1px 3px rgba(255,255,255,0.1)",
+     button_primary_background_fill="*primary_500",
+     button_primary_background_fill_hover="*primary_600",
+     button_primary_text_color="white",
+     button_secondary_background_fill="*neutral_200",
+     button_secondary_background_fill_hover="*neutral_300",
+     button_secondary_text_color="*neutral_800",
+     input_background_fill="*neutral_50",
+     input_background_fill_dark="*neutral_800",
+     input_border_color="*neutral_200",
+     input_border_color_dark="*neutral_700",
+     panel_background_fill="*neutral_50",
+     panel_background_fill_dark="*neutral_900",
+     slider_color="*primary_500",
+ )
+
+ with gr.Blocks(theme=theme, css=".gradio-container {max-width: 1200px !important; margin: auto;}") as demo:
+     gr.Markdown(
+         """
+         # 📑 Multi-lingual Doc RAG and Form Filling System
+         """,
+         elem_classes="text-center"
+     )
+
+     with gr.Tabs():
+         with gr.Tab("📄 Document RAG"):
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     gr.Markdown("### Upload Document")
+                     upload_btn = gr.File(label="Select Document", file_types=[".pdf", ".txt", ".docx"], interactive=True)
+                     process_status = gr.Textbox(label="Processing Status", placeholder="Upload a document to start...", interactive=False)
+                     upload_btn.upload(process_doc, upload_btn, process_status)
+                 with gr.Column(scale=2):
+                     gr.Markdown("### Chat with Document")
+                     chatbot = gr.Chatbot(height=400, placeholder="Ask questions about your document here...")
+                     query = gr.Textbox(label="Your Question", placeholder="Type your question and press Enter...")
+                     query.submit(ask_question, [query, chatbot], [chatbot, query])
+
+         with gr.Tab("🖊️ Form Filling"):
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown("### Upload Files")
+                     source_upload = gr.File(label="Source File (Information Source)", file_types=[".jpg", ".png", ".txt", ".pdf"], interactive=True)
+                     template_upload = gr.File(label="Template File (Form to Fill)", file_types=[".jpg", ".png"], interactive=True)
+                     fill_btn = gr.Button("Process and Fill Form", variant="primary")
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown("### Extracted Data")
+                     output_json = gr.JSON(label="Filled Form Data (JSON)")
+                 with gr.Column():
+                     gr.Markdown("### Filled Form Preview")
+                     output_image = gr.Image(label="Filled Form Image", interactive=False)
+             fill_btn.click(process_form_filling, [source_upload, template_upload], [output_json, output_image])
+
+     gr.Markdown(
+         """
+         ---
+         *Note: for form filling, the system currently expects all required fields to be completed under their corresponding keys.*
+         """,
+         elem_classes="text-center"
+     )
+
+ demo.launch(server_name="0.0.0.0", server_port=7860)
compose.yaml ADDED
@@ -0,0 +1,11 @@
+ version: "3.9"
+ services:
+   rag-app:
+     build: .
+     container_name: rag_gradio_app
+     ports:
+       - "7860:7860"
+     environment:
+       - GOOGLE_API_KEY=${GOOGLE_API_KEY}
+     volumes:
+       - ./data:/app/data
requirements.txt ADDED
@@ -0,0 +1,15 @@
+ langchain-core
+ langgraph
+ langchain-community
+ beautifulsoup4
+ langchain-unstructured
+ unstructured-client
+ unstructured
+ python-magic
+ sentence-transformers
+ gradio
+ langchain-huggingface
+ langchain-google-genai
+ opencv-python
+ pdf2image
+ pytesseract