dnj0 commited on
Commit
5039972
Β·
verified Β·
1 Parent(s): 5f78fd3

Update src/app.py

Browse files
Files changed (1) hide show
  1. src/app.py +369 -196
src/app.py CHANGED
@@ -1,224 +1,397 @@
1
  """
2
- Main Streamlit Application for Multimodal RAG LLM System
 
3
  """
 
4
  import streamlit as st
5
  import os
6
- import tempfile
7
  from pathlib import Path
 
 
8
  from pdf_parser import PDFParser
9
  from vector_store import VectorStore
10
- from rag_system import MultimodalRAG
11
  from config import UPLOAD_FOLDER, MAX_PDF_SIZE_MB
12
- import json
13
 
14
 
15
- def initialize_session_state():
16
- """Initialize Streamlit session state"""
17
- if 'vector_store' not in st.session_state:
18
- st.session_state.vector_store = VectorStore()
19
-
20
- if 'pdf_parser' not in st.session_state:
21
- st.session_state.pdf_parser = PDFParser()
22
-
23
- if 'rag_system' not in st.session_state:
24
- st.session_state.rag_system = None
25
-
26
- if 'api_key_set' not in st.session_state:
27
- st.session_state.api_key_set = False
28
-
29
- if 'processed_documents' not in st.session_state:
30
- st.session_state.processed_documents = st.session_state.pdf_parser.processed_files
31
 
 
 
 
 
 
 
32
 
33
- def load_existing_documents():
34
- """Load documents that were processed in previous runs"""
35
- docs = st.session_state.pdf_parser.get_all_documents()
36
-
37
- for doc_id, doc_data in docs.items():
38
- # Check if this document is already in vector store
39
- st.session_state.vector_store.add_documents(doc_data, doc_id)
40
-
41
-
42
- def process_uploaded_pdf(pdf_file):
43
- """Process uploaded PDF file"""
44
- try:
45
- # Save temporary file
46
- with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
47
- tmp_file.write(pdf_file.getbuffer())
48
- tmp_path = tmp_file.name
49
-
50
- # Check file size
51
- file_size_mb = os.path.getsize(tmp_path) / (1024 * 1024)
52
- if file_size_mb > MAX_PDF_SIZE_MB:
53
- st.error(f"File size {file_size_mb:.2f}MB exceeds maximum {MAX_PDF_SIZE_MB}MB")
54
- os.remove(tmp_path)
55
- return False
56
-
57
- # Parse PDF
58
- with st.spinner(f"Processing {pdf_file.name}..."):
59
- text, images, tables = st.session_state.pdf_parser.parse_pdf(tmp_path)
60
-
61
- # Add to vector store
62
- doc_data = {
63
- 'text': text,
64
- 'images': images,
65
- 'tables': tables
66
- }
67
- doc_id = Path(pdf_file.name).stem
68
- st.session_state.vector_store.add_documents(doc_data, doc_id)
69
- st.session_state.vector_store.persist()
70
-
71
- # Store document info
72
- st.session_state.processed_documents[doc_id] = st.session_state.pdf_parser.processed_files.get(doc_id, '')
73
-
74
- # Clean up
75
- os.remove(tmp_path)
76
-
77
- st.success(f"Successfully processed: {pdf_file.name}")
78
- st.info(f"Extracted: {len(text)} characters, {len(images)} images, {len(tables)} tables")
79
- return True
80
-
81
- except Exception as e:
82
- st.error(f"Error processing PDF: {str(e)}")
83
- return False
84
 
 
 
85
 
86
- def main():
87
- st.set_page_config(page_title="Multimodal RAG LLM System", layout="wide")
88
- st.title("πŸ“„ Multimodal RAG LLM System")
89
- st.markdown("Extract, analyze, and query PDF documents with text, images, and tables using AI")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
 
91
- # Initialize session state
92
- initialize_session_state()
93
 
94
- # Sidebar for settings
95
- with st.sidebar:
96
- st.header("βš™οΈ Settings")
97
-
98
- # API Key input (masked)
99
- api_key = st.text_input(
100
- "Enter OpenAI API Key",
101
- type="password",
102
- help="Your OpenAI API key for GPT-4o access"
103
- )
104
-
105
- if api_key:
106
- st.session_state.rag_system = MultimodalRAG(api_key=api_key)
107
- st.session_state.api_key_set = True
108
- st.success("βœ… API Key set")
109
-
110
- st.divider()
111
-
112
- # Vector store info
113
- st.subheader("πŸ“Š Vector Store Status")
114
- collection_info = st.session_state.vector_store.get_collection_info()
115
- st.metric("Documents in Store", collection_info.get('count', 0))
116
-
117
- st.divider()
118
-
119
- # Document management
120
- st.subheader("πŸ“ Document Management")
121
- if st.button("Refresh Document List"):
122
- load_existing_documents()
123
- st.success("Document list refreshed")
124
-
125
- if st.session_state.processed_documents:
126
- st.write("**Processed Documents:**")
127
- for doc_id in st.session_state.processed_documents.keys():
128
- col1, col2 = st.columns([3, 1])
129
- col1.write(f"πŸ“Œ {doc_id}")
130
- if col2.button("πŸ—‘οΈ", key=f"delete_{doc_id}"):
131
- st.session_state.vector_store.delete_by_doc_id(doc_id)
132
- del st.session_state.processed_documents[doc_id]
133
- st.rerun()
134
 
135
- # Main area
136
- st.header("πŸ“€ Upload PDF Document")
137
- col1, col2 = st.columns([2, 1])
138
-
139
- if st.button("βš™οΈ Summarize & Store Each Component"):
140
- results = rag_system.process_and_store_document(
141
- text=st.session_state.current_text,
142
- images=st.session_state.current_images,
143
- tables=st.session_state.current_tables,
144
- vector_store=vector_store,
145
- doc_id=st.session_state.current_document
146
- )
 
 
 
 
147
 
148
- with col1:
149
- uploaded_file = st.file_uploader(
150
- "Choose a PDF file",
151
- type="pdf",
152
- help=f"Maximum file size: {MAX_PDF_SIZE_MB}MB"
153
- )
154
 
155
- if uploaded_file:
156
- if process_uploaded_pdf(uploaded_file):
157
- st.rerun()
 
 
 
 
 
 
 
 
 
158
 
159
  st.divider()
160
 
161
- # Q&A Section
162
- if st.session_state.api_key_set:
163
- st.header("❓ Ask Questions About Your Documents")
164
-
165
- # Check if there are documents in vector store
166
- collection_info = st.session_state.vector_store.get_collection_info()
167
- if collection_info.get('count', 0) == 0:
168
- st.warning("No documents in vector store. Please upload a PDF first.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
  else:
170
- # Question input
171
- question = st.text_area(
172
- "Enter your question",
173
- placeholder="Ask anything about your PDF documents...",
174
- height=100
175
- )
176
-
177
- col1, col2 = st.columns([1, 4])
178
-
179
- with col1:
180
- if st.button("πŸ” Search & Answer", type="primary"):
181
- if question.strip():
182
- with st.spinner("Searching and generating answer..."):
183
- # Search vector store
184
- search_results = st.session_state.vector_store.search(question, n_results=5)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
- if search_results:
187
- # Generate answer
188
- answer = st.session_state.rag_system.answer_question(question, search_results)
189
-
190
- # Display results
191
- st.subheader("πŸ€– Answer")
192
- st.write(answer)
193
-
194
- # Display search results
195
- with st.expander("πŸ“š Source Documents"):
196
- for idx, result in enumerate(search_results, 1):
197
- st.write(f"**[{result.get('type', 'unknown').upper()}]**")
198
- st.write(result.get('content', ''))
199
- st.divider()
200
  else:
201
- st.warning("No relevant documents found. Try different search terms.")
202
- else:
203
- st.warning("Please enter a question first.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  else:
205
- st.warning("⚠️ Please enter your OpenAI API Key in the sidebar to use the Q&A feature.")
206
-
207
- st.divider()
208
-
209
- # Footer
210
- st.markdown("---")
211
- st.markdown(
212
- """
213
- **About this system:**
214
- - Extracts text, images, and tables from PDF documents
215
- - Uses CLIP-ViT embeddings for multimodal understanding
216
- - Stores documents locally in ChromaDB vector store
217
- - Analyzes queries using OpenAI GPT-4o
218
- - Supports Russian language documents
219
- """
220
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
 
 
 
222
 
223
- if __name__ == "__main__":
224
- main()
 
 
 
 
 
"""
Multimodal RAG LLM System - Streamlit App
Complete working version with VISUAL image analysis using gpt-4o
"""

import streamlit as st
import os  # NOTE(review): appears unused in this version — confirm before removing
from pathlib import Path

# Import optimized versions
from pdf_parser import PDFParser
from vector_store import VectorStore
from rag_system import VisualMultimodalRAG  # NEW - Vision model
from config import UPLOAD_FOLDER, MAX_PDF_SIZE_MB
 
# ============================================================================
# PAGE CONFIGURATION
# ============================================================================

# st.set_page_config must be the first Streamlit command executed in the app.
st.set_page_config(
    page_title="πŸ“„ Multimodal RAG LLM System",
    page_icon="πŸ€–",
    layout="wide",
    initial_sidebar_state="expanded",
)
27
 
# ============================================================================
# SESSION STATE INITIALIZATION
# ============================================================================

# Defaults for every session-state key this app reads.  Filling in only the
# missing keys preserves values across Streamlit reruns, exactly like the
# repeated `if key not in st.session_state` stanzas this replaces.
_SESSION_DEFAULTS = {
    'api_key_set': False,          # True once a non-empty API key was entered
    'api_key': None,               # raw OpenAI API key string
    'visual_rag_system': None,     # VisualMultimodalRAG instance (NEW - vision model)
    'vector_store': None,          # VectorStore (ChromaDB) instance
    'parser': None,                # PDFParser instance
    'current_document': None,      # filename of the last parsed PDF
    'current_text': None,          # extracted text of the current document
    'current_images': None,        # extracted images of the current document
    'current_tables': None,        # extracted tables of the current document
    'processing_results': None,    # NEW: output of process_and_store_document()
}

for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
61
+
62
+
# ============================================================================
# MAIN HEADER
# ============================================================================

st.title("πŸ“„ Multimodal RAG LLM System")
st.markdown("""
Process PDF documents with visual image analysis:
- **PDF Parser** with OCR for Russian & English
- **Visual Analysis** (gpt-4o) for image understanding
- **Vector Store** (ChromaDB) for semantic search
- **Individual Component** summarization and storage
""")
75
+
76
+
# ============================================================================
# SIDEBAR - CONFIGURATION
# ============================================================================

with st.sidebar:
    st.header("βš™οΈ Configuration")

    # --- API Key Section ----------------------------------------------------
    st.subheader("πŸ”‘ OpenAI API Key")

    api_key = st.text_input(
        "Enter your OpenAI API key:",
        type="password",
        key="api_key_input",
    )

    if api_key:
        st.session_state.api_key = api_key
        st.session_state.api_key_set = True

        # Initialize RAG systems only once per session; reruns reuse the
        # instances already stored in session state.
        if st.session_state.visual_rag_system is None:
            try:
                st.session_state.visual_rag_system = VisualMultimodalRAG(api_key=api_key, debug=True)  # NEW
                st.session_state.vector_store = VectorStore()
                st.session_state.parser = PDFParser(debug=True)
                st.success("βœ… API Key set & systems initialized")
            except Exception as e:
                st.error(f"Error initializing systems: {e}")
    else:
        st.session_state.api_key_set = False
        st.warning("⚠️ Please enter your API key to continue")

    st.divider()

    # --- Vector Store Status ------------------------------------------------
    st.subheader("πŸ“Š Vector Store Status")
    if st.session_state.vector_store:
        try:
            info = st.session_state.vector_store.get_collection_info()
            st.metric("Items in Store", info['count'])
            st.metric("Status", info['status'])
            st.caption(f"Path: {info['persist_path']}")
        except Exception as e:
            st.error(f"Error getting store info: {e}")
    else:
        st.info("Set API key to initialize vector store")

    st.divider()

    # --- Document Management ------------------------------------------------
    st.subheader("πŸ“ Document Management")
    if st.button("πŸ”„ Clear Vector Store"):
        if st.session_state.vector_store:
            try:
                st.session_state.vector_store.clear_all()
                st.success("βœ… Vector store cleared")
            except Exception as e:
                st.error(f"Error clearing store: {e}")
+
137
+
138
+ # ============================================================================
139
+ # MAIN CONTENT
140
+ # ============================================================================
141
+
142
+ # Upload Section
143
+ st.header("πŸ“€ Upload PDF Document")
144
+
145
+ uploaded_file = st.file_uploader(
146
+ "Choose a PDF file",
147
+ type=['pdf'],
148
+ help="PDF with text, images, and tables"
149
+ )
150
+
151
+ if uploaded_file is not None:
152
+ # Save uploaded file
153
+ upload_path = Path(UPLOAD_FOLDER)
154
+ upload_path.mkdir(exist_ok=True)
155
+
156
+ file_path = upload_path / uploaded_file.name
157
+ with open(file_path, 'wb') as f:
158
+ f.write(uploaded_file.getbuffer())
159
+
160
+ st.success(f"βœ… File saved: {uploaded_file.name}")
161
+
162
+ # Parse PDF
163
+ if st.button("πŸ” Parse PDF"):
164
+ if not st.session_state.api_key_set:
165
+ st.error("❌ Please set OpenAI API key first")
166
  else:
167
+ try:
168
+ with st.spinner("πŸ“„ Parsing PDF..."):
169
+ print(f"\n{'='*70}")
170
+ print(f"PARSING: {uploaded_file.name}")
171
+ print(f"{'='*70}")
172
+
173
+ # Parse PDF - returns text, images, tables
174
+ parser = st.session_state.parser
175
+ text, images, tables = parser.parse_pdf(str(file_path))
176
+
177
+ # Store in session state
178
+ st.session_state.current_document = uploaded_file.name
179
+ st.session_state.current_text = text
180
+ st.session_state.current_images = images
181
+ st.session_state.current_tables = tables
182
+
183
+ # Display results
184
+ col1, col2, col3 = st.columns(3)
185
+ with col1:
186
+ st.metric("πŸ“ Text", f"{len(text):,} chars")
187
+ with col2:
188
+ st.metric("πŸ–ΌοΈ Images", len(images))
189
+ with col3:
190
+ st.metric("πŸ“‹ Tables", len(tables))
191
+
192
+ # Show image OCR details
193
+ if images:
194
+ st.subheader("πŸ–ΌοΈ Extracted Images")
195
+ for idx, img in enumerate(images):
196
+ ocr_text = img.get('ocr_text', '')
197
+ ocr_len = len(ocr_text)
198
 
199
+ if ocr_len > 0:
200
+ st.success(f"βœ… Image {idx}: {ocr_len} characters (OCR)")
 
 
 
 
 
 
 
 
 
 
 
 
201
  else:
202
+ st.warning(f"⚠️ Image {idx}: No OCR text (will use visual analysis)")
203
+
204
+ st.success("βœ… PDF parsing complete!")
205
+
206
+ except Exception as e:
207
+ st.error(f"❌ Error parsing PDF: {e}")
208
+ print(f"Error: {e}")
209
+
210
+
# ============================================================================
# VISUAL IMAGE ANALYSIS & COMPONENT STORAGE
# ============================================================================

st.divider()
st.header("πŸ–ΌοΈ Visual Analysis & Storage")

st.info("""
**How it works:**
1. Images are sent to gpt-4o for visual analysis (not just text OCR)
2. Text is split into chunks and each chunk is summarized
3. Tables are analyzed individually
4. ALL summaries are stored in the vector store for semantic search
""")

if st.button("πŸ–ΌοΈ Analyze Images Visually & Store Components"):
    if not st.session_state.api_key_set:
        st.error("❌ Please set OpenAI API key first")
    elif st.session_state.current_text is None:
        st.error("❌ Please parse a PDF document first")
    else:
        try:
            with st.spinner("πŸ–ΌοΈ Analyzing images visually with gpt-4o..."):
                print(f"\n{'='*70}")
                print("VISUAL IMAGE ANALYSIS")
                print(f"{'='*70}")

                # Process with visual analysis
                visual_rag = st.session_state.visual_rag_system
                vector_store = st.session_state.vector_store

                results = visual_rag.process_and_store_document(
                    text=st.session_state.current_text,
                    images=st.session_state.current_images,  # Actual images sent to gpt-4o
                    tables=st.session_state.current_tables,
                    vector_store=vector_store,
                    doc_id=st.session_state.current_document or "current_doc",
                )

                st.session_state.processing_results = results

                # Display summary metrics
                st.success("βœ… Visual analysis complete & stored!")

                col1, col2, col3 = st.columns(3)
                with col1:
                    st.metric("πŸ–ΌοΈ Images Analyzed", len(results['image_visual_analyses']))
                with col2:
                    st.metric("πŸ“ Text Chunks", len(results['text_summaries']))
                with col3:
                    st.metric("πŸ“‹ Tables Analyzed", len(results['table_summaries']))

                st.metric("πŸ“Š Total Stored in Vector", results['total_stored'])

                # Show per-image visual analyses
                if results['image_visual_analyses']:
                    st.subheader("πŸ–ΌοΈ Visual Image Analyses (gpt-4o)")
                    for img_analysis in results['image_visual_analyses']:
                        with st.expander(f"Image {img_analysis['image_index']} - Visual Analysis"):
                            st.write("**Visual Analysis by gpt-4o:**")
                            st.write(img_analysis['visual_analysis'])

                            st.write("**Image Path:**")
                            st.code(img_analysis['image_path'])

                            if img_analysis['ocr_text']:
                                st.write("**OCR Text (backup):**")
                                st.text(img_analysis['ocr_text'][:500])

                # Show text chunk summaries
                if results['text_summaries']:
                    st.subheader("πŸ“ Text Chunk Summaries")
                    for chunk_summary in results['text_summaries']:
                        with st.expander(
                            f"Chunk {chunk_summary['chunk_index']} "
                            f"({chunk_summary['chunk_length']} chars)"
                        ):
                            st.write("**Summary:**")
                            st.write(chunk_summary['summary'])
                            st.write("**Original Text (first 500 chars):**")
                            st.text(chunk_summary['original_text'])

                # Show table analyses
                if results['table_summaries']:
                    st.subheader("πŸ“‹ Table Analyses")
                    for table_summary in results['table_summaries']:
                        with st.expander(
                            f"Table {table_summary['table_index']} "
                            f"({table_summary['table_length']} chars)"
                        ):
                            st.write("**Analysis:**")
                            st.write(table_summary['summary'])
                            st.write("**Original Content (first 500 chars):**")
                            st.text(table_summary['original_content'])

                print("\nβœ… Visual analysis processing complete!")

        except Exception as e:
            st.error(f"❌ Error during visual analysis: {e}")
            print(f"Error: {e}")
311
+
312
+
# ============================================================================
# QUESTION & ANSWERING
# ============================================================================

st.divider()
st.header("❓ Ask Questions About Document")

question = st.text_area(
    "Enter your question:",
    height=100,
    placeholder="What does the document say about...?",
)

if st.button("πŸ” Search & Answer"):
    if not st.session_state.api_key_set:
        st.error("❌ Please set OpenAI API key first")
    elif st.session_state.current_text is None:
        st.error("❌ Please parse a PDF document first")
    elif not question:
        st.error("❌ Please enter a question")
    else:
        try:
            with st.spinner("πŸ”„ Searching and generating answer..."):
                print(f"\n{'='*70}")
                print(f"QUESTION: {question}")
                print(f"{'='*70}")

                # Search vector store
                store = st.session_state.vector_store

                # Add the raw document text to the store before searching.
                # NOTE(review): this runs on EVERY search click and may insert
                # duplicate text entries unless add_documents() deduplicates by
                # doc_id — verify against VectorStore's implementation.
                doc_name = st.session_state.current_document or "current_doc"
                doc_data = {
                    'text': st.session_state.current_text,
                    'images': [],  # Images already stored via visual analysis
                    'tables': [],  # Tables already stored via visual analysis
                }
                store.add_documents(doc_data, doc_name)

                # Search
                search_results = store.search(question, n_results=5)

                # Display results
                st.write("### Search Results")
                for idx, result in enumerate(search_results, 1):
                    content_type = result.get('type', 'unknown')
                    distance = result.get('distance', 0)
                    content = result.get('content', '')

                    with st.expander(
                        f"Result {idx} - {content_type.upper()} "
                        f"(relevance: {1-distance:.2%})"
                    ):
                        st.write(content)

                st.success("βœ… Search complete! Use results above to understand the document.")

        except Exception as e:
            st.error(f"❌ Error processing question: {e}")
            print(f"Error: {e}")
373
+
374
+
# ============================================================================
# FOOTER
# ============================================================================

st.divider()

col1, col2, col3 = st.columns(3)

with col1:
    st.info("πŸ“– **Text Processing**: PyPDF2 extraction with UTF-8 support")

with col2:
    st.info("πŸ–ΌοΈ **Visual Analysis**: GPT-4o vision for image understanding")

with col3:
    st.info("πŸ“Š **Vector Storage**: ChromaDB with auto-persist")

st.caption(
    "Multimodal RAG System | "
    "Visual Image Analysis | "
    "Russian Language Support | "
    "Individual Component Summarization"
)