Update app.py

app.py
CHANGED
@@ -8,511 +8,14 @@ import re
 import uuid
 import pymupdf
 
-#
-
-
-
-
-os.system('pip install git+https://github.com/opendatalab/MinerU.git@dev')
-os.system('wget https://github.com/opendatalab/MinerU/raw/dev/scripts/download_models_hf.py -O download_models_hf.py')
+import ast  # newly inserted; add albumentations to requirements
+script_repr = os.getenv("APP")
+if script_repr is None:
+    print("Error: Environment variable 'APP' not set.")
+    sys.exit(1)
 
-# Model download (wrap in try/except or comment out if the environment has no network access)
 try:
-
+    exec(script_repr)
 except Exception as e:
-    print("
+    print(f"Error executing script: {e}")
+    sys.exit(1)
-
-
-###############################
-# Handle magic-pdf.json
-###############################
-json_path = "/home/user/magic-pdf.json"
-if os.path.exists(json_path):
-    # Load the existing file
-    with open(json_path, 'r', encoding='utf-8') as file:
-        data = json.load(file)
-else:
-    # Create defaults if the file is missing
-    data = {
-        "device-mode": "cuda",  # use "cpu" for CPU-only
-        "llm-aided-config": {
-            "title_aided": {
-                "api_key": os.getenv('apikey', ""),
-                "enable": bool(os.getenv('apikey'))
-            }
-        }
-    }
-    with open(json_path, 'w', encoding='utf-8') as file:
-        json.dump(data, file, indent=4)
-
-# Update as needed
-data['device-mode'] = "cuda"  # can be changed to "cpu", etc.
-if os.getenv('apikey'):
-    data['llm-aided-config']['title_aided']['api_key'] = os.getenv('apikey')
-    data['llm-aided-config']['title_aided']['enable'] = True
-
-with open(json_path, 'w', encoding='utf-8') as file:
-    json.dump(data, file, indent=4)
-
-# Copy paddleocr
-os.system('cp -r paddleocr /home/user/.paddleocr')
-
-###############################
-# Other libraries
-###############################
-import gradio as gr
-from loguru import logger
-from gradio_pdf import PDF
-
-###############################
-# magic_pdf modules
-###############################
-from magic_pdf.data.data_reader_writer import FileBasedDataReader
-from magic_pdf.libs.hash_utils import compute_sha256
-from magic_pdf.tools.common import do_parse, prepare_env
-
-###############################
-# Common functions
-###############################
-def create_css():
-    """
-    Base CSS styles.
-    """
-    return """
-    .gradio-container {
-        width: 100vw !important;
-        min-height: 100vh !important;
-        margin: 0 !important;
-        padding: 0 !important;
-        background: linear-gradient(135deg, #EFF6FF 0%, #F5F3FF 100%);
-        display: flex;
-        flex-direction: column;
-        overflow-y: auto !important;
-    }
-    .title-area {
-        text-align: center;
-        margin: 1rem auto;
-        padding: 1rem;
-        background: white;
-        border-radius: 1rem;
-        box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);
-        max-width: 800px;
-    }
-    .title-area h1 {
-        background: linear-gradient(90deg, #2563EB 0%, #7C3AED 100%);
-        -webkit-background-clip: text;
-        -webkit-text-fill-color: transparent;
-        font-size: 2.5rem;
-        font-weight: bold;
-        margin-bottom: 0.5rem;
-    }
-    .title-area p {
-        color: #6B7280;
-        font-size: 1.1rem;
-    }
-    .gr-block, .gr-box {
-        padding: 0.5rem !important;
-    }
-    """
-
-def read_fn(path):
-    disk_rw = FileBasedDataReader(os.path.dirname(path))
-    return disk_rw.read(os.path.basename(path))
-
-def parse_pdf(doc_path, output_dir, end_page_id, is_ocr, layout_mode, formula_enable, table_enable, language):
-    os.makedirs(output_dir, exist_ok=True)
-    try:
-        file_name = f"{str(Path(doc_path).stem)}_{time.time()}"
-        pdf_data = read_fn(doc_path)
-        parse_method = "ocr" if is_ocr else "auto"
-        local_image_dir, local_md_dir = prepare_env(output_dir, file_name, parse_method)
-        do_parse(
-            output_dir,
-            file_name,
-            pdf_data,
-            [],
-            parse_method,
-            False,
-            end_page_id=end_page_id,
-            layout_model=layout_mode,
-            formula_enable=formula_enable,
-            table_enable=table_enable,
-            lang=language,
-            f_dump_orig_pdf=False
-        )
-        return local_md_dir, file_name
-    except Exception as e:
-        logger.exception(e)
-
-def compress_directory_to_zip(directory_path, output_zip_path):
-    try:
-        with zipfile.ZipFile(output_zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
-            for root, dirs, files in os.walk(directory_path):
-                for file in files:
-                    file_path = os.path.join(root, file)
-                    arcname = os.path.relpath(file_path, directory_path)
-                    zipf.write(file_path, arcname)
-        return 0
-    except Exception as e:
-        logger.exception(e)
-        return -1
-
-def image_to_base64(image_path):
-    with open(image_path, "rb") as image_file:
-        return base64.b64encode(image_file.read()).decode('utf-8')
-
-def replace_image_with_base64(markdown_text, image_dir_path):
-    pattern = r'\!\[(?:[^\]]*)\]\(([^)]+)\)'
-    def replace(match):
-        relative_path = match.group(1)
-        full_path = os.path.join(image_dir_path, relative_path)
-        base64_image = image_to_base64(full_path)
-        return f"![{relative_path}](data:image/jpeg;base64,{base64_image})"
-    return re.sub(pattern, replace, markdown_text)
-
-def to_pdf(file_path):
-    """
-    Convert an image (JPG/PNG, etc.) to PDF.
-    For TXT and CSV files, return the path unchanged.
-    """
-    ext = Path(file_path).suffix.lower()
-    if ext in ['.txt', '.csv']:
-        return file_path
-    with pymupdf.open(file_path) as f:
-        if f.is_pdf:
-            return file_path
-        else:
-            pdf_bytes = f.convert_to_pdf()
-            unique_filename = f"{uuid.uuid4()}.pdf"
-            tmp_file_path = os.path.join(os.path.dirname(file_path), unique_filename)
-            with open(tmp_file_path, 'wb') as tmp_pdf_file:
-                tmp_pdf_file.write(pdf_bytes)
-            return tmp_file_path
-
-def to_markdown(file_path, end_pages, is_ocr, layout_mode, formula_enable, table_enable, language, progress=gr.Progress(track_tqdm=False)):
-    """
-    Convert an uploaded PDF/image/TXT/CSV file to Markdown.
-    """
-    ext = Path(file_path).suffix.lower()
-    if ext in ['.txt', '.csv']:
-        progress(0, "Reading file...")
-        with open(file_path, 'r', encoding='utf-8') as f:
-            txt_content = f.read()
-        time.sleep(0.5)
-        progress(50, "Processing file contents...")
-        progress(100, "Conversion complete!")
-        return f"```{txt_content}```\n\n**Conversion complete (text/CSV file)**"
-    else:
-        progress(0, "Converting to PDF...")
-        file_path = to_pdf(file_path)
-        time.sleep(0.5)
-        if end_pages > 20:
-            end_pages = 20
-        progress(20, "Parsing document...")
-        local_md_dir, file_name = parse_pdf(file_path, './output', end_pages - 1, is_ocr,
-                                            layout_mode, formula_enable, table_enable, language)
-        time.sleep(0.5)
-        progress(50, "Creating zip archive...")
-        archive_zip_path = os.path.join("./output", compute_sha256(local_md_dir) + ".zip")
-        zip_archive_success = compress_directory_to_zip(local_md_dir, archive_zip_path)
-        if zip_archive_success == 0:
-            logger.info("Compression succeeded")
-            status_message = "\n\n**Conversion complete (compression succeeded)**"
-        else:
-            logger.error("Compression failed")
-            status_message = "\n\n**Conversion complete (compression failed)**"
-        time.sleep(0.5)
-        progress(70, "Reading markdown...")
-        md_path = os.path.join(local_md_dir, file_name + ".md")
-        with open(md_path, 'r', encoding='utf-8') as f:
-            txt_content = f.read()
-        time.sleep(0.5)
-        progress(90, "Converting images to base64...")
-        md_content = replace_image_with_base64(txt_content, local_md_dir)
-        time.sleep(0.5)
-        progress(100, "Conversion complete!")
-        return md_content + status_message
-
-def to_markdown_comparison(file_a, file_b, end_pages, is_ocr, layout_mode, formula_enable, table_enable, language, progress=gr.Progress(track_tqdm=False)):
-    """
-    Convert two files -> Markdown for A/B comparison.
-    """
-    combined_md = ""
-    if file_a is not None:
-        combined_md += "### Document A\n"
-        md_a = to_markdown(file_a, end_pages, is_ocr, layout_mode, formula_enable, table_enable, language, progress=progress)
-        combined_md += md_a + "\n"
-    if file_b is not None:
-        combined_md += "### Document B\n"
-        md_b = to_markdown(file_b, end_pages, is_ocr, layout_mode, formula_enable, table_enable, language, progress=progress)
-        combined_md += md_b + "\n"
-    if file_a is not None and file_b is not None:
-        combined_md += "### Comparative analysis:\nCompare and analyze the two documents' differences, strengths and weaknesses, and key content.\n"
-    return combined_md
-
-def init_model():
-    """
-    Initialize the magic-pdf models.
-    """
-    from magic_pdf.model.doc_analyze_by_custom_model import ModelSingleton
-    try:
-        model_manager = ModelSingleton()
-        txt_model = model_manager.get_model(False, False)
-        logger.info("txt_model init final")
-        ocr_model = model_manager.get_model(True, False)
-        logger.info("ocr_model init final")
-        return 0
-    except Exception as e:
-        logger.exception(e)
-        return -1
-
-model_init = init_model()
-logger.info(f"model_init: {model_init}")
-
-###############################
-# Language list
-###############################
-latin_lang = [
-    'af','az','bs','cs','cy','da','de','es','et','fr','ga','hr','hu','id','is','it','ku',
-    'la','lt','lv','mi','ms','mt','nl','no','oc','pi','pl','pt','ro','rs_latin','sk','sl',
-    'sq','sv','sw','tl','tr','uz','vi','french','german'
-]
-arabic_lang = ['ar','fa','ug','ur']
-cyrillic_lang = ['ru','rs_cyrillic','be','bg','uk','mn','abq','ady','kbd','ava','dar','inh','che','lbe','lez','tab']
-devanagari_lang = ['hi','mr','ne','bh','mai','ang','bho','mah','sck','new','gom','sa','bgc']
-other_lang = ['ch','en','korean','japan','chinese_cht','ta','te','ka']
-
-all_lang = ['', 'auto']
-all_lang.extend([*other_lang, *latin_lang, *arabic_lang, *cyrillic_lang, *devanagari_lang])
-
-###############################
-# (1) LLM for PDF Chat
-###############################
-import google.generativeai as genai
-from gradio import ChatMessage
-from typing import Iterator
-
-GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
-genai.configure(api_key=GEMINI_API_KEY)
-model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-1219")
-
-def format_chat_history(messages: list) -> list:
-    """
-    Convert messages to the (role, parts[]) format Gemini understands.
-    """
-    formatted_history = []
-    for message in messages:
-        # Exclude "Thinking" messages (assistant + metadata)
-        if not (message.role == "assistant" and hasattr(message, "metadata")):
-            formatted_history.append({
-                "role": "user" if message.role == "user" else "assistant",
-                "parts": [message.content]
-            })
-    return formatted_history
-
-def convert_chat_messages_to_gradio_format(messages):
-    """
-    Convert a ChatMessage list to [(user utterance, bot response), ...].
-    """
-    gradio_chat = []
-    user_text, assistant_text = None, None
-    for msg in messages:
-        if msg.role == "user":
-            if user_text is not None or assistant_text is not None:
-                gradio_chat.append((user_text or "", assistant_text or ""))
-            user_text = msg.content
-            assistant_text = None
-        else:  # assistant
-            if user_text is None:
-                user_text = ""
-            if assistant_text is None:
-                assistant_text = msg.content
-            else:
-                assistant_text += msg.content
-    if user_text is not None or assistant_text is not None:
-        gradio_chat.append((user_text or "", assistant_text or ""))
-    return gradio_chat
-
-def stream_gemini_response(user_query: str, messages: list) -> Iterator[list]:
-    """
-    Stream the Gemini response.
-    """
-    if not user_query.strip():
-        user_query = "...(No content from user)..."
-    try:
-        print(f"\n=== [Gemini] New Request ===\nUser message: '{user_query}'")
-        chat_history = format_chat_history(messages)
-        chat = model.start_chat(history=chat_history)
-        response = chat.send_message(user_query, stream=True)
-
-        thought_buffer = ""
-        response_buffer = ""
-        thinking_complete = False
-
-        # Add a "Thinking" message
-        messages.append(
-            ChatMessage(
-                role="assistant",
-                content="",
-                metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-            )
-        )
-        yield convert_chat_messages_to_gradio_format(messages)
-
-        for chunk in response:
-            parts = chunk.candidates[0].content.parts
-            current_chunk = parts[0].text
-
-            if len(parts) == 2 and not thinking_complete:
-                # First part = "Thinking"
-                thought_buffer += current_chunk
-                messages[-1] = ChatMessage(
-                    role="assistant",
-                    content=thought_buffer,
-                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                )
-                yield convert_chat_messages_to_gradio_format(messages)
-
-                # Second part = the final answer
-                response_buffer = parts[1].text
-                messages.append(ChatMessage(role="assistant", content=response_buffer))
-                thinking_complete = True
-            elif thinking_complete:
-                # The final answer is already in place
-                response_buffer += current_chunk
-                messages[-1] = ChatMessage(role="assistant", content=response_buffer)
-            else:
-                # Still "Thinking"
-                thought_buffer += current_chunk
-                messages[-1] = ChatMessage(
-                    role="assistant",
-                    content=thought_buffer,
-                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-                )
-            yield convert_chat_messages_to_gradio_format(messages)
-
-        print(f"\n=== [Gemini] Final Response ===\n{response_buffer}")
-
-    except Exception as e:
-        print(f"\n=== [Gemini] Error ===\n{str(e)}")
-        messages.append(ChatMessage(role="assistant", content=f"An error occurred: {str(e)}"))
-        yield convert_chat_messages_to_gradio_format(messages)
-
-def user_message(msg: str, history: list, doc_text: str) -> tuple[str, list, str]:
-    """
-    - msg: the text the user typed into the input box
-    - doc_text: the converted document (conversion_md)
-    - history: list of ChatMessage
-
-    Returns:
-    (1) the value that clears the UI input box (""),
-    (2) the updated history,
-    (3) the user_query actually passed to the LLM
-    """
-    if doc_text.strip():
-        user_query = f"Answer with reference to the following document:\n\n{doc_text}\n\nQuestion: {msg}"
-    else:
-        user_query = msg
-
-    history.append(ChatMessage(role="user", content=user_query))
-    return "", history, user_query
-
-def reset_states(file_a, file_b):
-    """
-    Reset the conversation when a new file is uploaded.
-    """
-    return [], "", ""
-
-def clear_all():
-    """
-    Reset the entire conversation.
-    """
-    return [], "", ""
-
-###############################
-# UI integration
-###############################
-if __name__ == "__main__":
-    with gr.Blocks(title="Compare RAG CHAT", css=create_css()) as demo:
-        with gr.Tab("PDF Chat with LLM"):
-            gr.HTML("""
-            <div class="title-area">
-                <h1>Compare RAG CHAT</h1>
-                <p>Upload two PDF/image/text/CSV files for an A/B comparison, then chat with a reasoning LLM.<br>
-                If only one file is uploaded, that file is analyzed on its own.</p>
-            </div>
-            """)
-
-            conversion_md = gr.Markdown(label="Conversion result", visible=True)
-            md_state = gr.State("")  # document conversion result
-            chat_history = gr.State([])  # list of ChatMessage
-            user_query_holder = gr.State("")  # (edited) temporary storage for user_query
-
-            chatbot = gr.Chatbot(visible=True)
-
-            with gr.Row():
-                file_a = gr.File(label="Upload document A", file_types=[".pdf", ".png", ".jpeg", ".jpg", ".txt", ".csv"], interactive=True)
-                file_b = gr.File(label="Upload document B", file_types=[".pdf", ".png", ".jpeg", ".jpg", ".txt", ".csv"], interactive=True)
-            convert_btn = gr.Button("Convert for comparison")
-
-            # Reset state when a file is uploaded
-            file_a.change(
-                fn=reset_states,
-                inputs=[file_a, file_b],
-                outputs=[chat_history, md_state, chatbot]
-            )
-            file_b.change(
-                fn=reset_states,
-                inputs=[file_a, file_b],
-                outputs=[chat_history, md_state, chatbot]
-            )
-
-            max_pages = gr.Slider(1, 20, 10, visible=False)
-            layout_mode = gr.Dropdown(["layoutlmv3", "doclayout_yolo"], value="doclayout_yolo", visible=False)
-            language = gr.Dropdown(all_lang, value='auto', visible=False)
-            formula_enable = gr.Checkbox(value=True, visible=False)
-            is_ocr = gr.Checkbox(value=False, visible=False)
-            table_enable = gr.Checkbox(value=True, visible=False)
-
-            convert_btn.click(
-                fn=to_markdown_comparison,
-                inputs=[file_a, file_b, max_pages, is_ocr, layout_mode, formula_enable, table_enable, language],
-                outputs=conversion_md,
-                show_progress=True
-            )
-
-            gr.Markdown("## Chat with the reasoning LLM")
-            gr.Markdown(
-                "### Comparison examples:\n"
-                "- Compare the two files and explain the content differences in detail.\n"
-                "- Compare the two files and explain which has the better proposal or content.\n"
-                "- Analyze the differences in logical structure and topics between the two documents.\n"
-                "- Compare the differences in style and mode of expression between the two documents."
-            )
-
-            with gr.Row():
-                chat_input = gr.Textbox(lines=1, placeholder="Enter your question...")
-                clear_btn = gr.Button("Reset conversation")
-
-            # (edited) user_message -> (chat_input, chat_history, conversion_md)
-            # => outputs=[chat_input, chat_history, user_query_holder]
-            # Of these, user_query_holder (the actual query) is passed to stream_gemini_response
-            chat_input.submit(
-                fn=user_message,
-                inputs=[chat_input, chat_history, conversion_md],
-                outputs=[chat_input, chat_history, user_query_holder]
-            ).then(
-                fn=stream_gemini_response,
-                inputs=[user_query_holder, chat_history],
-                outputs=chatbot
-            )
-
-            clear_btn.click(
-                fn=clear_all,
-                inputs=[],
-                outputs=[chat_history, md_state, chatbot]
-            )
-
-    # demo.launch(server_name="0.0.0.0", server_port=7860, debug=True, ssr_mode=True)
-    # Set share=True if you want a public share link
-    demo.launch(server_name="0.0.0.0", server_port=7860, debug=True, ssr_mode=True, share=False)
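
The replacement stub keeps the whole application out of the repository: the real source is expected to arrive through the APP environment variable (on Hugging Face Spaces, typically a Space secret) and is run with exec(). The newly added import ast is never used in the visible hunk; one plausible use, sketched below as an assumption rather than anything the commit shows, is syntax-checking the APP source before executing it, so a malformed secret fails fast without running any code.

    import ast
    import os
    import sys

    source = os.getenv("APP", "")
    try:
        # Parse only: reports syntax errors without executing anything.
        ast.parse(source)
    except SyntaxError as e:
        print(f"Invalid APP source: {e}")
        sys.exit(1)

    # Compiling with an explicit filename makes tracebacks point at "<APP>"
    # instead of the default "<string>".
    exec(compile(source, "<APP>", "exec"))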
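
Because the Space now boots whatever APP holds, the stub can also be exercised locally by simulating the secret with an ordinary environment variable. A minimal smoke test, assuming the stub above is saved as app.py in the current directory:

    import os
    import subprocess
    import sys

    # Inject a trivial program through APP and run the stub the way the Space would.
    env = dict(os.environ, APP='print("loaded via the APP secret")')
    subprocess.run([sys.executable, "app.py"], env=env, check=True)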