gbrabbit committed on
Commit
acb109a
·
1 Parent(s): ce269f5

Auto commit at 2025-08-25 18:52:19

Browse files
lily_llm_api/services/generation_service.py CHANGED
@@ -124,14 +124,39 @@ def generate_sync(prompt: str, image_data_list: Optional[List[bytes]], max_lengt
124
  all_image_data.extend(image_data_list)
125
  print(f"๐Ÿ” [DEBUG] ์ง์ ‘ ์ „๋‹ฌ๋œ ์ด๋ฏธ์ง€ {len(image_data_list)}๊ฐœ ์ถ”๊ฐ€")
126
  else:
127
- # ํ˜„์žฌ ์š”์ฒญ์— ์ด๋ฏธ์ง€๊ฐ€ ์—†์œผ๋ฉด (์˜ต์…˜) ์„ธ์…˜ ์บ์‹œ์—์„œ ๋ณต๊ตฌ
128
  if use_rag_images:
129
- if session_id and session_id in _session_image_cache and len(_session_image_cache[session_id]) > 0:
130
- cached_imgs = _session_image_cache[session_id]
131
- all_image_data.extend(cached_imgs)
132
- print(f"๐Ÿ” [DEBUG] ์„ธ์…˜ ์บ์‹œ์—์„œ ์ด์ „ ์ด๋ฏธ์ง€ {len(cached_imgs)}๊ฐœ ๋ณต๊ตฌ (์„ธ์…˜: {session_id})")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
  else:
134
- print("๐Ÿ” [DEBUG] ์„ธ์…˜ ์บ์‹œ ๋ณต๊ตฌ ๋น„ํ™œ์„ฑํ™”(use_rag_images=False)")
135
 
136
  # ์ถ”๊ฐ€ ๋ณต๊ตฌ: ์—ฌ์ „ํžˆ ์ด๋ฏธ์ง€๊ฐ€ ์—†๊ณ  ๋ฉ€ํ‹ฐ๋ชจ๋‹ฌ์ด๋ฉฐ, ๋ช…์‹œ์ ์œผ๋กœ ํ—ˆ์šฉ๋œ ๊ฒฝ์šฐ์—๋งŒ RAG์—์„œ ์ด๋ฏธ์ง€ ๋ณต์›
137
  if use_rag_images and (not all_image_data or len([img for img in all_image_data if img]) == 0) and getattr(current_profile, 'multimodal', False):
@@ -468,7 +493,7 @@ def generate_sync(prompt: str, image_data_list: Optional[List[bytes]], max_lengt
468
 
469
  print(f"๐Ÿ” [DEBUG] ํ”„๋กฌํ”„ํŠธ ๊ตฌ์„ฑ ์™„๋ฃŒ - ๊ธธ์ด: {len(formatted_prompt) if formatted_prompt else 0}")
470
  if debug_log_prompt:
471
- print(f"๐Ÿ” [DEBUG] ์ตœ์ข… ํ”„๋กฌํ”„ํŠธ: {formatted_prompt}")
472
 
473
  # --- 3. ํ† ํฌ๋‚˜์ด์ง• ---
474
  print(f"๐Ÿ” [DEBUG] ํ† ํฌ๋‚˜์ด์ง• ์‹œ์ž‘")
@@ -668,17 +693,17 @@ def generate_sync(prompt: str, image_data_list: Optional[List[bytes]], max_lengt
668
  # ์ตœ์ข… ํด๋ฐฑ: ํ…์ŠคํŠธ-only ๊ฒฝ๋กœ๋กœ ์ „ํ™˜(์ด๋ฏธ์ง€ ๋น„ํ™œ์„ฑํ™”)
669
  all_pixel_values = []
670
  image_processed = False
671
- inputs = tokenizer(
672
  formatted_prompt if formatted_prompt else prompt,
673
- return_tensors="pt",
674
- padding=True,
675
- truncation=True,
676
  max_length=effective_input_max_len,
677
- )
678
- if 'token_type_ids' in inputs:
679
- del inputs['token_type_ids']
680
- input_ids = inputs['input_ids']
681
- attention_mask = inputs['attention_mask']
682
  else:
683
  # ์•ˆ์ „ ํด๋ฐฑ
684
  print(f"๐Ÿ” [DEBUG] ๊ธฐ๋ณธ ํ† ํฌ๋‚˜์ด์ € ์‚ฌ์šฉ (ํด๋ฐฑ)")
@@ -866,7 +891,7 @@ def generate_sync(prompt: str, image_data_list: Optional[List[bytes]], max_lengt
866
 
867
  # ๐Ÿ”’ ์•ˆ์ „ ๊ฐ€๋“œ: image_token_thw๊ฐ€ ๋น„์ •์ƒ์ผ ๋•Œ -1 ํ† ํฐ์ด ์ƒ์„ฑ๋˜์ง€ ์•Š๋„๋ก ๋ฐฉ์ง€
868
  try:
869
- if 'image_token_thw' in processed_image_metas:
870
  it = processed_image_metas['image_token_thw']
871
  if isinstance(it, torch.Tensor) and (it.numel() == 0 or it.shape[-1] != 3):
872
  print(f"โš ๏ธ [DEBUG] image_token_thw ๋น„์ •์ƒ: {it.shape if hasattr(it,'shape') else type(it)} -> ์•ˆ์ „ ๊ธฐ๋ณธ๊ฐ’ ์ ์šฉ")
@@ -1037,37 +1062,37 @@ def generate_sync(prompt: str, image_data_list: Optional[List[bytes]], max_lengt
1037
 
1038
  import torch as _torch
1039
  with _torch.inference_mode():
1040
- generated_ids = lora_model.generate(
1041
- **lora_inputs,
1042
- **gen_config
1043
- )
1044
  else:
1045
  print(f"โš ๏ธ [DEBUG] LoRA ๋ชจ๋ธ์„ ๊ฐ€์ ธ์˜ฌ ์ˆ˜ ์—†์Œ, ๊ธฐ๋ณธ ๋ชจ๋ธ ์‚ฌ์šฉ")
1046
  import torch as _torch
1047
  with _torch.inference_mode():
1048
- generated_ids = current_model.generate(
1049
- input_ids=input_ids,
1050
- attention_mask=attention_mask,
1051
- **gen_config
1052
- )
1053
- else:
1054
- print(f"๐Ÿ” [DEBUG] LoRA ์–ด๋Œ‘ํ„ฐ ์—†์Œ, ๊ธฐ๋ณธ ๋ชจ๋ธ ์‚ฌ์šฉ")
1055
- import torch as _torch
1056
- with _torch.inference_mode():
1057
  generated_ids = current_model.generate(
1058
  input_ids=input_ids,
1059
  attention_mask=attention_mask,
1060
  **gen_config
1061
  )
1062
- except ImportError:
1063
- print(f"๐Ÿ” [DEBUG] LoRA ์ง€์› ์•ˆ๋จ, ๊ธฐ๋ณธ ๋ชจ๋ธ ์‚ฌ์šฉ")
1064
- import torch as _torch
1065
- with _torch.inference_mode():
1066
  generated_ids = current_model.generate(
1067
  input_ids=input_ids,
1068
  attention_mask=attention_mask,
1069
  **gen_config
1070
  )
 
 
 
 
 
 
 
 
 
1071
 
1072
  print(f"๐Ÿ” [DEBUG] ๋ชจ๋ธ ์ƒ์„ฑ ์™„๋ฃŒ ์‹œ๊ฐ„: {time.time()}")
1073
 
@@ -1094,9 +1119,9 @@ def generate_sync(prompt: str, image_data_list: Optional[List[bytes]], max_lengt
1094
  # ์ƒ์„ฑ๋œ ํ…์ŠคํŠธ ๋””์ฝ”๋”ฉ
1095
  full_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
1096
  if os.getenv('LILY_DEBUG_LOG_TEXT', '0') == '1':
1097
- print(f"๐Ÿ” [DEBUG] ์ „์ฒด ํ…์ŠคํŠธ ๊ธธ์ด: {len(full_text)}")
1098
- print(f"๐Ÿ” [DEBUG] ์ „์ฒด ์ƒ์„ฑ ํ…์ŠคํŠธ (Raw): \n---\n{full_text}\n---")
1099
- print(f"๐Ÿ” [DEBUG] ์‚ฌ์šฉ๋œ ํ”„๋กฌํ”„ํŠธ: {formatted_prompt}")
1100
 
1101
  # ํ”„๏ฟฝ๏ฟฝ๏ฟฝํ•„๋ณ„ ์‘๋‹ต ์ถ”์ถœ (์•ˆ์ „ํ•œ ๋ฐฉ์‹)
1102
  if hasattr(current_profile, 'extract_response'):
 
124
  all_image_data.extend(image_data_list)
125
  print(f"๐Ÿ” [DEBUG] ์ง์ ‘ ์ „๋‹ฌ๋œ ์ด๋ฏธ์ง€ {len(image_data_list)}๊ฐœ ์ถ”๊ฐ€")
126
  else:
127
+ # ํ˜„์žฌ ์š”์ฒญ์— ์ด๋ฏธ์ง€๊ฐ€ ์—†์œผ๋ฉด (์˜ต์…˜) ๋ฌธ์„œ ๊ธฐ๋ฐ˜ โ†’ ์„ธ์…˜ ์บ์‹œ โ†’ ์ตœ์‹  ๋ฌธ์„œ ์ˆœ์œผ๋กœ ๋ณต๊ตฌ
128
  if use_rag_images:
129
+ # 1) ๋ฌธ์„œ ID๊ฐ€ ๋ช…์‹œ๋˜๋ฉด ํ•ด๋‹น ๋ฌธ์„œ์—์„œ ๋จผ์ € ๋ณต๊ตฌ
130
+ if document_id and vector_store_manager is not None:
131
+ try:
132
+ base_path = getattr(vector_store_manager, 'base_path', Path('./vector_stores'))
133
+ store_path = Path(base_path) / user_id / document_id
134
+ if SimpleVectorStore is not None:
135
+ store = SimpleVectorStore.load_local(str(store_path))
136
+ recovered = []
137
+ for doc in getattr(store, 'documents', []) or []:
138
+ try:
139
+ meta = getattr(doc, 'metadata', {}) or {}
140
+ imgs = meta.get('image_data_list')
141
+ if imgs and isinstance(imgs, list):
142
+ recovered.extend([b for b in imgs if isinstance(b, (bytes, bytearray)) and len(b) > 0])
143
+ except Exception:
144
+ continue
145
+ if recovered:
146
+ all_image_data.extend(recovered[:4])
147
+ print(f"๐Ÿ” [DEBUG] ๋ฌธ์„œ ๊ธฐ๋ฐ˜ ์ด๋ฏธ์ง€ ๋ณต๊ตฌ: doc={document_id}, ์‚ฌ์šฉ={len(all_image_data)}")
148
+ else:
149
+ print("โš ๏ธ [DEBUG] SimpleVectorStore ์‚ฌ์šฉ ๋ถˆ๊ฐ€ - ๋ฌธ์„œ ๊ธฐ๋ฐ˜ ์ด๋ฏธ์ง€ ๋ณต๊ตฌ ์ƒ๋žต")
150
+ except Exception as e:
151
+ print(f"โš ๏ธ [DEBUG] ๋ฌธ์„œ ๊ธฐ๋ฐ˜ ์ด๋ฏธ์ง€ ๋ณต๊ตฌ ์‹คํŒจ(doc={document_id}): {e}")
152
+
153
+ # 2) ์—ฌ์ „ํžˆ ์—†์œผ๋ฉด ์„ธ์…˜ ์บ์‹œ์—์„œ ๋ณต๊ตฌ
154
+ if (not all_image_data or len([img for img in all_image_data if img]) == 0) and session_id and session_id in _session_image_cache and len(_session_image_cache[session_id]) > 0:
155
+ cached_imgs = _session_image_cache[session_id]
156
+ all_image_data.extend(cached_imgs)
157
+ print(f"๐Ÿ” [DEBUG] ์„ธ์…˜ ์บ์‹œ์—์„œ ์ด์ „ ์ด๋ฏธ์ง€ {len(cached_imgs)}๊ฐœ ๋ณต๊ตฌ (์„ธ์…˜: {session_id})")
158
  else:
159
+ print("๐Ÿ” [DEBUG] ์„ธ์…˜/๋ฌธ์„œ ๋ณต๊ตฌ ๋น„ํ™œ์„ฑํ™”(use_rag_images=False)")
160
 
161
  # ์ถ”๊ฐ€ ๋ณต๊ตฌ: ์—ฌ์ „ํžˆ ์ด๋ฏธ์ง€๊ฐ€ ์—†๊ณ  ๋ฉ€ํ‹ฐ๋ชจ๋‹ฌ์ด๋ฉฐ, ๋ช…์‹œ์ ์œผ๋กœ ํ—ˆ์šฉ๋œ ๊ฒฝ์šฐ์—๋งŒ RAG์—์„œ ์ด๋ฏธ์ง€ ๋ณต์›
162
  if use_rag_images and (not all_image_data or len([img for img in all_image_data if img]) == 0) and getattr(current_profile, 'multimodal', False):
 
493
 
494
  print(f"๐Ÿ” [DEBUG] ํ”„๋กฌํ”„ํŠธ ๊ตฌ์„ฑ ์™„๋ฃŒ - ๊ธธ์ด: {len(formatted_prompt) if formatted_prompt else 0}")
495
  if debug_log_prompt:
496
+ print(f"๐Ÿ” [DEBUG] ์ตœ์ข… ํ”„๋กฌํ”„ํŠธ: {formatted_prompt}")
497
 
498
  # --- 3. ํ† ํฌ๋‚˜์ด์ง• ---
499
  print(f"๐Ÿ” [DEBUG] ํ† ํฌ๋‚˜์ด์ง• ์‹œ์ž‘")
 
693
  # ์ตœ์ข… ํด๋ฐฑ: ํ…์ŠคํŠธ-only ๊ฒฝ๋กœ๋กœ ์ „ํ™˜(์ด๋ฏธ์ง€ ๋น„ํ™œ์„ฑํ™”)
694
  all_pixel_values = []
695
  image_processed = False
696
+ inputs = tokenizer(
697
  formatted_prompt if formatted_prompt else prompt,
698
+ return_tensors="pt",
699
+ padding=True,
700
+ truncation=True,
701
  max_length=effective_input_max_len,
702
+ )
703
+ if 'token_type_ids' in inputs:
704
+ del inputs['token_type_ids']
705
+ input_ids = inputs['input_ids']
706
+ attention_mask = inputs['attention_mask']
707
  else:
708
  # ์•ˆ์ „ ํด๋ฐฑ
709
  print(f"๐Ÿ” [DEBUG] ๊ธฐ๋ณธ ํ† ํฌ๋‚˜์ด์ € ์‚ฌ์šฉ (ํด๋ฐฑ)")
 
891
 
892
  # ๐Ÿ”’ ์•ˆ์ „ ๊ฐ€๋“œ: image_token_thw๊ฐ€ ๋น„์ •์ƒ์ผ ๋•Œ -1 ํ† ํฐ์ด ์ƒ์„ฑ๋˜์ง€ ์•Š๋„๋ก ๋ฐฉ์ง€
893
  try:
894
+ if 'image_token_thw' in processed_image_metas:
895
  it = processed_image_metas['image_token_thw']
896
  if isinstance(it, torch.Tensor) and (it.numel() == 0 or it.shape[-1] != 3):
897
  print(f"โš ๏ธ [DEBUG] image_token_thw ๋น„์ •์ƒ: {it.shape if hasattr(it,'shape') else type(it)} -> ์•ˆ์ „ ๊ธฐ๋ณธ๊ฐ’ ์ ์šฉ")
 
1062
 
1063
  import torch as _torch
1064
  with _torch.inference_mode():
1065
+ generated_ids = lora_model.generate(
1066
+ **lora_inputs,
1067
+ **gen_config
1068
+ )
1069
  else:
1070
  print(f"โš ๏ธ [DEBUG] LoRA ๋ชจ๋ธ์„ ๊ฐ€์ ธ์˜ฌ ์ˆ˜ ์—†์Œ, ๊ธฐ๋ณธ ๋ชจ๋ธ ์‚ฌ์šฉ")
1071
  import torch as _torch
1072
  with _torch.inference_mode():
 
 
 
 
 
 
 
 
 
1073
  generated_ids = current_model.generate(
1074
  input_ids=input_ids,
1075
  attention_mask=attention_mask,
1076
  **gen_config
1077
  )
1078
+ else:
1079
+ print(f"๐Ÿ” [DEBUG] LoRA ์–ด๋Œ‘ํ„ฐ ์—†์Œ, ๊ธฐ๋ณธ ๋ชจ๋ธ ์‚ฌ์šฉ")
1080
+ import torch as _torch
1081
+ with _torch.inference_mode():
1082
  generated_ids = current_model.generate(
1083
  input_ids=input_ids,
1084
  attention_mask=attention_mask,
1085
  **gen_config
1086
  )
1087
+ except ImportError:
1088
+ print(f"๐Ÿ” [DEBUG] LoRA ์ง€์› ์•ˆ๋จ, ๊ธฐ๋ณธ ๋ชจ๋ธ ์‚ฌ์šฉ")
1089
+ import torch as _torch
1090
+ with _torch.inference_mode():
1091
+ generated_ids = current_model.generate(
1092
+ input_ids=input_ids,
1093
+ attention_mask=attention_mask,
1094
+ **gen_config
1095
+ )
1096
 
1097
  print(f"๐Ÿ” [DEBUG] ๋ชจ๋ธ ์ƒ์„ฑ ์™„๋ฃŒ ์‹œ๊ฐ„: {time.time()}")
1098
 
 
1119
  # ์ƒ์„ฑ๋œ ํ…์ŠคํŠธ ๋””์ฝ”๋”ฉ
1120
  full_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
1121
  if os.getenv('LILY_DEBUG_LOG_TEXT', '0') == '1':
1122
+ print(f"๐Ÿ” [DEBUG] ์ „์ฒด ํ…์ŠคํŠธ ๊ธธ์ด: {len(full_text)}")
1123
+ print(f"๐Ÿ” [DEBUG] ์ „์ฒด ์ƒ์„ฑ ํ…์ŠคํŠธ (Raw): \n---\n{full_text}\n---")
1124
+ print(f"๐Ÿ” [DEBUG] ์‚ฌ์šฉ๋œ ํ”„๋กฌํ”„ํŠธ: {formatted_prompt}")
1125
 
1126
  # ํ”„๏ฟฝ๏ฟฝ๏ฟฝํ•„๋ณ„ ์‘๋‹ต ์ถ”์ถœ (์•ˆ์ „ํ•œ ๋ฐฉ์‹)
1127
  if hasattr(current_profile, 'extract_response'):
requirements.txt CHANGED
@@ -57,4 +57,7 @@ einops
57
  timm
58
 
59
  # summarizers
60
- summarizers
 
 
 
 
57
  timm
58
 
59
  # summarizers
60
+ summarizers
61
+
62
+ # doc
63
+ unstructured
requirements_full_lily_250825_1851.txt ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerate==1.10.0
2
+ aiofiles==24.1.0
3
+ aiohappyeyeballs==2.6.1
4
+ aiohttp==3.12.15
5
+ aiosignal==1.4.0
6
+ amqp==5.3.1
7
+ annotated-types==0.7.0
8
+ anyio==4.10.0
9
+ attrs==25.3.0
10
+ backoff==2.2.1
11
+ bcrypt==4.3.0
12
+ beautifulsoup4==4.13.4
13
+ billiard==4.2.1
14
+ bitsandbytes==0.47.0
15
+ celery==5.5.3
16
+ certifi==2025.8.3
17
+ cffi==1.17.1
18
+ charset-normalizer==3.4.3
19
+ click==8.2.1
20
+ click-didyoumean==0.3.1
21
+ click-plugins==1.1.1.2
22
+ click-repl==0.3.0
23
+ colorama==0.4.6
24
+ cryptography==45.0.6
25
+ dataclasses-json==0.6.7
26
+ easyocr==1.7.2
27
+ ecdsa==0.19.1
28
+ einops==0.8.1
29
+ emoji==2.14.1
30
+ faiss-cpu==1.12.0
31
+ fastapi==0.116.1
32
+ filelock==3.19.1
33
+ filetype==1.2.0
34
+ frozenlist==1.7.0
35
+ fsspec==2025.7.0
36
+ greenlet==3.2.4
37
+ h11==0.16.0
38
+ html5lib==1.1
39
+ httpcore==1.0.9
40
+ httptools==0.6.4
41
+ httpx==0.28.1
42
+ httpx-sse==0.4.1
43
+ huggingface-hub==0.34.4
44
+ idna==3.10
45
+ imageio==2.37.0
46
+ intel-openmp==2021.4.0
47
+ Jinja2==3.1.6
48
+ joblib==1.5.1
49
+ jsonpatch==1.33
50
+ jsonpointer==3.0.0
51
+ kombu==5.5.4
52
+ langchain==0.3.27
53
+ langchain-community==0.3.27
54
+ langchain-core==0.3.74
55
+ langchain-text-splitters==0.3.9
56
+ langdetect==1.0.9
57
+ langsmith==0.4.14
58
+ lazy_loader==0.4
59
+ lxml==6.0.0
60
+ markdown-it-py==3.0.0
61
+ MarkupSafe==3.0.2
62
+ marshmallow==3.26.1
63
+ mdurl==0.1.2
64
+ mkl==2021.4.0
65
+ mpmath==1.3.0
66
+ multidict==6.6.4
67
+ mypy_extensions==1.1.0
68
+ networkx==3.5
69
+ ninja==1.13.0
70
+ nltk==3.9.1
71
+ numpy==1.26.4
72
+ olefile==0.47
73
+ opencv-python-headless==4.11.0.86
74
+ orjson==3.11.2
75
+ packaging==25.0
76
+ pandas==2.3.1
77
+ passlib==1.7.4
78
+ peft==0.15.0
79
+ pillow==11.3.0
80
+ prompt_toolkit==3.0.51
81
+ propcache==0.3.2
82
+ psutil==7.0.0
83
+ pyasn1==0.6.1
84
+ pyclipper==1.3.0.post6
85
+ pycparser==2.22
86
+ pydantic==2.11.7
87
+ pydantic-settings==2.10.1
88
+ pydantic_core==2.33.2
89
+ PyJWT==2.10.1
90
+ PyMuPDF==1.26.3
91
+ pypdf==6.0.0
92
+ pytesseract==0.3.13
93
+ python-bidi==0.6.6
94
+ python-dateutil==2.9.0.post0
95
+ python-docx==1.2.0
96
+ python-dotenv==1.1.1
97
+ python-iso639==2025.2.18
98
+ python-jose==3.5.0
99
+ python-json-logger==3.3.0
100
+ python-magic==0.4.27
101
+ python-multipart==0.0.20
102
+ python-oxmsg==0.0.2
103
+ python-pptx==1.0.2
104
+ pytz==2025.2
105
+ PyYAML==6.0.2
106
+ RapidFuzz==3.13.0
107
+ redis==6.4.0
108
+ regex==2025.7.34
109
+ requests==2.32.5
110
+ requests-toolbelt==1.0.0
111
+ rsa==4.9.1
112
+ safetensors==0.6.2
113
+ scikit-image==0.25.2
114
+ scikit-learn==1.7.1
115
+ scipy==1.16.1
116
+ sentence-transformers==2.2.2
117
+ sentencepiece==0.2.1
118
+ shapely==2.1.1
119
+ six==1.17.0
120
+ sniffio==1.3.1
121
+ soupsieve==2.7
122
+ SQLAlchemy==2.0.43
123
+ starlette==0.47.2
124
+ summarizers==1.0.4
125
+ sympy==1.14.0
126
+ tbb==2021.13.1
127
+ tenacity==9.1.2
128
+ threadpoolctl==3.6.0
129
+ tifffile==2025.6.11
130
+ timm==1.0.19
131
+ tokenizers==0.21.4
132
+ torch==2.3.1
133
+ torchvision==0.18.1
134
+ tqdm==4.67.1
135
+ transformers==4.55.2
136
+ typing-inspect==0.9.0
137
+ typing-inspection==0.4.1
138
+ typing_extensions==4.14.1
139
+ tzdata==2025.2
140
+ unstructured==0.18.13
141
+ unstructured-client==0.42.3
142
+ urllib3==2.5.0
143
+ uvicorn==0.35.0
144
+ vine==5.1.0
145
+ watchfiles==1.1.0
146
+ wcwidth==0.2.13
147
+ webencodings==0.5.1
148
+ websockets==15.0.1
149
+ wrapt==1.17.3
150
+ xlsxwriter==3.2.5
151
+ yarl==1.20.1
152
+ zstandard==0.24.0