gbrabbit committed on
Commit
d3654f8
·
1 Parent(s): 0553b33

Auto commit at 07-2025-08 1:24:19

Browse files
Files changed (1) hide show
  1. app.py +45 -0
app.py CHANGED
@@ -10,6 +10,11 @@ from PIL import Image
10
  import io
11
  import base64
12
 
 
 
 
 
 
13
  # .env ํŒŒ์ผ์—์„œ ํ™˜๊ฒฝ ๋ณ€์ˆ˜ ๋กœ๋“œ
14
  try:
15
  from dotenv import load_dotenv
@@ -182,6 +187,26 @@ def chat_with_model(message, history, file=None):
182
  print(f"๐Ÿ“ DEBUG: ์ „์ฒด ๋ฉ”์‹œ์ง€: {full_message[:200]}...")
183
 
184
  print("๐Ÿ”ค DEBUG: ํ† ํฌ๋‚˜์ด์ € ์ฒ˜๋ฆฌ ์‹œ์ž‘")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
  inputs = tokenizer(full_message, return_tensors="pt")
186
  print(f" ์ž…๋ ฅ shape: {inputs['input_ids'].shape}")
187
  print(f" attention_mask shape: {inputs['attention_mask'].shape}")
@@ -342,6 +367,26 @@ def solve_math_problem(problem, file=None):
342
  print(f"๐Ÿ“ DEBUG: ์ „์ฒด ํ”„๋กฌํ”„ํŠธ: {full_prompt[:200]}...")
343
 
344
  print("๐Ÿ”ค DEBUG: ํ† ํฌ๋‚˜์ด์ € ์ฒ˜๋ฆฌ ์‹œ์ž‘")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
345
  inputs = tokenizer(full_prompt, return_tensors="pt")
346
  print(f" ์ž…๋ ฅ shape: {inputs['input_ids'].shape}")
347
  print(f" attention_mask shape: {inputs['attention_mask'].shape}")
 
10
  import io
11
  import base64
12
 
13
+ # ์ „์—ญ ๋ณ€์ˆ˜๋กœ ์„ ์–ธ
14
+ tokenizer = None
15
+ model = None
16
+ MODEL_LOADED = False
17
+
18
  # .env ํŒŒ์ผ์—์„œ ํ™˜๊ฒฝ ๋ณ€์ˆ˜ ๋กœ๋“œ
19
  try:
20
  from dotenv import load_dotenv
 
187
  print(f"๐Ÿ“ DEBUG: ์ „์ฒด ๋ฉ”์‹œ์ง€: {full_message[:200]}...")
188
 
189
  print("๐Ÿ”ค DEBUG: ํ† ํฌ๋‚˜์ด์ € ์ฒ˜๋ฆฌ ์‹œ์ž‘")
190
+ print(f" tokenizer ํƒ€์ž…: {type(tokenizer)}")
191
+ print(f" tokenizer ๊ฐ’: {tokenizer}")
192
+
193
+ # tokenizer๊ฐ€ ์˜ฌ๋ฐ”๋ฅธ์ง€ ํ™•์ธ
194
+ if not hasattr(tokenizer, 'encode'):
195
+ print("โŒ DEBUG: tokenizer๊ฐ€ ์˜ฌ๋ฐ”๋ฅด์ง€ ์•Š์Œ")
196
+ # tokenizer๋ฅผ ๋‹ค์‹œ ๋กœ๋“œ
197
+ print("๐Ÿ”„ DEBUG: tokenizer ์žฌ๋กœ๋“œ ์‹œ๋„")
198
+ try:
199
+ tokenizer = AutoTokenizer.from_pretrained(
200
+ MODEL_NAME,
201
+ token=HF_TOKEN,
202
+ trust_remote_code=True,
203
+ use_fast=False
204
+ )
205
+ print("โœ… DEBUG: tokenizer ์žฌ๋กœ๋“œ ์„ฑ๊ณต")
206
+ except Exception as reload_error:
207
+ print(f"โŒ DEBUG: tokenizer ์žฌ๋กœ๋“œ ์‹คํŒจ: {reload_error}")
208
+ return f"ํ† ํฌ๋‚˜์ด์ € ์˜ค๋ฅ˜: {str(reload_error)}"
209
+
210
  inputs = tokenizer(full_message, return_tensors="pt")
211
  print(f" ์ž…๋ ฅ shape: {inputs['input_ids'].shape}")
212
  print(f" attention_mask shape: {inputs['attention_mask'].shape}")
 
367
  print(f"๐Ÿ“ DEBUG: ์ „์ฒด ํ”„๋กฌํ”„ํŠธ: {full_prompt[:200]}...")
368
 
369
  print("๐Ÿ”ค DEBUG: ํ† ํฌ๋‚˜์ด์ € ์ฒ˜๋ฆฌ ์‹œ์ž‘")
370
+ print(f" tokenizer ํƒ€์ž…: {type(tokenizer)}")
371
+ print(f" tokenizer ๊ฐ’: {tokenizer}")
372
+
373
+ # tokenizer๊ฐ€ ์˜ฌ๋ฐ”๋ฅธ์ง€ ํ™•์ธ
374
+ if not hasattr(tokenizer, 'encode'):
375
+ print("โŒ DEBUG: tokenizer๊ฐ€ ์˜ฌ๋ฐ”๋ฅด์ง€ ์•Š์Œ")
376
+ # tokenizer๋ฅผ ๋‹ค์‹œ ๋กœ๋“œ
377
+ print("๐Ÿ”„ DEBUG: tokenizer ์žฌ๋กœ๋“œ ์‹œ๋„")
378
+ try:
379
+ tokenizer = AutoTokenizer.from_pretrained(
380
+ MODEL_NAME,
381
+ token=HF_TOKEN,
382
+ trust_remote_code=True,
383
+ use_fast=False
384
+ )
385
+ print("โœ… DEBUG: tokenizer ์žฌ๋กœ๋“œ ์„ฑ๊ณต")
386
+ except Exception as reload_error:
387
+ print(f"โŒ DEBUG: tokenizer ์žฌ๋กœ๋“œ ์‹คํŒจ: {reload_error}")
388
+ return f"ํ† ํฌ๋‚˜์ด์ € ์˜ค๋ฅ˜: {str(reload_error)}"
389
+
390
  inputs = tokenizer(full_prompt, return_tensors="pt")
391
  print(f" ์ž…๋ ฅ shape: {inputs['input_ids'].shape}")
392
  print(f" attention_mask shape: {inputs['attention_mask'].shape}")