gbrabbit committed on
Commit
a796dd8
ยท
1 Parent(s): dd012ae

'app.py_fixed'

Browse files
Files changed (1) hide show
  1. app.py +149 -0
app.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import os
import requests  # NOTE(review): appears unused in this file — confirm before removing
import json  # NOTE(review): appears unused in this file — confirm before removing
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Environment configuration. The HF access token is read ONLY from the
# environment — it is never hard-coded in this file.
HF_TOKEN = os.getenv("HF_TOKEN")
# Model repository to load; overridable via the MODEL_NAME env var.
MODEL_NAME = os.getenv("MODEL_NAME", "gbrabbit/lily-math-model")
12
+ # ๋ชจ๋ธ ๋กœ๋“œ (๋กœ์ปฌ์—์„œ ์ง์ ‘ ๋กœ๋“œ)
13
+ try:
14
+ print("๐Ÿค– ๋ชจ๋ธ ๋กœ๋”ฉ ์ค‘...")
15
+ if HF_TOKEN:
16
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN, trust_remote_code=True)
17
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, token=HF_TOKEN, torch_dtype=torch.float16, trust_remote_code=True)
18
+ else:
19
+ # ํ† ํฐ์ด ์—†์œผ๋ฉด ๊ณต๊ฐœ ๋ชจ๋ธ ์‚ฌ์šฉ
20
+ print("โš ๏ธ ํ† ํฐ์ด ์—†์–ด์„œ ๊ณต๊ฐœ ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค.")
21
+ MODEL_NAME = "microsoft/DialoGPT-medium"
22
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
23
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16)
24
+ print("โœ… ๋ชจ๋ธ ๋กœ๋”ฉ ์™„๋ฃŒ!")
25
+ MODEL_LOADED = True
26
+ except Exception as e:
27
+ print(f"โŒ ๋ชจ๋ธ ๋กœ๋”ฉ ์‹คํŒจ: {e}")
28
+ MODEL_LOADED = False
29
+
30
def chat_with_model(message, history):
    """Generate a chat reply using the locally loaded model.

    Args:
        message: The user's input text.
        history: Gradio chat history. NOTE(review): not used by generation —
            each turn is answered without conversational context; confirm
            whether multi-turn context is intended.

    Returns:
        The model's reply as a string, or a Korean error message on failure.
    """
    if not MODEL_LOADED:
        return "❌ 모델이 로드되지 않았습니다."

    try:
        inputs = tokenizer(message, return_tensors="pt")
        # BUG FIX: move the input tensors to the model's device. The original
        # left them on CPU, which raises a device-mismatch error whenever the
        # model was loaded onto a GPU.
        inputs = {k: v.to(model.device) for k, v in inputs.items()}

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=200,
                temperature=0.7,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
            )

        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Strip the echoed prompt so only the reply remains.
        # BUG FIX: the original used response.replace(message, ""), which
        # deletes EVERY occurrence of the user's text inside the reply (e.g.
        # when the model quotes the question back). Only strip the leading
        # prompt echo.
        if response.startswith(message):
            response = response[len(message):].strip()

        return response if response else "죄송합니다. 응답을 생성할 수 없습니다."

    except Exception as e:
        return f"오류 발생: {str(e)}"
60
+
61
def solve_math_problem(problem):
    """Solve a math problem step by step with the local model.

    Args:
        problem: The math problem as free text (e.g. "2x + 5 = 13").

    Returns:
        The model's worked solution as a string, or a Korean error message
        on failure.
    """
    if not MODEL_LOADED:
        return "❌ 모델이 로드되지 않았습니다."

    try:
        # Instruction prompt asking for a step-by-step solution (Korean).
        prompt = f"다음 수학 문제를 단계별로 풀어주세요: {problem}"

        inputs = tokenizer(prompt, return_tensors="pt")
        # BUG FIX: move the input tensors to the model's device. The original
        # left them on CPU, which raises a device-mismatch error whenever the
        # model was loaded onto a GPU.
        inputs = {k: v.to(model.device) for k, v in inputs.items()}

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=300,
                temperature=0.3,  # lower temperature: math needs less creativity
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
            )

        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Strip the echoed prompt so only the solution remains.
        # BUG FIX: the original used response.replace(prompt, ""), which
        # deletes EVERY occurrence of the prompt text inside the answer;
        # only strip the leading echo.
        if response.startswith(prompt):
            response = response[len(prompt):].strip()

        return response if response else "죄송합니다. 수학 문제를 풀 수 없습니다."

    except Exception as e:
        return f"오류 발생: {str(e)}"
94
+
95
# Build the Gradio interface (compatibility-adjusted per the original note).
# Three tabs: free-form chat, a math-problem solver, and a read-only
# settings/status panel.
with gr.Blocks(title="Lily Math RAG System", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧮 Lily Math RAG System")
    gr.Markdown("수학 문제 해결을 위한 AI 시스템입니다.")

    with gr.Tabs():
        # Chat tab
        with gr.Tab("💬 채팅"):
            # NOTE(review): tuple-style Chatbot history — assumes a Gradio
            # version that still accepts (user, bot) pairs; confirm against
            # the pinned gradio dependency.
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(
                label="메시지를 입력하세요",
                placeholder="안녕하세요! 수학 문제를 도와주세요.",
                lines=2
            )
            clear = gr.Button("대화 초기화")

            def respond(message, chat_history):
                # Generate a reply, append the (user, bot) pair to the
                # history, and clear the input textbox ("" return value).
                bot_message = chat_with_model(message, chat_history)
                chat_history.append((message, bot_message))
                return "", chat_history

            msg.submit(respond, [msg, chatbot], [msg, chatbot])
            # Reset the chatbot component to empty (None clears it).
            clear.click(lambda: None, None, chatbot, queue=False)

        # Math problem-solving tab
        with gr.Tab("🧮 수학 문제 해결"):
            with gr.Row():
                with gr.Column():
                    math_input = gr.Textbox(
                        label="수학 문제",
                        placeholder="예: 2x + 5 = 13",
                        lines=3
                    )
                    solve_btn = gr.Button("문제 풀기", variant="primary")

                with gr.Column():
                    # Read-only output area for the worked solution.
                    math_output = gr.Textbox(
                        label="해답",
                        lines=8,
                        interactive=False
                    )

            solve_btn.click(solve_math_problem, math_input, math_output)

        # Settings / status tab — static snapshot taken at app start-up.
        with gr.Tab("⚙️ 설정"):
            gr.Markdown("## 시스템 정보")
            gr.Markdown(f"**모델**: {MODEL_NAME}")
            gr.Markdown(f"**모델 상태**: {'✅ 로드됨' if MODEL_LOADED else '❌ 로드 실패'}")
            gr.Markdown(f"**토큰 상태**: {'✅ 설정됨' if HF_TOKEN else '❌ 설정되지 않음'}")
            gr.Markdown("**버전**: 2.0.0 (로컬 모델)")

if __name__ == "__main__":
    demo.launch()
149
+