devvtaco commited on
Commit
1fcd7be
·
verified ·
1 Parent(s): dc5e80d

changed to openai

Browse files
Files changed (1) hide show
  1. app.py +39 -45
app.py CHANGED
@@ -1,53 +1,47 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
- import threading
4
-
5
- chat_model = None
6
- loading_done = False
7
- status_text = "โณ ๋ชจ๋ธ ๋กœ๋”ฉ ์ค‘์ž…๋‹ˆ๋‹ค..."
8
-
9
- # ๋ฐฑ๊ทธ๋ผ์šด๋“œ์—์„œ ๋ชจ๋ธ ๋กœ๋”ฉ
10
- def load_model_bg():
11
- global chat_model, loading_done, status_text
12
- chat_model = pipeline("text-generation", model="beomi/KoAlpaca-Polyglot-5.8B")
13
- loading_done = True
14
- status_text = "โœ… ๋ชจ๋ธ ๋กœ๋”ฉ ์™„๋ฃŒ! ๋ฌธ์žฅ์„ ์ž…๋ ฅํ•ด๋ณด์„ธ์š”."
15
-
16
- # ๋Œ€๋‹ต ํ•จ์ˆ˜
17
- def chat_with_model(message, history):
18
- global chat_model, loading_done
19
- if not loading_done:
20
- return history + [[message, "โš ๏ธ ๋ชจ๋ธ์ด ์•„์ง ๋กœ๋”ฉ ์ค‘์ž…๋‹ˆ๋‹ค. ์ž ์‹œ๋งŒ ๊ธฐ๋‹ค๋ ค ์ฃผ์„ธ์š”."]]
21
-
22
- prompt = f"""
23
- ### Instruction: ๋‹ค์Œ ๋ฌธ์žฅ์„ ๋ถ„์„ํ•ด์„œ ๋ฌด๋ก€ํ•˜๊ฑฐ๋‚˜ ๊ณต๊ฒฉ์ ์ธ ํ‘œํ˜„์ด ์žˆ๋Š”์ง€ ํŒ๋‹จํ•˜๊ณ , ์žˆ๋‹ค๋ฉด ๋” ์˜ˆ์˜ ์žˆ๊ฒŒ ๊ณ ์ณ์ค˜.
24
-
25
- ### Input:
26
- {message}
27
-
28
- ### Response:
29
- """
30
- response = chat_model(prompt, max_new_tokens=200)[0]['generated_text']
31
- response_only = response[len(prompt):].strip()
32
- return history + [[message, response_only]]
33
-
34
- # ์ƒํƒœ ํ…์ŠคํŠธ ๋ฐ˜ํ™˜ ํ•จ์ˆ˜ (๋งค๋ฒˆ ์ƒˆ๋กœ ์ฝ์–ด์˜ด)
35
- def get_status():
36
- return status_text
37
-
38
- # ๋ฐฑ๊ทธ๋ผ์šด๋“œ์—์„œ ๋ชจ๋ธ ๋กœ๋”ฉ ์‹œ์ž‘
39
- threading.Thread(target=load_model_bg).start()
40
-
41
- # Gradio ์•ฑ
42
  with gr.Blocks() as demo:
43
  chatbot = gr.Chatbot()
44
  msg = gr.Textbox(label="๋ฌธ์žฅ์„ ์ž…๋ ฅํ•˜์„ธ์š”", placeholder="์˜ˆ: ๋„ˆ ์ •๋ง ์™œ ๊ทธ๋ ‡๊ฒŒ ๋งํ•ด?")
45
- status = gr.Markdown(get_status)
46
 
47
  def respond_and_clear(user_input, history):
48
- updated_history = chat_with_model(user_input, history)
49
- return "", updated_history, get_status()
50
 
51
- msg.submit(respond_and_clear, [msg, chatbot], [msg, chatbot, status])
52
 
53
- demo.launch()
 
1
  import gradio as gr
2
+ import openai
3
+
4
+ # โœ… OpenAI API ํ‚ค ์ž…๋ ฅ (์•ˆ์ „ํ•œ ์ €์žฅ ๋ฐฉ์‹ ๊ถŒ์žฅ)
5
+ openai.api_key = "YOUR_OPENAI_API_KEY"
6
+
7
# Build the prompt, call the OpenAI chat model, and append the reply to history.
def chat_with_gpt(message, history):
    """Analyze *message* for rude/aggressive wording and suggest a politer version.

    Parameters
    ----------
    message : str
        The user's sentence to analyze (interpolated into the user prompt).
    history : list[list[str]]
        Gradio chat history as [user, bot] pairs. Not mutated.

    Returns
    -------
    list[list[str]]
        A new history list with one appended [message, reply] pair; on any API
        failure the reply slot carries an error string instead.
    """
    # System prompt: role instruction for the model (Korean text kept verbatim).
    system_prompt = "๋„ˆ๋Š” ๋ฌธ์žฅ์„ ๊ณต์†ํ•˜๊ณ  ์˜ˆ์˜ ์žˆ๊ฒŒ ๋ฐ”๊ฟ”์ฃผ๋Š” ํ•œ๊ตญ์–ด ์ „๋ฌธ๊ฐ€์•ผ."

    # User prompt: asks for (1) what was impolite and (2) a politer rewrite.
    user_prompt = f"""์•„๋ž˜ ๋ฌธ์žฅ์˜ ๋ฌด๋ก€ํ•˜๊ฑฐ๋‚˜ ๊ณต๊ฒฉ์ ์ธ ํ‘œํ˜„์„ ์ฐพ์•„๋‚ด๊ณ , ๋” ์˜ˆ์˜ ์žˆ๋Š” ํ‘œํ˜„์œผ๋กœ ๋ฐ”๊ฟ”์ค˜. ๊ฐ„๋‹จํ•œ ์ด์œ ๋„ ํ•จ๊ป˜ ์•Œ๋ ค์ค˜.

๋ฌธ์žฅ: "{message}"

์‘๋‹ต ํ˜•์‹:
1. ์ง€์  ์‚ฌํ•ญ: (๋ฌด๋ก€ํ•œ ํ‘œํ˜„์ด ์žˆ๋‹ค๋ฉด ์–ด๋–ค ๋ถ€๋ถ„์ธ์ง€ ์„ค๋ช…)
2. ์ œ์•ˆ ๋ฌธ์žฅ: (๋” ์˜ˆ์˜ ์žˆ๊ฒŒ ๋ฐ”๊พผ ๋ฌธ์žฅ ์ œ์•ˆ)
"""

    try:
        # openai>=1.0 removed openai.ChatCompletion (raises APIRemovedInV1);
        # the supported entry point is openai.chat.completions.create.
        response = openai.chat.completions.create(
            model="gpt-4",  # or "gpt-3.5-turbo"
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            temperature=0.7,
        )
        # v1 responses are typed objects, not dicts — use attribute access.
        reply = response.choices[0].message.content.strip()
        return history + [[message, reply]]
    except Exception as e:
        # Deliberate catch-all: surface any API/network failure in the chat UI
        # rather than crashing the Gradio event handler.
        return history + [[message, f"โš ๏ธ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"]]
35
+
36
# --- Gradio app wiring -------------------------------------------------------
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="๋ฌธ์žฅ์„ ์ž…๋ ฅํ•˜์„ธ์š”", placeholder="์˜ˆ: ๋„ˆ ์ •๋ง ์™œ ๊ทธ๋ ‡๊ฒŒ ๋งํ•ด?")

    # Submit handler: clear the textbox ("") and show the updated conversation.
    def respond_and_clear(user_input, history):
        return "", chat_with_gpt(user_input, history)

    msg.submit(respond_and_clear, [msg, chatbot], [msg, chatbot])

demo.launch()