widefirst103 committed on
Commit
940c5d5
·
verified ·
1 Parent(s): 0fa7c56

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -30
app.py CHANGED
@@ -1,46 +1,46 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- # โœ… ์ดˆ๊ฒฝ๋Ÿ‰ ํ•œ๊ตญ์–ด ๋ชจ๋ธ (CPU์—์„œ๋„ ๋น ๋ฆ„)
5
  generator = pipeline(
6
  "text-generation",
7
- model="skt/kogpt2-base-v2"
 
8
  )
9
 
10
- # โœ… ์ž…์‹œ ์ •๋ณด AI ํ•จ์ˆ˜
11
- def college_ai(question):
12
- prompt = f"""๋„ˆ๋Š” ํ•œ๊ตญ ์ž…์‹œ ์ „๋ฌธ๊ฐ€์•ผ.
13
- ํ•™์ƒ์ด ์งˆ๋ฌธํ•˜๋ฉด ๊ฐ„๋‹จํ•˜๊ณ  ์ •ํ™•ํ•˜๊ฒŒ ์•Œ๋ ค์ค˜.
14
- ๋„ˆ๋ฌด ์žฅํ™ฉํ•˜์ง€ ๋ง๊ณ , 3~4๋ฌธ์žฅ ์ด๋‚ด๋กœ ์„ค๋ช…ํ•ด์ค˜.
15
-
16
- ์งˆ๋ฌธ: {question}
17
- ๋‹ต๋ณ€:"""
18
 
 
19
  response = generator(
20
  prompt,
21
- max_new_tokens=80, # ๐Ÿ”น ์†๋„ ํ–ฅ์ƒ
22
- do_sample=True,
23
- temperature=0.8,
24
  top_p=0.9,
25
- repetition_penalty=1.2
26
- )
 
27
 
28
- answer = response[0]["generated_text"].split("๋‹ต๋ณ€:")[-1].strip()
29
- return answer
 
 
30
 
31
- # โœ… Gradio ์ธํ„ฐํŽ˜์ด์Šค
32
- demo = gr.Interface(
33
- fn=college_ai,
34
- inputs=gr.Textbox(
35
- lines=2,
36
- placeholder="์˜ˆ: ๊ฐ€์ฒœ๋Œ€ ํ•™์ƒ๋ถ€๊ต๊ณผ ์ „ํ˜• ๊ฒฝ์Ÿ๋ฅ  ์•Œ๋ ค์ค˜",
37
- label="์ž…์‹œ ๊ด€๋ จ ์งˆ๋ฌธ ์ž…๋ ฅ"
38
- ),
39
- outputs=gr.Textbox(label="AI ๋‹ต๋ณ€"),
40
- title="๐ŸŽ“ ์ž…์‹œ์ •๋ณด AI (๋น ๋ฅธ ๋ฒ„์ „)",
41
- description="ํ•œ๊ตญ ๋Œ€ํ•™ ์ž…์‹œ, ํ•™์ƒ๋ถ€๊ต๊ณผยท์ข…ํ•ฉยท๋…ผ์ˆ  ์ „ํ˜• ๊ด€๋ จ ์ •๋ณด๋ฅผ ๋น ๋ฅด๊ฒŒ ์•Œ๋ ค์ฃผ๋Š” ์ธ๊ณต์ง€๋Šฅ์ž…๋‹ˆ๋‹ค.",
42
- theme="default"
43
- )
44
 
 
45
  if __name__ == "__main__":
46
  demo.launch()
 
1
import gradio as gr
from transformers import pipeline

# Load the KoAlpaca text-generation model; device placement is delegated
# to accelerate via device_map="auto".
MODEL_NAME = "beomi/KoAlpaca-Polyglot-1.1B"

generator = pipeline(
    task="text-generation",
    model=MODEL_NAME,
    device_map="auto",
)
10
 
11
+ # ์‘๋‹ต ํ•จ์ˆ˜ ์ •์˜
12
+ def chat_with_koalpaca(message, history):
13
+ if history is None:
14
+ history = []
15
+ # ๋Œ€ํ™” ๋งฅ๋ฝ์„ ๊ฐ„๋‹จํžˆ ํ•ฉ์น˜๊ธฐ
16
+ context = "\n".join([f"User: {h[0]}\nAssistant: {h[1]}" for h in history])
17
+ prompt = f"{context}\nUser: {message}\nAssistant:"
 
18
 
19
+ # ๋ชจ๋ธ๋กœ๋ถ€ํ„ฐ ์‘๋‹ต ์ƒ์„ฑ
20
  response = generator(
21
  prompt,
22
+ max_new_tokens=200,
23
+ temperature=0.7,
 
24
  top_p=0.9,
25
+ repetition_penalty=1.1,
26
+ do_sample=True
27
+ )[0]["generated_text"]
28
 
29
+ # ํ”„๋กฌํ”„ํŠธ ์ดํ›„ ํ…์ŠคํŠธ๋งŒ ์ถ”์ถœ
30
+ answer = response.split("Assistant:")[-1].strip()
31
+ history.append((message, answer))
32
+ return answer, history
33
 
34
# Build the Gradio UI: a chat window, a message box, and a reset button.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## ๐Ÿฆ™ KoAlpaca Chatbot (Polyglot 1.1B)")

    chat_window = gr.Chatbot(label="KoAlpaca")
    user_input = gr.Textbox(
        label="๋ฉ”์‹œ์ง€๋ฅผ ์ž…๋ ฅํ•˜์„ธ์š”",
        placeholder="์˜ˆ: ์•ˆ๋…•! ์˜ค๋Š˜ ๋‚ ์”จ ์–ด๋•Œ?",
    )
    reset_button = gr.Button("๋Œ€ํ™” ์ดˆ๊ธฐํ™”")

    # Submitting the textbox feeds (message, history) into the responder and
    # writes its two outputs back to the textbox and the chat window.
    user_input.submit(
        chat_with_koalpaca,
        inputs=[user_input, chat_window],
        outputs=[user_input, chat_window],
    )
    # The reset button wipes the chat window; queue=False so it reacts
    # immediately instead of waiting behind pending generations.
    reset_button.click(lambda: None, None, chat_window, queue=False)
 
 
 
 
43
 
44
# Run the app (script entry point; `demo` is the Blocks interface built above).
if __name__ == "__main__":
    demo.launch()