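# Admissions Q&A chatbot: KoAlpaca-Polyglot-1.1B served through a Gradio UI.
# Assumed dependencies (not pinned in the source): transformers, torch,
# accelerate (needed for device_map="auto"), and gradio, e.g.
#   pip install transformers torch accelerate gradio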
from transformers import pipeline
import gradio as gr

# Load the KoAlpaca model (a balance of speed and quality).
generator = pipeline(
    "text-generation",
    model="beomi/KoAlpaca-Polyglot-1.1B",
    tokenizer="beomi/KoAlpaca-Polyglot-1.1B",
    device_map="auto"
)

def answer_question(prompt):
    # Korean system prompt; roughly: "You are an AI that explains Korean
    # university admissions. Describe tracks such as the CSAT (suneung),
    # comprehensive school records, essay exams, and regular admissions
    # in an easy-to-understand way."
    system_prompt = (
        "λ„ˆλŠ” ν•œκ΅­ λŒ€ν•™ μž…μ‹œ 정보λ₯Ό μ•Œλ €μ£ΌλŠ” AIμ•Ό. "
        "수λŠ₯, 학생뢀쒅합, λ…Όμˆ , μ •μ‹œ λ“± μ „ν˜•μ„ μ΄ν•΄ν•˜κΈ° μ‰½κ²Œ μ„€λͺ…ν•΄μ€˜.\n\n"
    )
    response = generator(
        system_prompt + prompt,
        max_new_tokens=250,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        # Return only the completion. The original stripped system_prompt
        # from the full text afterwards, which left the user's question
        # duplicated at the start of every answer.
        return_full_text=False
    )
    return response[0]["generated_text"].strip()

app = gr.Interface(
    fn=answer_question,
    inputs=gr.Textbox(
        lines=2,
        label="μž…μ‹œ 질문 μž…λ ₯",  # "Enter an admissions question"
        # Placeholder: "e.g. Gachon Univ. essay track / 2025 CSAT schedule /
        # ν•œκ΅­κ³΅ν•™λŒ€ admissions summary"
        placeholder="예: κ°€μ²œλŒ€ λ…Όμˆ μ „ν˜• / 2025 수λŠ₯ 일정 / ν•œκ΅­κ³΅ν•™λŒ€ μ „ν˜•μš”μ•½"
    ),
    outputs=gr.Textbox(label="AI λ‹΅λ³€"),  # "AI answer"
    # Title: "Admissions AI (KoAlpaca 1.1B)". Description: "A fast and smart
    # admissions-info chatbot. Provides explanations grounded in actual
    # admissions information."
    title="μž…μ‹œ μ „λ¬Έ AI (KoAlpaca 1.1B)",
    description="λΉ λ₯΄κ³  λ˜‘λ˜‘ν•œ μž…μ‹œμ •λ³΄ μ±—λ΄‡μž…λ‹ˆλ‹€. μ‹€μ œ μ „ν˜•μ •λ³΄μ— κ·Όκ±°ν•œ μ„€λͺ…을 μ œκ³΅ν•©λ‹ˆλ‹€."
)

if __name__ == "__main__":
    app.launch()
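
# Launch notes (assumed Gradio defaults): app.launch() serves the UI locally
# on http://127.0.0.1:7860; pass share=True for a temporary public link.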