from transformers import pipeline
import gradio as gr
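
# Assumed dependencies (on a Hugging Face Space these go in requirements.txt):
# transformers, torch, gradio, and accelerate (needed for device_map="auto")
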
# Load the KoAlpaca model (a balance of speed and quality)
generator = pipeline(
    "text-generation",
    model="beomi/KoAlpaca-Polyglot-1.1B",
    tokenizer="beomi/KoAlpaca-Polyglot-1.1B",
    device_map="auto"
)
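

# Build the full prompt, generate, then return only the newly generated text.
# The text-generation pipeline includes the prompt in its output by default
# (return_full_text=False would avoid that), so the prefix is stripped below.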
def answer_question(prompt):
    system_prompt = (
        "You are an AI that provides information on Korean university "
        "admissions. Explain admission tracks such as the CSAT (Suneung), "
        "comprehensive student-record review, essay-based admissions, and "
        "regular admissions in an easy-to-understand way.\n\n"
    )
    full_prompt = system_prompt + prompt
    response = generator(
        full_prompt,
        max_new_tokens=250,
        temperature=0.7,
        top_p=0.9,
        do_sample=True
    )
    # Strip the whole prompt prefix (not just the system prompt, which would
    # leave the user's question echoed at the start of the answer)
    return response[0]["generated_text"][len(full_prompt):].strip()
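
# Quick local sanity check (hypothetical example question):
# print(answer_question("How does the essay-based track differ from regular admission?"))
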
app = gr.Interface(
    fn=answer_question,
    inputs=gr.Textbox(
        lines=2,
        label="Admissions question",
        placeholder="e.g., Gachon University essay-track admissions / 2025 CSAT schedule / Tech University of Korea admissions summary"
    ),
    outputs=gr.Textbox(label="AI answer"),
    title="Admissions Expert AI (KoAlpaca 1.1B)",
    description="A fast, smart chatbot for Korean university admissions. Explanations are grounded in actual admission-track information."
)
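
# Hugging Face Spaces runs app.py as the main script, so launch() fires both
# on a Space and when running locally via `python app.py`.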
if __name__ == "__main__":
    app.launch()