Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -12,14 +12,14 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
|
| 12 |
# ===== ๋ชจ๋ธ ๋ชฉ๋ก =====
|
| 13 |
MODEL_OPTIONS = {
|
| 14 |
"Qwen2.5-1.5B-Instruct": "Qwen/Qwen2.5-1.5B-Instruct",
|
| 15 |
-
"CLOVA-Text(๋์ฒด)": "
|
| 16 |
}
|
| 17 |
|
| 18 |
# ===== ํ
์คํธ ๋ชจ๋ธ ๋ก๋ =====
|
| 19 |
def load_text_model(model_choice):
|
| 20 |
model_name = MODEL_OPTIONS[model_choice]
|
| 21 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name
|
| 22 |
-
model = AutoModelForCausalLM.from_pretrained(model_name
|
| 23 |
return pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
|
| 24 |
|
| 25 |
# ===== ํ
์คํธ ์ ์ฒ๋ฆฌ =====
|
|
@@ -95,7 +95,7 @@ iface = gr.Interface(
|
|
| 95 |
gr.Textbox(label="์๋ ์ฌ์์ฑ (LLM)", lines=5)
|
| 96 |
],
|
| 97 |
title="ํ๊ตญ์ด ๋ณธ๋ฌธ ์ถ์ถ + ์๋ ์์ฝ + LLM ์ฌ์์ฑ",
|
| 98 |
-
description="์๋ฌธ ํ
์คํธ์์ ๋ฐ๋ก ์์ฝ ํ, ์ ํํ ๋ชจ๋ธ(Qwen ๋๋
|
| 99 |
)
|
| 100 |
|
| 101 |
if __name__ == "__main__":
|
|
|
|
# ===== Model registry =====
# Maps the display label shown in the UI dropdown to a Hugging Face repo id.
MODEL_OPTIONS = {
    "Qwen2.5-1.5B-Instruct": "Qwen/Qwen2.5-1.5B-Instruct",
    # NOTE(review): this label text appears mojibake-garbled in the scrape
    # (likely Korean "대체" = "fallback/substitute") — confirm intended encoding
    # before changing, since the key is matched against the UI selection.
    "CLOVA-Text(๋์ฒด)": "skt/kogpt2-base-v2",
}
|
| 17 |
|
# ===== Text model loading =====
def load_text_model(model_choice):
    """Build a CPU text-generation pipeline for the selected model.

    Parameters
    ----------
    model_choice : str
        A key of ``MODEL_OPTIONS`` (the display name picked in the UI).

    Returns
    -------
    transformers.Pipeline
        A ``"text-generation"`` pipeline bound to the CPU (``device=-1``).

    Raises
    ------
    KeyError
        If ``model_choice`` is not present in ``MODEL_OPTIONS``.
    """
    repo_id = MODEL_OPTIONS[model_choice]
    tok = AutoTokenizer.from_pretrained(repo_id)
    # .to("cpu") is redundant alongside device=-1 below, but it makes the
    # CPU-only intent explicit for this Space's hardware.
    lm = AutoModelForCausalLM.from_pretrained(repo_id).to("cpu")
    return pipeline("text-generation", model=lm, tokenizer=tok, device=-1)
|
| 24 |
|
| 25 |
# ===== ํ
์คํธ ์ ์ฒ๋ฆฌ =====
|
|
|
|
| 95 |
gr.Textbox(label="์๋ ์ฌ์์ฑ (LLM)", lines=5)
|
| 96 |
],
|
| 97 |
title="ํ๊ตญ์ด ๋ณธ๋ฌธ ์ถ์ถ + ์๋ ์์ฝ + LLM ์ฌ์์ฑ",
|
| 98 |
+
description="์๋ฌธ ํ
์คํธ์์ ๋ฐ๋ก ์์ฝ ํ, ์ ํํ ๋ชจ๋ธ(Qwen ๋๋ KoGPT2)๋ก ์ฌ์์ฑํฉ๋๋ค."
|
| 99 |
)
|
| 100 |
|
| 101 |
if __name__ == "__main__":
|