|
|
""" |
|
|
EXAONE-3.0-7.8B-Instruct ๋ชจ๋ธ์ Ollama์ ์ถ๊ฐํ๋ ์คํฌ๋ฆฝํธ |
|
|
|
|
|
์ฌ์ฉ ๋ฐฉ๋ฒ: |
|
|
1. Hugging Face์์ ๋ชจ๋ธ ์ก์ธ์ค ๊ถํ์ ๋ฐ์์ผ ํฉ๋๋ค. |
|
|
2. ํ์ํ ํจํค์ง๋ฅผ ์ค์นํฉ๋๋ค: |
|
|
pip install transformers torch huggingface_hub |
|
|
3. ์ด ์คํฌ๋ฆฝํธ๋ฅผ ์คํํฉ๋๋ค: |
|
|
python add_exaone_model.py |
|
|
""" |
|
|
|
|
|
import json
import os
import subprocess
import sys
from pathlib import Path
|
|
|
|
|
def create_ollama_modelfile():
    """Write an Ollama Modelfile for EXAONE-3.0-7.8B-Instruct.

    The file is written to the current working directory so the caller can
    pass it to ``ollama create -f``.

    Returns:
        Path: path of the Modelfile that was written.
    """
    # The escaped \"\"\" sequences produce literal triple quotes in the
    # output file, which is the delimiter Ollama expects for SYSTEM/TEMPLATE.
    modelfile_content = """FROM huggingface:LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct

# Model parameters
PARAMETER temperature 0.7
PARAMETER top_p 0.9
PARAMETER top_k 40
PARAMETER num_ctx 4096
PARAMETER num_predict 512

# System prompt
SYSTEM \"\"\"You are EXAONE, a helpful AI assistant developed by LG AI Research.
You are designed to be helpful, harmless, and honest.
You can communicate in both Korean and English.\"\"\"

# Chat template for the EXAONE model.
# NOTE(review): verify against the model's actual tokenizer chat template;
# this may need adjustment.
TEMPLATE \"\"\"{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>
\"\"\"
"""

    modelfile_path = Path("EXAONE-3.0-7.8B-Instruct.modelfile")
    # Explicit UTF-8 keeps the output identical across platforms.
    modelfile_path.write_text(modelfile_content, encoding='utf-8')
    print(f"[OK] Modelfile created: {modelfile_path}")
    return modelfile_path
|
|
|
|
|
def create_ollama_model_from_huggingface():
    """Print the available ways to register the EXAONE model with Ollama.

    Writes the Modelfile (method 1) via create_ollama_modelfile() and prints
    the commands for each approach along with important caveats. Apart from
    that file, this function is purely informational.
    """
    print("=" * 60)
    print("Adding EXAONE-3.0-7.8B-Instruct to Ollama")
    print("=" * 60)

    print("\n[Method 1] Create the model from a Modelfile")
    print("-" * 60)

    modelfile_path = create_ollama_modelfile()

    print("\nRun the following command to create the model:")
    print(f"  ollama create EXAONE-3.0-7.8B-Instruct -f {modelfile_path}")

    print("\n[Method 2] Pull directly from Hugging Face")
    print("-" * 60)
    print("Run the following command:")
    print("  ollama create EXAONE-3.0-7.8B-Instruct --from huggingface:LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct")

    print("\n[Method 3] Convert to GGUF format first (advanced)")
    print("-" * 60)
    print("This method converts the model to GGUF format with llama.cpp")
    print("before adding it to Ollama. It requires more setup.")

    print("\n" + "=" * 60)
    print("Important notes:")
    print("=" * 60)
    print("1. You need access to the model on Hugging Face:")
    print("   https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct")
    print("2. A Hugging Face token may be required.")
    print("   Set the environment variable: export HUGGINGFACE_HUB_TOKEN=your_token")
    print("   Windows PowerShell: $env:HUGGINGFACE_HUB_TOKEN='your_token'")
    print("3. The model is about 15GB, so make sure there is enough disk space.")
    print("4. Sufficient GPU memory is required (at least 16GB recommended).")
    print("=" * 60)
|
|
|
|
|
def check_ollama_installation():
    """Check whether the Ollama CLI is available on this machine.

    Runs ``ollama --version`` with a 5-second timeout and reports the result
    on stdout.

    Returns:
        bool: True if the command ran and exited with status 0, else False.
    """
    try:
        result = subprocess.run(['ollama', '--version'],
                                capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            print(f"[OK] Ollama detected: {result.stdout.strip()}")
            return True
        print("[ERROR] Ollama is not installed.")
        return False
    except FileNotFoundError:
        # The binary is not on PATH at all.
        print("[ERROR] Ollama is not installed.")
        print("  Install it from: https://ollama.ai/download")
        return False
    except subprocess.TimeoutExpired:
        # Explicit handler so a hung binary is reported distinctly.
        print("[ERROR] Ollama version check timed out.")
        return False
    except Exception as e:
        # Defensive boundary: report any other failure and continue.
        print(f"[ERROR] Failed to check Ollama: {e}")
        return False
|
|
|
|
|
def check_huggingface_access():
    """Check whether the user is logged in to Hugging Face.

    Imports huggingface_hub lazily so the script still runs (and degrades
    gracefully) when the package is not installed.

    Returns:
        bool: True if ``huggingface_hub.whoami()`` succeeds, else False.
    """
    try:
        from huggingface_hub import whoami
        user_info = whoami()
        print(f"[OK] Hugging Face login confirmed: {user_info.get('name', 'Unknown')}")
        return True
    except Exception as e:
        # Covers ImportError (package missing) and auth/network failures.
        print(f"[WARN] Hugging Face login check failed: {e}")
        print("  Log in with the Hugging Face CLI: huggingface-cli login")
        return False
|
|
|
|
|
if __name__ == "__main__": |
|
|
print("\n" + "=" * 60) |
|
|
print("EXAONE-3.0-7.8B-Instruct Ollama ์ถ๊ฐ ์คํฌ๋ฆฝํธ") |
|
|
print("=" * 60 + "\n") |
|
|
|
|
|
|
|
|
ollama_ok = check_ollama_installation() |
|
|
hf_ok = check_huggingface_access() |
|
|
|
|
|
if not ollama_ok: |
|
|
print("\nโ ๏ธ Ollama๋ฅผ ๋จผ์ ์ค์นํด์ฃผ์ธ์.") |
|
|
exit(1) |
|
|
|
|
|
|
|
|
create_ollama_model_from_huggingface() |
|
|
|
|
|
print("\n" + "=" * 60) |
|
|
print("๋ค์ ๋จ๊ณ:") |
|
|
print("=" * 60) |
|
|
print("1. ์์ ๋ช
๋ น์ด ์ค ํ๋๋ฅผ ์ ํํ์ฌ ์คํํ์ธ์.") |
|
|
print("2. ๋ชจ๋ธ ์์ฑ์ด ์๋ฃ๋๋ฉด ๋ค์ ๋ช
๋ น์ด๋ก ํ์ธํ์ธ์:") |
|
|
print(" ollama list") |
|
|
print("3. ๋ชจ๋ธ์ด ๋ชฉ๋ก์ ๋ํ๋๋ฉด ์น ์ ํ๋ฆฌ์ผ์ด์
์์ ์ฌ์ฉํ ์ ์์ต๋๋ค.") |
|
|
print("=" * 60 + "\n") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|