"""
Add the EXAONE-3.0-7.8B-Instruct model to Ollama.

Usage:
1. Request access to the model on Hugging Face.
2. Install the required packages:
   pip install transformers torch huggingface_hub
3. Run this script:
   python add_exaone_model.py
"""
import os
import subprocess
import json
from pathlib import Path
def create_ollama_modelfile():
    """Write an Ollama Modelfile for EXAONE-3.0-7.8B-Instruct.

    Returns:
        Path: path of the Modelfile written to the current directory.
    """
    # Modelfile tuned to EXAONE's ChatML-style chat template
    # (<|im_start|> / <|im_end|> markers).
    modelfile_content = """FROM huggingface:LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct

# Sampling / context parameters
PARAMETER temperature 0.7
PARAMETER top_p 0.9
PARAMETER top_k 40
PARAMETER num_ctx 4096
PARAMETER num_predict 512

# System prompt
SYSTEM \"\"\"You are EXAONE, a helpful AI assistant developed by LG AI Research.
You are designed to be helpful, harmless, and honest.
You can communicate in both Korean and English.\"\"\"

# Chat template for the EXAONE model.
# NOTE: verify against the model tokenizer's template; adjustment may be needed.
TEMPLATE \"\"\"{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>
\"\"\"
"""
    modelfile_path = Path("EXAONE-3.0-7.8B-Instruct.modelfile")
    modelfile_path.write_text(modelfile_content, encoding='utf-8')
    print(f"Modelfile created: {modelfile_path}")
    return modelfile_path
def create_ollama_model_from_huggingface():
    """Print the available ways to register the EXAONE model with Ollama.

    Side effects: writes the Modelfile via create_ollama_modelfile() and
    prints step-by-step instructions; returns None.
    """
    print("=" * 60)
    print("Adding EXAONE-3.0-7.8B-Instruct to Ollama")
    print("=" * 60)

    # Option 1: use the generated Modelfile (recommended)
    print("\n[Option 1] Create the model from a Modelfile")
    print("-" * 60)
    modelfile_path = create_ollama_modelfile()
    print("\nRun the following command to create the model:")
    print(f"  ollama create EXAONE-3.0-7.8B-Instruct -f {modelfile_path}")

    # Option 2: pull directly from Hugging Face
    print("\n[Option 2] Pull directly from Hugging Face")
    print("-" * 60)
    print("Run the following command:")
    print("  ollama create EXAONE-3.0-7.8B-Instruct --from huggingface:LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct")

    # Option 3: convert to GGUF first (advanced)
    print("\n[Option 3] Convert to GGUF format, then add (advanced)")
    print("-" * 60)
    print("This method converts the model to GGUF with llama.cpp and then")
    print("adds it to Ollama. It requires more setup.")

    print("\n" + "=" * 60)
    print("Important notes:")
    print("=" * 60)
    print("1. You need access to the model on Hugging Face:")
    print("   https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct")
    print("2. A Hugging Face token may be required.")
    print("   Environment variable: export HUGGINGFACE_HUB_TOKEN=your_token")
    print("   Windows PowerShell: $env:HUGGINGFACE_HUB_TOKEN='your_token'")
    print("3. The model is about 15GB, so make sure you have enough disk space.")
    print("4. Sufficient GPU memory is required (at least 16GB recommended).")
    print("=" * 60)
def check_ollama_installation():
    """Return True if the `ollama` CLI is installed and responds to --version.

    Prints a human-readable status line in every case; never raises.
    """
    try:
        result = subprocess.run(['ollama', '--version'],
                                capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            print(f"Ollama detected: {result.stdout.strip()}")
            return True
        print("Ollama is not installed.")
        return False
    except FileNotFoundError:
        # Binary not on PATH at all.
        print("Ollama is not installed.")
        print("  Install instructions: https://ollama.ai/download")
        return False
    except Exception as e:
        # Includes subprocess.TimeoutExpired and OS-level failures.
        print(f"Error while checking for Ollama: {e}")
        return False
def check_huggingface_access():
    """Return True if the user is logged in to the Hugging Face Hub.

    The import lives inside the try so a missing `huggingface_hub`
    package degrades to a warning instead of crashing the script.
    """
    try:
        from huggingface_hub import whoami
        user_info = whoami()
        print(f"Hugging Face login confirmed: {user_info.get('name', 'Unknown')}")
        return True
    except Exception as e:
        # ImportError, network failures, and auth errors all land here.
        print(f"Could not verify Hugging Face login: {e}")
        print("  Log in with the Hugging Face CLI: huggingface-cli login")
        return False
if __name__ == "__main__":
    print("\n" + "=" * 60)
    print("EXAONE-3.0-7.8B-Instruct Ollama setup script")
    print("=" * 60 + "\n")

    # Pre-flight checks: Ollama is mandatory, Hugging Face login is advisory.
    ollama_ok = check_ollama_installation()
    hf_ok = check_huggingface_access()

    if not ollama_ok:
        print("\nPlease install Ollama first.")
        # SystemExit instead of exit(): exit() is a site builtin and may be absent.
        raise SystemExit(1)

    # Print the available ways to add the model.
    create_ollama_model_from_huggingface()

    print("\n" + "=" * 60)
    print("Next steps:")
    print("=" * 60)
    print("1. Pick one of the commands above and run it.")
    print("2. Once the model is created, verify it with:")
    print("   ollama list")
    print("3. When the model appears in the list, it can be used from the web application.")
    print("=" * 60 + "\n")