File size: 2,615 Bytes
d54e6a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b834258
 
 
 
c2280e3
 
d234e06
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
"""
EXAONE-3.0-7.8B-Instruct๋ฅผ Ollama์— ์ง์ ‘ ์ถ”๊ฐ€ํ•˜๋Š” ์Šคํฌ๋ฆฝํŠธ
ํ† ํฐ์„ ์‚ฌ์šฉํ•˜์—ฌ ๋ชจ๋ธ์„ ์ถ”๊ฐ€ํ•ฉ๋‹ˆ๋‹ค.
"""

import os
import subprocess
import json

HF_TOKEN = "YOUR_HUGGINGFACE_TOKEN_HERE"

def create_simple_modelfile():
    """Write a minimal Ollama Modelfile for EXAONE-3.0-7.8B-Instruct.

    Creates ``EXAONE-3.0-7.8B-Instruct.modelfile`` in the current working
    directory. The file holds only inference parameters and a system prompt
    (no FROM line), since Ollama may not support this HF model directly.
    """
    target_path = "EXAONE-3.0-7.8B-Instruct.modelfile"
    # The template below is byte-identical to the original content,
    # including its Korean comments, which are part of the written file.
    template = """# EXAONE-3.0-7.8B-Instruct 모델 설정
# 참고: Ollama가 Hugging Face 모델을 직접 지원하지 않는 경우
# 다른 방법이 필요할 수 있습니다.

PARAMETER temperature 0.7
PARAMETER top_p 0.9
PARAMETER top_k 40
PARAMETER num_ctx 4096

SYSTEM \"\"\"You are EXAONE, a helpful AI assistant developed by LG AI Research. 
You can communicate in both Korean and English.\"\"\"
"""
    with open(target_path, "w", encoding="utf-8") as handle:
        handle.write(template)

    print("[OK] Modelfile 생성 완료")

def check_model_availability():
    """Return True if an EXAONE model already appears in ``ollama list``.

    Best-effort check: any failure to run the CLI (binary missing, timeout,
    non-spawnable process) is reported as a warning and treated as
    "not installed" (returns False).
    """
    try:
        # Keep the try body minimal: only the subprocess call can raise here.
        result = subprocess.run(['ollama', 'list'],
                                capture_output=True, text=True, timeout=5)
    except (OSError, subprocess.SubprocessError) as e:
        # Narrowed from `except Exception`: only CLI-invocation failures
        # (FileNotFoundError, TimeoutExpired, ...) are expected here.
        print(f"[WARNING] 모델 확인 중 오류: {e}")
        return False
    # Case-insensitive match subsumes the original redundant
    # `'EXAONE' in stdout or 'exaone' in stdout.lower()` test.
    if 'exaone' in result.stdout.lower():
        print("[INFO] EXAONE 모델이 이미 설치되어 있습니다.")
        return True
    return False

def main():
    """Entry point: try to make EXAONE-3.0-7.8B-Instruct available in Ollama.

    Exports the Hugging Face token for child processes, checks whether the
    model is already installed, and otherwise prints manual instructions
    (current Ollama versions cannot pull this HF model directly).
    """
    print("\n" + "=" * 60)
    print("EXAONE-3.0-7.8B-Instruct Ollama 추가 시도")
    print("=" * 60)

    # Export the token so any child process talking to Hugging Face sees it.
    os.environ['HUGGINGFACE_HUB_TOKEN'] = HF_TOKEN
    # Fixed: was an f-string with no placeholders.
    print("[OK] Hugging Face 토큰 설정 완료")

    # Nothing more to do if the model is already present.
    if check_model_availability():
        print("\n모델이 이미 설치되어 있습니다.")
        return

    print("\n현재 Ollama 버전에서는 Hugging Face 모델을 직접 가져올 수 없습니다.")
    print("\n다음 방법을 시도해보세요:")
    print("\n1. Ollama를 최신 버전으로 업데이트")
    print("   https://ollama.ai/download")

    print("\n2. 수동으로 모델 정보 확인")
    print("   현재 Ollama 버전 확인: ollama --version")
    print("   사용 가능한 모델 확인: ollama list")

    print("\n3. EXAONE 모델의 GGUF 버전 찾기")
    print("   Hugging Face에서 GGUF 형식의 모델을 찾아보세요.")

    print("\n" + "=" * 60)
    print("참고: 현재 Ollama 0.13.0은 Hugging Face 모델 직접 지원이 제한적입니다.")
    print("=" * 60)

if __name__ == "__main__":
    main()