File size: 2,816 Bytes
f6f8796
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
# /// script
# dependencies = ["transformers", "peft", "torch", "accelerate", "datasets"]
# ///

"""Smoke-test of the fine-tuned model on HF Jobs.

Loads the Qwen2.5-0.5B base model, attaches the LoRA adapter from the Hub,
runs one Korean-language thumbnail-trend prompt through chat-template
generation, and prints the decoded response.
"""

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

# Configuration: base checkpoint and the fine-tuned LoRA adapter repository.
BASE_MODEL = "Qwen/Qwen2.5-0.5B"
ADAPTER_MODEL = "epinfomax/youtube-thumbnail-trend-analyzer"

print("=" * 60)
print("YouTube ์ธ๋„ค์ผ ํŠธ๋ Œ๋“œ ๋ถ„์„ ๋ชจ๋ธ ํ…Œ์ŠคํŠธ")
print("=" * 60)

# Load tokenizer from the adapter repo (it may carry the chat template /
# special tokens used during fine-tuning) and the base model weights.
print("\n๋ชจ๋ธ ๋กœ๋“œ ์ค‘...")
tokenizer = AutoTokenizer.from_pretrained(ADAPTER_MODEL)

base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True
)

# Attach the LoRA adapter on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, ADAPTER_MODEL)
model.eval()
print("๋ชจ๋ธ ๋กœ๋“œ ์™„๋ฃŒ!")

# Test prompt (Korean): five thumbnail analyses; the model should summarize
# the trend and recommend a Midjourney prompt. Must stay byte-identical to
# the prompt used in evaluation.
test_input = """๋‹ค์Œ ์ธ๋„ค์ผ ๋ถ„์„๋“ค์„ ๋ณด๊ณ  ์˜ค๋Š˜์˜ ํŠธ๋ Œ๋“œ๋ฅผ ์š”์•ฝํ•˜๊ณ  Midjourney ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ถ”์ฒœํ•ด์ค˜:

[์Œ์•…] ์ˆœ์ˆœํฌ - ์†Œ์‹ฌํ•œ ๋‚จ์ž
- ๋ฐฐ๊ฒฝ: ์˜ค๋ Œ์ง€์ƒ‰ ๊ทธ๋ผ๋ฐ์ด์…˜
- ์ธ๋ฌผ: ์กธ๋ฆฐ ํ‘œ์ •, ๊ณ ๊ฐœ ์ˆ™์ž„
- ํ…์ŠคํŠธ: '์†Œ์‹ฌํ•œ ๋‚จ์ž' ํฐ์ƒ‰ ์„ธ๋ฆฌํ”„์ฒด
- ๋ถ„์œ„๊ธฐ: ๊ฐ์„ฑ์ , ์„œ์ •์ 

[๊ฒŒ์ž„] ์นผ๋ฐ”๋žŒ ์นด๋ฅดํ…”
- ๋ฐฐ๊ฒฝ: ์–ด๋‘์šด ๊ฒŒ์ž„ ํ™”๋ฉด
- ์ธ๋ฌผ: ๊ฒŒ์ž„ ์บ๋ฆญํ„ฐ๋“ค
- ํ…์ŠคํŠธ: '์ฐํ–‰๋ณต๋กค' ๋…ธ๋ž€์ƒ‰ ๊ตต์€ ๊ธ€์”จ
- ๋ถ„์œ„๊ธฐ: ์œ ๋จธ๋Ÿฌ์Šค, ๊ฐ€๋ฒผ์šด

[์—”ํ„ฐํ…Œ์ธ๋จผํŠธ] ํ™ฉ์ •๋ฏผ ์œ ํ–‰์–ด
- ๋ฐฐ๊ฒฝ: ๋ฐฉ์†ก ์ŠคํŠœ๋””์˜ค
- ์ธ๋ฌผ: ํ™ฉ์ •๋ฏผ, ์›ƒ๋Š” ํ‘œ์ •
- ํ…์ŠคํŠธ: ์—†์Œ
- ๋ถ„์œ„๊ธฐ: ์ฝ”๋ฏน, ์นœ๊ทผํ•จ

[๊ณผํ•™๊ธฐ์ˆ ] ์•„์ดํฐ ์‹ ๊ธฐ์ˆ 
- ๋ฐฐ๊ฒฝ: ๊น”๋”ํ•œ ํฐ์ƒ‰/ํšŒ์ƒ‰
- ์ธ๋ฌผ: ์—†์Œ
- ํ…์ŠคํŠธ: ๊ธฐ์ˆ  ๊ด€๋ จ ํ…์ŠคํŠธ
- ๋ถ„์œ„๊ธฐ: ๋ฏธ๋ž˜์ , ๊น”๋”ํ•จ

[๋…ธํ•˜์šฐ] ๊ธฐ์•ˆ84 ์ˆ˜์˜์žฅ
- ๋ฐฐ๊ฒฝ: ์ˆ˜์˜์žฅ, ํŒŒ๋ž€์ƒ‰
- ์ธ๋ฌผ: ๊ธฐ์•ˆ84, ๋›ฐ์–ด๋“œ๋Š” ๋ชจ์Šต
- ํ…์ŠคํŠธ: '์˜ํ•˜4๋„' ๋นจ๊ฐ„์ƒ‰
- ๋ถ„์œ„๊ธฐ: ๋„์ „์ , ์œ ๋จธ๋Ÿฌ์Šค"""

print("\n" + "=" * 60)
print("์ž…๋ ฅ:")
print("=" * 60)
print(test_input[:500] + "...")

# Generate a response via the tokenizer's chat template.
print("\n" + "=" * 60)
print("๋ชจ๋ธ ์‘๋‹ต ์ƒ์„ฑ ์ค‘...")
print("=" * 60)

messages = [{"role": "user", "content": test_input}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(text, return_tensors="pt").to(model.device)

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=500,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        # Some tokenizers (incl. Qwen variants) define no pad token; fall
        # back to EOS so generate() does not receive pad_token_id=None.
        pad_token_id=(tokenizer.pad_token_id
                      if tokenizer.pad_token_id is not None
                      else tokenizer.eos_token_id),
        eos_token_id=tokenizer.eos_token_id,
    )

# Decode only the newly generated tokens: generate() returns the prompt
# followed by the continuation, so slice off the input length instead of
# relying solely on string surgery over the decoded text.
prompt_length = inputs["input_ids"].shape[-1]
response = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)

# Defensive fallback: if the model echoed the prompt anyway, strip up to the
# final occurrence of the instruction marker.
if "์ถ”์ฒœํ•ด์ค˜:" in response:
    response = response.split("์ถ”์ฒœํ•ด์ค˜:")[-1].strip()

print("\n" + "=" * 60)
print("๋ชจ๋ธ ์ถœ๋ ฅ:")
print("=" * 60)
print(response)

print("\n" + "=" * 60)
print("ํ…Œ์ŠคํŠธ ์™„๋ฃŒ!")
print("=" * 60)