# NOTE(review): removed extraction artifacts (file-size line, commit hash,
# line-number dump) that are not part of the script and are not valid Python.
# /// script
# dependencies = ["transformers", "peft", "torch", "accelerate", "datasets"]
# ///
"""Smoke-test a fine-tuned model on HF Jobs.

Loads the Qwen2.5-0.5B base model, attaches the LoRA adapter published at
``epinfomax/youtube-thumbnail-trend-analyzer``, and generates one sample
response for a hard-coded prompt.

NOTE(review): the original docstring was Korean text that appears
mojibake-mangled and split across lines by a bad encoding round-trip;
it read roughly "Model test on HF Jobs" and has been translated here.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

# Configuration: base checkpoint on the Hub and the PEFT adapter repo.
BASE_MODEL = "Qwen/Qwen2.5-0.5B"
ADAPTER_MODEL = "epinfomax/youtube-thumbnail-trend-analyzer"
print("=" * 60)
print("YouTube ์ธ๋ค์ผ ํธ๋ ๋ ๋ถ์ ๋ชจ๋ธ ํ
์คํธ")
print("=" * 60)
# ๋ชจ๋ธ ๋ก๋
print("\n๋ชจ๋ธ ๋ก๋ ์ค...")
tokenizer = AutoTokenizer.from_pretrained(ADAPTER_MODEL)
base_model = AutoModelForCausalLM.from_pretrained(
BASE_MODEL,
torch_dtype=torch.float16,
device_map="auto",
trust_remote_code=True
)
model = PeftModel.from_pretrained(base_model, ADAPTER_MODEL)
model.eval()
print("๋ชจ๋ธ ๋ก๋ ์๋ฃ!")
# Test input: a multi-category thumbnail-analysis prompt for the model to
# summarize into a trend report plus a Midjourney prompt recommendation.
# NOTE(review): the literal below is mojibake-encoded Korean and is kept
# byte-for-byte (including its internal line breaks) since the fine-tuned
# adapter presumably expects these exact byte sequences — verify against
# the training data before re-encoding.
test_input = """๋ค์ ์ธ๋ค์ผ ๋ถ์๋ค์ ๋ณด๊ณ ์ค๋์ ํธ๋ ๋๋ฅผ ์์ฝํ๊ณ Midjourney ํ๋กฌํํธ๋ฅผ ์ถ์ฒํด์ค:
[์์
] ์์ํฌ - ์์ฌํ ๋จ์
- ๋ฐฐ๊ฒฝ: ์ค๋ ์ง์ ๊ทธ๋ผ๋ฐ์ด์
- ์ธ๋ฌผ: ์กธ๋ฆฐ ํ์ , ๊ณ ๊ฐ ์์
- ํ
์คํธ: '์์ฌํ ๋จ์' ํฐ์ ์ธ๋ฆฌํ์ฒด
- ๋ถ์๊ธฐ: ๊ฐ์ฑ์ , ์์ ์
[๊ฒ์] ์นผ๋ฐ๋ ์นด๋ฅดํ
- ๋ฐฐ๊ฒฝ: ์ด๋์ด ๊ฒ์ ํ๋ฉด
- ์ธ๋ฌผ: ๊ฒ์ ์บ๋ฆญํฐ๋ค
- ํ
์คํธ: '์ฐํ๋ณต๋กค' ๋
ธ๋์ ๊ตต์ ๊ธ์จ
- ๋ถ์๊ธฐ: ์ ๋จธ๋ฌ์ค, ๊ฐ๋ฒผ์ด
[์ํฐํ
์ธ๋จผํธ] ํฉ์ ๋ฏผ ์ ํ์ด
- ๋ฐฐ๊ฒฝ: ๋ฐฉ์ก ์คํ๋์ค
- ์ธ๋ฌผ: ํฉ์ ๋ฏผ, ์๋ ํ์
- ํ
์คํธ: ์์
- ๋ถ์๊ธฐ: ์ฝ๋ฏน, ์น๊ทผํจ
[๊ณผํ๊ธฐ์ ] ์์ดํฐ ์ ๊ธฐ์
- ๋ฐฐ๊ฒฝ: ๊น๋ํ ํฐ์/ํ์
- ์ธ๋ฌผ: ์์
- ํ
์คํธ: ๊ธฐ์ ๊ด๋ จ ํ
์คํธ
- ๋ถ์๊ธฐ: ๋ฏธ๋์ , ๊น๋ํจ
[๋
ธํ์ฐ] ๊ธฐ์84 ์์์ฅ
- ๋ฐฐ๊ฒฝ: ์์์ฅ, ํ๋์
- ์ธ๋ฌผ: ๊ธฐ์84, ๋ฐ์ด๋๋ ๋ชจ์ต
- ํ
์คํธ: '์ํ4๋' ๋นจ๊ฐ์
- ๋ถ์๊ธฐ: ๋์ ์ , ์ ๋จธ๋ฌ์ค"""

# Echo a 500-character preview of the prompt for the job log.
print("\n" + "=" * 60)
print("์๋ ฅ:")
print("=" * 60)
print(test_input[:500] + "...")
# Generate a response from the adapted model.
print("\n" + "=" * 60)
print("๋ชจ๋ธ ์๋ต ์์ฑ ์ค...")
print("=" * 60)

# Wrap the prompt in the tokenizer's chat template so the input matches
# the format the adapter was (presumably) fine-tuned on.
messages = [{"role": "user", "content": test_input}]
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
inputs = tokenizer(text, return_tensors="pt").to(model.device)

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=500,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        # Fall back to EOS if the tokenizer defines no pad token — the
        # original passed pad_token_id=None in that case, which triggers
        # a warning / undefined padding in generate(). Identical behavior
        # when a pad token exists.
        pad_token_id=(
            tokenizer.pad_token_id
            if tokenizer.pad_token_id is not None
            else tokenizer.eos_token_id
        ),
        eos_token_id=tokenizer.eos_token_id,
    )

# decode() returns prompt + completion; strip the echoed prompt by keeping
# only the text after the prompt's trailing instruction marker.
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
if "์ถ์ฒํด์ค:" in response:
    response = response.split("์ถ์ฒํด์ค:")[-1].strip()
print("\n" + "=" * 60)
print("๋ชจ๋ธ ์ถ๋ ฅ:")
print("=" * 60)
print(response)
print("\n" + "=" * 60)
print("ํ
์คํธ ์๋ฃ!")
print("=" * 60)
# NOTE(review): removed a stray trailing "|" left over from extraction.