# youtube-thumbnail-analysis / test_script.py
# Uploaded by epinfomax via huggingface_hub (commit f6f8796, verified)
# /// script
# dependencies = ["transformers", "peft", "torch", "accelerate", "datasets"]
# ///
"""HF Jobs์—์„œ ๋ชจ๋ธ ํ…Œ์ŠคํŠธ"""
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch
# --- Configuration ---
BASE_MODEL = "Qwen/Qwen2.5-0.5B"
ADAPTER_MODEL = "epinfomax/youtube-thumbnail-trend-analyzer"

print("=" * 60)
print("YouTube ์ธ๋„ค์ผ ํŠธ๋ Œ๋“œ ๋ถ„์„ ๋ชจ๋ธ ํ…Œ์ŠคํŠธ")
print("=" * 60)

# --- Model loading ---
print("\n๋ชจ๋ธ ๋กœ๋“œ ์ค‘...")
# Load the tokenizer from the ADAPTER repo so that any chat template or
# added tokens saved during fine-tuning are picked up.
tokenizer = AutoTokenizer.from_pretrained(ADAPTER_MODEL)
# Some tokenizers ship without an explicit pad token; fall back to EOS so
# that generate(pad_token_id=tokenizer.pad_token_id) never receives None.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,
    device_map="auto",
    # NOTE(review): Qwen2.5 is natively supported by transformers, so
    # trust_remote_code should not be required — confirm and drop if so.
    trust_remote_code=True,
)
# Attach the LoRA adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, ADAPTER_MODEL)
model.eval()  # inference mode: disable dropout etc.
print("๋ชจ๋ธ ๋กœ๋“œ ์™„๋ฃŒ!")
# Test input: a single prompt (Korean) asking the model to summarize today's
# thumbnail trends from five category analyses ([music], [game],
# [entertainment], [science/tech], [know-how]) and to recommend a Midjourney
# prompt. The literal is runtime data consumed by the chat template below,
# so its text must stay exactly as trained/expected.
test_input = """๋‹ค์Œ ์ธ๋„ค์ผ ๋ถ„์„๋“ค์„ ๋ณด๊ณ  ์˜ค๋Š˜์˜ ํŠธ๋ Œ๋“œ๋ฅผ ์š”์•ฝํ•˜๊ณ  Midjourney ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ถ”์ฒœํ•ด์ค˜:
[์Œ์•…] ์ˆœ์ˆœํฌ - ์†Œ์‹ฌํ•œ ๋‚จ์ž
- ๋ฐฐ๊ฒฝ: ์˜ค๋ Œ์ง€์ƒ‰ ๊ทธ๋ผ๋ฐ์ด์…˜
- ์ธ๋ฌผ: ์กธ๋ฆฐ ํ‘œ์ •, ๊ณ ๊ฐœ ์ˆ™์ž„
- ํ…์ŠคํŠธ: '์†Œ์‹ฌํ•œ ๋‚จ์ž' ํฐ์ƒ‰ ์„ธ๋ฆฌํ”„์ฒด
- ๋ถ„์œ„๊ธฐ: ๊ฐ์„ฑ์ , ์„œ์ •์ 
[๊ฒŒ์ž„] ์นผ๋ฐ”๋žŒ ์นด๋ฅดํ…”
- ๋ฐฐ๊ฒฝ: ์–ด๋‘์šด ๊ฒŒ์ž„ ํ™”๋ฉด
- ์ธ๋ฌผ: ๊ฒŒ์ž„ ์บ๋ฆญํ„ฐ๋“ค
- ํ…์ŠคํŠธ: '์ฐํ–‰๋ณต๋กค' ๋…ธ๋ž€์ƒ‰ ๊ตต์€ ๊ธ€์”จ
- ๋ถ„์œ„๊ธฐ: ์œ ๋จธ๋Ÿฌ์Šค, ๊ฐ€๋ฒผ์šด
[์—”ํ„ฐํ…Œ์ธ๋จผํŠธ] ํ™ฉ์ •๋ฏผ ์œ ํ–‰์–ด
- ๋ฐฐ๊ฒฝ: ๋ฐฉ์†ก ์ŠคํŠœ๋””์˜ค
- ์ธ๋ฌผ: ํ™ฉ์ •๋ฏผ, ์›ƒ๋Š” ํ‘œ์ •
- ํ…์ŠคํŠธ: ์—†์Œ
- ๋ถ„์œ„๊ธฐ: ์ฝ”๋ฏน, ์นœ๊ทผํ•จ
[๊ณผํ•™๊ธฐ์ˆ ] ์•„์ดํฐ ์‹ ๊ธฐ์ˆ 
- ๋ฐฐ๊ฒฝ: ๊น”๋”ํ•œ ํฐ์ƒ‰/ํšŒ์ƒ‰
- ์ธ๋ฌผ: ์—†์Œ
- ํ…์ŠคํŠธ: ๊ธฐ์ˆ  ๊ด€๋ จ ํ…์ŠคํŠธ
- ๋ถ„์œ„๊ธฐ: ๋ฏธ๋ž˜์ , ๊น”๋”ํ•จ
[๋…ธํ•˜์šฐ] ๊ธฐ์•ˆ84 ์ˆ˜์˜์žฅ
- ๋ฐฐ๊ฒฝ: ์ˆ˜์˜์žฅ, ํŒŒ๋ž€์ƒ‰
- ์ธ๋ฌผ: ๊ธฐ์•ˆ84, ๋›ฐ์–ด๋“œ๋Š” ๋ชจ์Šต
- ํ…์ŠคํŠธ: '์˜ํ•˜4๋„' ๋นจ๊ฐ„์ƒ‰
- ๋ถ„์œ„๊ธฐ: ๋„์ „์ , ์œ ๋จธ๋Ÿฌ์Šค"""
# --- Show a preview of the prompt (first 500 chars) ---
print("\n" + "=" * 60)
print("์ž…๋ ฅ:")
print("=" * 60)
print(test_input[:500] + "...")

# --- Generate a response ---
print("\n" + "=" * 60)
print("๋ชจ๋ธ ์‘๋‹ต ์ƒ์„ฑ ์ค‘...")
print("=" * 60)

# Wrap the raw prompt with the tokenizer's chat template so the model sees
# the same formatting it was fine-tuned with.
messages = [{"role": "user", "content": test_input}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(text, return_tensors="pt").to(model.device)

with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=500,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        # Guard against tokenizers that define no pad token: fall back to EOS
        # so generate() does not receive pad_token_id=None.
        pad_token_id=(
            tokenizer.pad_token_id
            if tokenizer.pad_token_id is not None
            else tokenizer.eos_token_id
        ),
        eos_token_id=tokenizer.eos_token_id,
    )

# Decode ONLY the newly generated tokens. The previous approach — splitting
# the decoded string on "์ถ”์ฒœํ•ด์ค˜:" — was fragile: it broke whenever the model
# echoed that phrase in its answer or the chat template altered the prompt
# text. Slicing past the prompt length is exact and template-independent.
prompt_len = inputs["input_ids"].shape[1]
response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()

print("\n" + "=" * 60)
print("๋ชจ๋ธ ์ถœ๋ ฅ:")
print("=" * 60)
print(response)
print("\n" + "=" * 60)
print("ํ…Œ์ŠคํŠธ ์™„๋ฃŒ!")
print("=" * 60)