epinfomax committed
Commit f6f8796 · verified · 1 parent: 90068b2

Upload test_script.py with huggingface_hub

Files changed (1): test_script.py +105 -0
test_script.py ADDED
@@ -0,0 +1,105 @@
+ # /// script
+ # dependencies = ["transformers", "peft", "torch", "accelerate", "datasets"]
+ # ///
+
+ """Model test on HF Jobs."""
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+ import torch
+
+ # Configuration
+ BASE_MODEL = "Qwen/Qwen2.5-0.5B"
+ ADAPTER_MODEL = "epinfomax/youtube-thumbnail-trend-analyzer"
+
+ print("=" * 60)
+ print("YouTube thumbnail trend analysis model test")
+ print("=" * 60)
+
+ # Load the tokenizer, the base model, and the fine-tuned LoRA adapter
+ print("\nLoading model...")
+ tokenizer = AutoTokenizer.from_pretrained(ADAPTER_MODEL)
+
+ base_model = AutoModelForCausalLM.from_pretrained(
+     BASE_MODEL,
+     torch_dtype=torch.float16,
+     device_map="auto",
+     trust_remote_code=True
+ )
+
+ model = PeftModel.from_pretrained(base_model, ADAPTER_MODEL)
+ model.eval()
+ print("Model loaded!")
+
+ # Test input
+ test_input = """Look at the following thumbnail analyses, summarize today's trends, and recommend Midjourney prompts:
+
+ [Music] 순순희 - 소심한 남자 (Timid Man)
+ - Background: orange gradient
+ - People: sleepy expression, head bowed
+ - Text: '소심한 남자' in white serif type
+ - Mood: emotional, lyrical
+
+ [Gaming] 칼바람 카르텔
+ - Background: dark game footage
+ - People: game characters
+ - Text: '찐행복롤' in bold yellow lettering
+ - Mood: humorous, lighthearted
+
+ [Entertainment] 황정민 catchphrases
+ - Background: broadcast studio
+ - People: 황정민, smiling expression
+ - Text: none
+ - Mood: comic, approachable
+
+ [Science & Tech] new iPhone technology
+ - Background: clean white/gray
+ - People: none
+ - Text: technology-related copy
+ - Mood: futuristic, clean
+
+ [How-to] 기안84 at the swimming pool
+ - Background: swimming pool, blue
+ - People: 기안84, diving in
+ - Text: '영하4도' (-4°C) in red
+ - Mood: daring, humorous"""
+
+ print("\n" + "=" * 60)
+ print("Input:")
+ print("=" * 60)
+ print(test_input[:500] + "...")
+
+ # Generate a response
+ print("\n" + "=" * 60)
+ print("Generating model response...")
+ print("=" * 60)
+
+ messages = [{"role": "user", "content": test_input}]
+ text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ inputs = tokenizer(text, return_tensors="pt").to(model.device)
+
+ with torch.no_grad():
+     outputs = model.generate(
+         **inputs,
+         max_new_tokens=500,
+         temperature=0.7,
+         top_p=0.9,
+         do_sample=True,
+         pad_token_id=tokenizer.pad_token_id,
+         eos_token_id=tokenizer.eos_token_id,
+     )
+
+ # Decode only the newly generated tokens so the echoed prompt is stripped out
+ generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
+ response = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
+
+ print("\n" + "=" * 60)
+ print("Model output:")
+ print("=" * 60)
+ print(response)
+
+ print("\n" + "=" * 60)
+ print("Test complete!")
+ print("=" * 60)
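
The uploaded script attaches the LoRA adapter to the base model at every run. A natural follow-up, not part of this commit, is to merge the adapter into the base weights once so later inference does not need peft at all. Below is a minimal sketch assuming the same BASE_MODEL and ADAPTER_MODEL repos as in test_script.py; the output directory name is purely illustrative.

# Minimal sketch: merge the LoRA adapter into the base weights and save a standalone model.
# Assumptions: same repos as test_script.py; "merged-analyzer" is an illustrative local path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B", torch_dtype=torch.float16)
merged = PeftModel.from_pretrained(base, "epinfomax/youtube-thumbnail-trend-analyzer").merge_and_unload()

# Save the merged model together with the adapter's tokenizer (and its chat template).
merged.save_pretrained("merged-analyzer")
AutoTokenizer.from_pretrained("epinfomax/youtube-thumbnail-trend-analyzer").save_pretrained("merged-analyzer")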