developer-lunark committed on
Commit
e6d6584
Β·
verified Β·
1 Parent(s): 81ac730

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. app_minimal.py +9 -0
  2. app_simple.py +54 -0
  3. requirements.txt +3 -3
app_minimal.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr


def greet(name):
    """Return a Korean greeting addressed to *name*."""
    return "μ•ˆλ…•ν•˜μ„Έμš”, " + name + "λ‹˜!"


# Minimal single-input Gradio app; serves as a deployment smoke test.
demo = gr.Interface(fn=greet, inputs="text", outputs="text", title="KAIdol Test")

if __name__ == "__main__":
    demo.launch()
app_simple.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """KAIdol A/B Test Arena - Simple Version"""
3
+
4
+ import gradio as gr
5
+
6
+ def chat_response(message, character):
7
+ """Mock 응닡 생성"""
8
+ thinking = f"<think>{character}의 μž…μž₯μ—μ„œ 생각해보면... 이 λ©”μ‹œμ§€μ— μ–΄λ–»κ²Œ λ°˜μ‘ν•΄μ•Ό ν• κΉŒ?</think>"
9
+ response = f"μ•ˆλ…•~ λ°˜κ°€μ›Œ! λ‚˜λŠ” {character}μ•Ό~"
10
+ return f"{thinking}\n\n{response}"
11
+
12
# Idol characters available for role-play in the mock arena.
CHARACTERS = ["κ°•μœ¨", "μ„œμ΄μ•ˆ", "이지후", "μ°¨λ„ν•˜", "졜민"]

# Candidate model identifiers offered in the A/B dropdowns.
MODELS = [
    "hyperclovax-32b-dpo-v5",
    "qwen2.5-14b-dpo-v5",
    "qwen2.5-7b-dpo-v5",
    "exaone-7.8b-dpo-v5",
]
22
+
23
with gr.Blocks(title="KAIdol A/B Test Arena") as demo:
    # Page header; the mock-mode banner tells evaluators that no real
    # model backs these responses.
    gr.Markdown("# KAIdol A/B Test Arena")
    gr.Markdown("K-pop μ•„μ΄λŒ λ‘€ν”Œλ ˆμ΄ λͺ¨λΈ A/B 비ꡐ 평가")
    gr.Markdown("**Mock λͺ¨λ“œ**: μ‹€μ œ λͺ¨λΈ 없이 ν…ŒμŠ€νŠΈ 응닡을 μƒμ„±ν•©λ‹ˆλ‹€.")

    # Selection controls: one character, two models to compare.
    with gr.Row():
        character = gr.Dropdown(choices=CHARACTERS, value="κ°•μœ¨", label="캐릭터")
        model_a = gr.Dropdown(choices=MODELS, value=MODELS[0], label="Model A")
        model_b = gr.Dropdown(choices=MODELS, value=MODELS[1], label="Model B")

    # Side-by-side response panels for the two contenders.
    with gr.Row():
        with gr.Column():
            gr.Markdown("### Model A")
            response_a = gr.Textbox(label="응닡", lines=5)
        with gr.Column():
            gr.Markdown("### Model B")
            response_b = gr.Textbox(label="응닡", lines=5)

    user_input = gr.Textbox(label="λ©”μ‹œμ§€", placeholder="μ•„μ΄λŒμ—κ²Œ λ©”μ‹œμ§€λ₯Ό λ³΄λ‚΄μ„Έμš”...")
    submit_btn = gr.Button("전솑", variant="primary")

    def generate_pair(msg, char, sel_a, sel_b):
        # Produce one mock reply per panel. chat_response is deterministic,
        # so a single call covers both outputs.
        # NOTE(review): mock mode ignores sel_a/sel_b, so panels A and B
        # always show identical text — confirm this is intended for the mock.
        reply = chat_response(msg, char)
        return reply, reply

    submit_btn.click(
        fn=generate_pair,
        inputs=[user_input, character, model_a, model_b],
        outputs=[response_a, response_b],
    )

if __name__ == "__main__":
    # Bind to all interfaces on the standard Hugging Face Spaces port.
    demo.launch(server_name="0.0.0.0", server_port=7860)
requirements.txt CHANGED
@@ -5,9 +5,9 @@ huggingface_hub>=0.21.0
5
  pyyaml
6
  pandas
7
 
8
- # ML dependencies - CUDA 12.1 torch
9
- torch==2.1.0+cu121
10
- transformers>=4.40.0
11
  accelerate>=0.27.0
12
  bitsandbytes>=0.43.0
13
  peft>=0.10.0
 
5
  pyyaml
6
  pandas
7
 
8
+ # ML dependencies - CUDA 12.1 torch (2.2.0+ for pytree compatibility)
9
+ torch==2.2.0+cu121
10
+ transformers>=4.40.0,<4.46.0
11
  accelerate>=0.27.0
12
  bitsandbytes>=0.43.0
13
  peft>=0.10.0