Spaces: Running on T4

Upload folder using huggingface_hub
Upload folder using huggingface_hub
- app_minimal.py +9 -0
- app_simple.py +54 -0
- requirements.txt +3 -3
app_minimal.py ADDED
@@ -0,0 +1,9 @@
+import gradio as gr
+
+def greet(name):
+    return f"Hello, {name}!"
+
+demo = gr.Interface(fn=greet, inputs="text", outputs="text", title="KAIdol Test")
+
+if __name__ == "__main__":
+    demo.launch()
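Not part of the commit: once app_minimal.py is running, the sketch below shows one way to query it programmatically with `gradio_client`, assuming the default local URL and the default `/predict` endpoint that `gr.Interface` exposes.

```python
from gradio_client import Client

# Assumes app_minimal.py was launched locally on Gradio's default port.
client = Client("http://127.0.0.1:7860")
print(client.predict("World", api_name="/predict"))  # -> "Hello, World!"
```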
app_simple.py ADDED
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+"""KAIdol A/B Test Arena - Simple Version"""
+
+import gradio as gr
+
+def chat_response(message, character):
+    """Generate a mock response."""
+    thinking = f"<think>Thinking from {character}'s point of view... how should I react to this message?</think>"
+    response = f"Hi~ nice to meet you! I'm {character}~"
+    return f"{thinking}\n\n{response}"
+
+# Character list (the five Korean character names are garbled in the source; placeholders below)
+CHARACTERS = ["Character 1", "Character 2", "Character 3", "Character 4", "Character 5"]
+
+# Model list
+MODELS = [
+    "hyperclovax-32b-dpo-v5",
+    "qwen2.5-14b-dpo-v5",
+    "qwen2.5-7b-dpo-v5",
+    "exaone-7.8b-dpo-v5",
+]
+
+with gr.Blocks(title="KAIdol A/B Test Arena") as demo:
+    gr.Markdown("# KAIdol A/B Test Arena")
+    gr.Markdown("A/B comparison evaluation of K-pop idol role-play models")
+    gr.Markdown("**Mock mode**: generates test responses without a real model.")
+
+    with gr.Row():
+        character = gr.Dropdown(choices=CHARACTERS, value=CHARACTERS[0], label="Character")
+        model_a = gr.Dropdown(choices=MODELS, value=MODELS[0], label="Model A")
+        model_b = gr.Dropdown(choices=MODELS, value=MODELS[1], label="Model B")
+
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown("### Model A")
+            response_a = gr.Textbox(label="Response", lines=5)
+        with gr.Column():
+            gr.Markdown("### Model B")
+            response_b = gr.Textbox(label="Response", lines=5)
+
+    user_input = gr.Textbox(label="Message", placeholder="Send a message to the idol...")
+    submit_btn = gr.Button("Send", variant="primary")
+
+    def generate(msg, char, ma, mb):
+        return chat_response(msg, char), chat_response(msg, char)
+
+    submit_btn.click(
+        fn=generate,
+        inputs=[user_input, character, model_a, model_b],
+        outputs=[response_a, response_b]
+    )
+
+if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0", server_port=7860)
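For context (not part of the commit): in mock mode both output panes always match, since `generate` ignores the selected models and calls `chat_response` twice with the same arguments. If only the visible reply were wanted in the UI, a small hypothetical helper could strip the leading `<think>` block before display; the function name and regex below are assumptions, not code from the Space.

```python
import re

def strip_think(text: str) -> str:
    """Remove a leading <think>...</think> block from a mock response (hypothetical helper)."""
    return re.sub(r"<think>.*?</think>\s*", "", text, flags=re.DOTALL)

# Using the mock response format produced by chat_response in app_simple.py:
mock = "<think>Thinking from the character's point of view...</think>\n\nHi~ nice to meet you! I'm Character 1~"
print(strip_think(mock))  # -> "Hi~ nice to meet you! I'm Character 1~"
```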
requirements.txt CHANGED
@@ -5,9 +5,9 @@ huggingface_hub>=0.21.0
 pyyaml
 pandas
 
-# ML dependencies - CUDA 12.1 torch
-torch==2.
-transformers>=4.40.0
+# ML dependencies - CUDA 12.1 torch (2.2.0+ for pytree compatibility)
+torch==2.2.0+cu121
+transformers>=4.40.0,<4.46.0
 accelerate>=0.27.0
 bitsandbytes>=0.43.0
 peft>=0.10.0
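Not part of the commit: a quick sanity check one might run after installing the pinned dependencies, to confirm the CUDA 12.1 torch build resolved and transformers stayed inside the new constraint. The expected version strings in the comments are assumptions based on the pins above.

```python
import torch
import transformers

# Pinned build from requirements.txt; expected to print something like "2.2.0+cu121".
print("torch:", torch.__version__)
# Should satisfy >=4.40.0,<4.46.0 per the updated constraint.
print("transformers:", transformers.__version__)
# True on the Space's T4 runtime if the cu121 wheel installed correctly.
print("cuda available:", torch.cuda.is_available())
```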