Add minimal Gradio app so HF Space runs
Browse files- app.py +34 -0
- requirements.txt +1 -17
app.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os

import gradio as gr

# Markdown banner rendered at the top of the Space UI (see gr.Markdown(INTRO) below).
# NOTE: the full pipeline is NOT run here — this Space only previews CLI commands.
INTRO = """
# Humigence (Space Preview)

This Space provides a **lightweight preview UI**.
The full fine‑tuning & evaluation pipeline runs via the **Humigence CLI** locally.

Use this UI to form the CLI command you'll run on your machine.
"""
def make_command(mode, model, dataset):
    """Build the Humigence CLI command for the user's selections.

    Args:
        mode: "Basic" selects ``--mode basic``; any other value maps to
            ``--mode advanced``.
        model: base model id; omitted from the command when blank.
        dataset: dataset path or URL; omitted from the command when blank.

    Returns:
        Markdown containing the command inside a fenced code block, plus a
        reminder that it must be run locally.
    """
    parts = ["humigence", "--mode basic" if mode == "Basic" else "--mode advanced"]
    if model.strip():
        parts.append(f'--model "{model.strip()}"')
    if dataset.strip():
        parts.append(f'--dataset "{dataset.strip()}"')
    # Join only the flags that are present; the previous fixed-slot f-string
    # left a double space in the command whenever model or dataset was blank.
    cmd = " ".join(parts)
    return f"### Preview Command\n```\n{cmd}\n```\n\n> Copy/paste and run this locally to execute the real pipeline."
# Assemble the preview interface: inputs in one row, a trigger button, and a
# markdown pane that shows the generated CLI command.
with gr.Blocks(title="Humigence") as demo:
    gr.Markdown(INTRO)

    with gr.Row():
        mode = gr.Radio(["Basic", "Advanced"], value="Basic", label="Mode")
        model = gr.Textbox(
            label="Base Model",
            value="TinyLlama/TinyLlama-1.1B",
            placeholder="e.g., TinyLlama/TinyLlama-1.1B or microsoft/phi-2",
        )
        dataset = gr.Textbox(
            label="Dataset Source (path or URL)",
            placeholder="e.g., data/processed/my_dataset.jsonl",
        )

    generate_btn = gr.Button("Generate CLI Command")
    command_out = gr.Markdown()

    generate_btn.click(make_command, inputs=[mode, model, dataset], outputs=command_out)

# Hugging Face Spaces inject a PORT env var; fall back to 7860 for local runs.
serve_port = int(os.getenv("PORT", "7860"))
demo.launch(server_name="0.0.0.0", server_port=serve_port)
requirements.txt
CHANGED
|
@@ -1,17 +1 @@
|
|
| 1 |
-
|
| 2 |
-
transformers>=4.40,<4.47
|
| 3 |
-
accelerate>=0.28.0
|
| 4 |
-
peft>=0.12.0
|
| 5 |
-
bitsandbytes==0.45.5
|
| 6 |
-
datasets>=2.18.0
|
| 7 |
-
evaluate>=0.4.0
|
| 8 |
-
huggingface_hub>=0.24.0
|
| 9 |
-
numpy>=1.24.0
|
| 10 |
-
pydantic>=2.0.0
|
| 11 |
-
typer>=0.12.3
|
| 12 |
-
InquirerPy>=0.3.4
|
| 13 |
-
rich>=13.7.1
|
| 14 |
-
scikit-learn>=1.3.0
|
| 15 |
-
tqdm>=4.65.0
|
| 16 |
-
tokenizers>=0.15.0
|
| 17 |
-
safetensors>=0.4.0
|
|
|
|
| 1 |
+
gradio==4.43.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|