# Hugging Face Spaces page header (scrape residue): "Spaces: Sleeping".
import os

import gradio as gr
from huggingface_hub import HfApi
from transformers import pipeline

# Hugging Face access token, read from the environment; empty string when unset
# (public models still work without a token).
HF_TOKEN = os.getenv("HF_TOKEN", "")

# Hub client used to enumerate the models owned by a user or organization.
api = HfApi(token=HF_TOKEN)
def list_my_models(owner: str):
    """Return the sorted model IDs ("owner/name") published by *owner*.

    *owner* may be a Hub username or an organization name.
    """
    listing = api.list_models(author=owner, full=False)
    return sorted(model.modelId for model in listing)
def build_pipeline(model_id: str):
    """Construct a transformers pipeline for *model_id*, best-effort.

    Tries the "text-generation" task first; on any failure falls through to
    "text2text-generation".  Could be extended by inspecting the model's
    tags/tasks on the Hub and routing accordingly.
    """
    try:
        return pipeline("text-generation", model=model_id, token=HF_TOKEN)
    except Exception:
        # Deliberate broad catch: this is a best-effort task fallback.
        pass
    return pipeline("text2text-generation", model=model_id, token=HF_TOKEN)
| _PIPE_CACHE = {} | |
def run_model(owner: str, model_id: str, prompt: str, max_new_tokens: int, temperature: float):
    """Generate text from *prompt* using the selected model.

    Parameters
    ----------
    owner : unused here, but kept in the signature because the Gradio `run`
        button wires it through as the first input.
    model_id : Hub model ID ("owner/name"); empty/None means nothing selected.
    max_new_tokens, temperature : forwarded to the generation pipeline.

    Returns the generated text, or a best-effort string rendering of the
    pipeline output when its shape is unexpected.
    """
    if not model_id:
        return "No model selected."
    # Lazily build and memoize the pipeline for this model.
    if model_id not in _PIPE_CACHE:
        _PIPE_CACHE[model_id] = build_pipeline(model_id)
    pipe = _PIPE_CACHE[model_id]
    out = pipe(prompt, max_new_tokens=max_new_tokens, temperature=temperature)
    # Both text-generation and text2text-generation pipelines return a list of
    # dicts carrying "generated_text".  (The original repeated this check nine
    # times; one check is sufficient.)
    if isinstance(out, list) and out:
        first = out[0]
        if "generated_text" in first:
            return first["generated_text"]
        # Fallback pretty print for unexpected dict shapes.
        return str(first)
    return str(out)
def refresh_models(owner: str):
    """Re-query the Hub and return a Dropdown update listing *owner*'s models."""
    choices = list_my_models(owner)
    return gr.Dropdown(choices=choices, value=None)
# --- UI definition ------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## 🤗 My Models – One UI")

    # Account selection + manual refresh of the model list.
    with gr.Row():
        owner = gr.Textbox(label="HF username / org", placeholder="e.g. my-username", value="")
        refresh = gr.Button("Refresh model list")
    model_id = gr.Dropdown(label="Select a model from your account", choices=[])

    # Prompt and generation controls.
    with gr.Row():
        prompt = gr.Textbox(label="Prompt", lines=4, placeholder="Type here…")
    with gr.Row():
        max_new_tokens = gr.Slider(1, 1024, value=256, step=1, label="max_new_tokens")
        temperature = gr.Slider(0.0, 2.0, value=0.7, step=0.05, label="temperature")

    run = gr.Button("Run")
    output = gr.Textbox(label="Output", lines=12)

    # Event wiring: refresh repopulates the dropdown; run generates text.
    refresh.click(fn=refresh_models, inputs=[owner], outputs=[model_id])
    run.click(fn=run_model, inputs=[owner, model_id, prompt, max_new_tokens, temperature], outputs=[output])

demo.launch()