# Hugging Face Space app script.
# (Page-capture note: the Space's status banner read "Runtime error" at the
# time this source was copied — see the memory note at the model-loading step.)
import os

import gradio as gr
import torch
from huggingface_hub import login
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Step 1+2: Authenticate with the Hugging Face Hub.
# The token comes from the Space's repository secrets (exposed as an env var);
# it is required because the Mistral model repo is gated.
hf_token = os.getenv("HF_TOKEN")
if hf_token is None:
    # Fail fast with a clear message rather than a cryptic 401 later.
    raise EnvironmentError("HF_TOKEN not found in environment. Please set it in repository secrets.")
login(token=hf_token)
# Step 3: Load tokenizer and model (gated repo: requires the HF auth token).
model_name = "mistralai/Mistral-7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
# NOTE(review): the Space previously showed "Runtime error"; loading a 7B
# model in full fp32 needs ~28 GB of RAM and is a likely out-of-memory
# crash. Loading the weights in float16 halves the footprint while keeping
# the external interface (the `model` object) unchanged.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    token=hf_token,
    torch_dtype=torch.float16,
)
# Step 4: Text-generation pipeline consumed by the Gradio handler below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Step 5: Gradio handler.
def predict_completion(prompt):
    """Generate a short continuation of *prompt* using the global `pipe`.

    Returns the pipeline's `generated_text` (prompt plus up to 10 sampled
    new tokens).
    """
    results = pipe(prompt, max_new_tokens=10, num_return_sequences=1, do_sample=True)
    first_candidate = results[0]
    return first_candidate["generated_text"]
# Step 6: Build and launch the Gradio UI.
_ui_config = {
    "fn": predict_completion,
    "inputs": gr.Textbox(label="Input Prompt"),
    "outputs": "text",
    "title": "Predictive Keyboard using Mistral",
}
interface = gr.Interface(**_ui_config)
interface.launch()