Spaces: Runtime error
app.py:

import os

import gradio as gr
import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Authenticate with the Hub using the HF_TOKEN secret configured on the Space
login(token=os.getenv("HF_TOKEN"))

# Load the model and tokenizer.
# AutoTokenizer resolves the correct tokenizer class; the slow LlamaTokenizer
# needs sentencepiece installed and can fail at startup if it is missing.
model_name = "ranggafermata/Fermata-v1.2-lightcoder"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
# Move the model to the GPU when one is available; fp16 weights left on CPU
# are slow and poorly supported.
model.to("cuda" if torch.cuda.is_available() else "cpu")
model.eval()

def generate_code(prompt, max_tokens, temperature, top_p):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            # Gradio sliders return floats; generate() expects an int here
            max_new_tokens=int(max_tokens),
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

iface = gr.Interface(
    fn=generate_code,
    inputs=[
        gr.Textbox(lines=5, label="Prompt"),
        gr.Slider(10, 512, value=128, label="Max Tokens"),
        gr.Slider(0.1, 1.5, value=0.8, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, label="Top-p"),
    ],
    outputs=gr.Textbox(lines=20, label="Generated Code"),
    title="Fermata v1.2 LightCoder",
    description="A fine-tuned code model based on TinyLlama.",
)

iface.launch(mcp_server=True)
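A Space running this app.py also needs a requirements.txt, and a missing dependency is a common cause of the "Runtime error" status shown above. Below is a minimal sketch; the version pins, extras, and optional entries are assumptions rather than the Space's actual file:

# requirements.txt (hypothetical sketch; pins and extras are assumptions)
gradio[mcp]>=5.0   # launch(mcp_server=True) requires the MCP extra in recent Gradio
transformers
torch
huggingface_hub
accelerate         # optional, but commonly needed for fp16 model loading on GPU
sentencepiece      # required if the slow Llama tokenizer is ever selected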
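Once the Space is up, the interface can also be queried programmatically. A minimal sketch using gradio_client, assuming the Space id matches the model repo and that gr.Interface exposes its default /predict endpoint (both are assumptions):

from gradio_client import Client

# The Space id is assumed to match the model repo; adjust if the Space lives elsewhere
client = Client("ranggafermata/Fermata-v1.2-lightcoder")

# Positional arguments mirror the Interface inputs: prompt, max tokens, temperature, top-p
result = client.predict(
    "Write a Python function that reverses a string.",
    128,    # Max Tokens
    0.8,    # Temperature
    0.95,   # Top-p
    api_name="/predict",  # default endpoint name for gr.Interface (assumption)
)
print(result)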