import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned model and its tokenizer
model_name = "AhmedBAH/DesertMind"  # your fine-tuned model path
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float32,  # full precision for CPU inference
    device_map={"": "cpu"},     # place all weights on the CPU
    use_cache=False,            # disable the key/value cache during generation
)


def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    output = model.generate(
        **inputs,
        max_new_tokens=50,
        do_sample=True,
        temperature=0.1,  # low temperature keeps sampling close to greedy
    )
    # Note: the decoded text includes the original prompt followed by the completion
    return tokenizer.decode(output[0], skip_special_tokens=True)


# Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="🚀 Fine-tuned Model Demo",
    description="Enter a prompt to generate text using your fine-tuned model.",
)

iface.launch()
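
# To run this demo locally (a minimal sketch; the exact packages/versions are
# assumptions, not from the source): install the dependencies and start the app.
#   pip install gradio transformers torch accelerate
#   python app.py
# `accelerate` is assumed here because passing `device_map` to from_pretrained
# requires it; on Hugging Face Spaces, the same packages would be listed in
# requirements.txt.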