import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import torch

# Specify your finetuned model name here
model_name = "BotCuddles/to_deploy"

# Load your finetuned model and tokenizer from the Hugging Face Hub
try:
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
    print("Model and tokenizer loaded successfully!")
except Exception as e:
    print(f"Error loading model and tokenizer: {e}")
    generator = None

top_k = 50
top_p = 0.9
temperature = 0.7
max_new_tokens = 120

def generate_text(prompt):
    if generator:
        try:
            full_prompt = "Understand the following message from user and give a compassionate reply. Message: " + prompt
            result = generator(
                full_prompt,
                do_sample=True,  # enable sampling so top_k, top_p and temperature actually take effect
                top_k=top_k,
                top_p=top_p,
                temperature=temperature,
                max_new_tokens=max_new_tokens,
                num_return_sequences=1
            )
            return result[0]['generated_text']
        except Exception as e:
            return f"Error generating text: {e}"
    else:
        return "Model not loaded properly."

iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Demo - Lift Me Up",
    description="Generate text for relief"
)

if __name__ == "__main__":
    iface.launch()