import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load your trained model + tokenizer
model_name = "theguywhosucks/Instruct-MochaCofeeeee"  # replace with your uploaded model repo
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_length=200,   # total length in tokens, prompt included
        temperature=0.7,  # lower = more deterministic output
        do_sample=True,   # sample instead of greedy decoding (required for temperature/top_k to apply)
        top_k=50,         # restrict sampling to the 50 most likely next tokens
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## ✨ Custom Text Generator")
    with gr.Row():
        starter_prompt = gr.Textbox(label="Starter Prompt", placeholder="Type your prompt here...")
    with gr.Row():
        generate_btn = gr.Button("Generate Text")
    with gr.Row():
        output_box = gr.Textbox(label="Generated Result")

    # Event listeners must be registered inside the Blocks context
    generate_btn.click(fn=generate_text, inputs=starter_prompt, outputs=output_box)

demo.launch()
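
# --- Optional: GPU inference (a minimal sketch, not part of the original script) ---
# If a CUDA device is available, moving the model and the tokenized inputs onto it
# speeds up generation considerably. This assumes the model fits in GPU memory.
# It is left commented out because demo.launch() above blocks until the server stops;
# to use it, apply these changes before building the Gradio UI. Note max_new_tokens
# counts only newly generated tokens, unlike max_length, which includes the prompt.
#
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)
#
# def generate_text(prompt):
#     inputs = tokenizer(prompt, return_tensors="pt").to(device)
#     outputs = model.generate(
#         **inputs,
#         max_new_tokens=200,
#         temperature=0.7,
#         do_sample=True,
#         top_k=50,
#     )
#     return tokenizer.decode(outputs[0], skip_special_tokens=True)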