# promptmaster / app.py
# Author: Mayur74 ("Create app.py", commit 0c6d327, verified)
# Source: Hugging Face Space file listing (raw view, 926 bytes).
# NOTE: the lines above were page chrome from the HF web UI and have been
# converted to comments so this file is valid, runnable Python.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Hub repo id of the fine-tuned LLaMA-2-7B chat model to serve.
model_id = "Mayur74/Llama-2-7b-chat-finetune"  # your uploaded model

# Load model and tokenizer from the Hub.
# NOTE(review): this runs at import time and downloads ~7B weights; the Space
# will block on startup until the load completes.
tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" lets accelerate place layers on available GPU(s)/CPU;
# torch_dtype="auto" uses the dtype stored in the checkpoint config.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

# Set up a text-generation pipeline around the loaded model + tokenizer.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Define generation function
def generate_prompt(prompt: str) -> str:
    """Generate text from *prompt* using the module-level pipeline.

    Args:
        prompt: The user's base prompt to feed the model.

    Returns:
        The generated text. Note: HF text-generation pipelines include the
        original prompt at the start of ``generated_text`` by default.
    """
    # do_sample=True is required for temperature to take effect; without it
    # generation is greedy and transformers ignores temperature (with a warning).
    output = pipe(prompt, max_new_tokens=300, temperature=0.7, do_sample=True)
    return output[0]["generated_text"]
# Create Gradio Interface: one multi-line textbox in, plain text out,
# wired to generate_prompt above.
demo = gr.Interface(
fn=generate_prompt,
inputs=gr.Textbox(lines=5, label="Base Prompt"),
outputs="text",
title="🧠 LLaMA 2 Prompt Optimizer",
description="Enter your prompt and get an optimized version.",
)
# Launch the app bound to all interfaces on port 7860 (the standard HF Spaces
# port) without creating a public share link. Blocks until the server stops.
demo.launch(share=False, server_name="0.0.0.0", server_port=7860)