File size: 926 Bytes
0c6d327
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Hugging Face Hub repo id of the fine-tuned model to serve.
model_id = "Mayur74/Llama-2-7b-chat-finetune"  # your uploaded model

# Load model and tokenizer.
# Downloads the weights on first run (then reads from the local HF cache).
# device_map="auto" lets accelerate spread layers across available devices;
# torch_dtype="auto" keeps whatever dtype the checkpoint was saved in.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

# Set up pipeline: wraps model + tokenizer behind a single callable that
# handles tokenization, generation, and decoding in one step.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Define generation function
def generate_prompt(prompt):
    """Generate a completion for *prompt* using the global text-generation pipeline.

    Args:
        prompt: The user-supplied prompt text.

    Returns:
        str: The generated text. Note the pipeline's default includes the
        original prompt as a prefix of the returned string.
    """
    # do_sample=True is required for temperature to take effect; without it
    # greedy decoding is used and transformers silently ignores temperature
    # (emitting a warning). max_new_tokens caps the length of the completion.
    output = pipe(
        prompt,
        max_new_tokens=300,
        temperature=0.7,
        do_sample=True,
    )
    return output[0]["generated_text"]

# Create Gradio Interface
# Create Gradio Interface: a single textbox in, plain text out, served by
# generate_prompt.
demo = gr.Interface(
    fn=generate_prompt,
    inputs=gr.Textbox(lines=5, label="Base Prompt"),
    outputs="text",
    title="🧠 LLaMA 2 Prompt Optimizer",
    description="Enter your prompt and get an optimized version.",
)

# Start the server. share=False disables the public *.gradio.live tunnel;
# binding 0.0.0.0 makes the app reachable from other hosts/containers on
# port 7860 (Gradio's default port).
demo.launch(share=False, server_name="0.0.0.0", server_port=7860)