import gradio as gr
from transformers import pipeline
# Model selection.
# 'meta-llama/Llama-2-7b' hosts the original (non-transformers) checkpoint and will
# not load via pipeline(); the '-hf' repos hold the transformers-format weights.
# Since this app is a chat UI, use the chat-tuned variant. NOTE: the meta-llama
# repos are gated — you must have accepted the license and be logged in via
# `huggingface-cli login`. Any ungated causal-LM id (e.g. 'gpt2', 'gpt2-medium')
# works as a drop-in replacement for local testing.
model_name = 'meta-llama/Llama-2-7b-chat-hf'
# Pin the task explicitly rather than relying on auto-detection from the model config.
generator = pipeline(task='text-generation', model=model_name)
def generate_text(prompt):
    """Generate a continuation of *prompt* and return it as a string.

    Uses ``max_new_tokens`` rather than ``max_length``: ``max_length`` caps the
    combined prompt + completion length, so prompts near or over 100 tokens
    would produce truncated or empty output. ``max_new_tokens`` always allows
    up to 100 generated tokens regardless of prompt length.
    """
    # pipeline returns a list of dicts; take the text of the first candidate.
    return generator(prompt, max_new_tokens=100)[0]['generated_text']
# Gradio interface setup.
# NOTE: live=True was removed — it re-runs the function on every keystroke,
# which triggers a full LLM generation per character typed. With the default
# (live=False) generation runs only when the user submits the prompt.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Generated text"),
    title="Meta-Llama Chat",
    description="Enter a prompt to chat with the Meta-Llama model.",
)

# Launch the Gradio interface (blocks and serves the web UI).
iface.launch()
|