# app.py — Gradio chat UI for meta-llama/Llama-3.1-8B-Instruct (Hugging Face Space)
import os
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr
# Gated model repo — downloading it requires an HF access token with
# permission for the meta-llama organization.
model_id = "meta-llama/Llama-3.1-8B-Instruct"
# Token is read from an env var / Space secret literally named "kumar".
# NOTE(review): unconventional name (HF_TOKEN is the usual convention) —
# confirm the secret is actually configured under this key.
token = os.getenv("kumar")
tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
# device_map="auto" lets accelerate shard/place layers on whatever
# GPU/CPU hardware the Space provides.
model = AutoModelForCausalLM.from_pretrained(model_id, token=token, device_map="auto")
# Reusable text-generation pipeline shared by the Gradio handler below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
def chat_with_llama(prompt):
    """Generate a chat reply for *prompt* using the module-level `pipe`.

    Args:
        prompt: User-entered text from the Gradio textbox.

    Returns:
        The model's generated continuation as a string (empty string for
        blank input). Sampling (temperature=0.7) makes output non-deterministic.
    """
    # Guard: an empty/whitespace prompt would just make the model ramble.
    if not prompt or not prompt.strip():
        return ""
    result = pipe(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        # By default the pipeline echoes the prompt inside "generated_text";
        # return only the newly generated tokens so the chat UI doesn't
        # repeat the user's input back at them.
        return_full_text=False,
    )
    return result[0]["generated_text"]
# Minimal text-in/text-out UI. Do NOT pass share=True when running on
# Hugging Face Spaces — the Space already serves the app publicly and
# share tunnels are unsupported there.
gr.Interface(fn=chat_with_llama, inputs="text", outputs="text", title="Venkat's Chatbot").launch()