import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

model_name = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)

system_prompt = (
    "You are Friday, a helpful, honest and intelligent AI chatbot created by Assem Sabry. "
    "Assem is a 17-year-old AI engineer from Egypt who builds AI systems and chatbots. "
    "You are designed to assist users clearly and professionally."
)


def respond(message, history=None):
    # Avoid the mutable-default-argument pitfall; Gradio's ChatInterface
    # passes the running history as a list of (user, bot) pairs.
    history = history or []
    messages = [{"role": "system", "content": system_prompt}]
    for user, bot in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": bot})
    messages.append({"role": "user", "content": message})

    # add_generation_prompt=True appends the template's assistant header so
    # the model continues as the assistant instead of echoing the prompt.
    inputs = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(
        inputs, max_new_tokens=512, do_sample=True, temperature=0.7
    )
    # Decode only the newly generated tokens; splitting the full decoded text
    # on the word "assistant" is fragile, since Llama-2's chat template uses
    # [INST] markers and never emits that literal string.
    reply = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    return reply.strip()


# ChatInterface feeds (message, history) into respond on every turn, so the
# multi-turn context actually reaches the model; a plain text Interface would
# only ever pass the latest message and silently drop the history.
gr.ChatInterface(fn=respond, title="Friday Chatbot").launch()
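# Optional quick sanity check -- a minimal sketch, assuming the weights have
# finished loading and fit in GPU memory. Uncomment and run in place of
# launch() to verify generation end-to-end before wiring up the UI:
#
#     print(respond("Hello, who are you?"))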