import os

import gradio as gr
import torch
from dotenv import find_dotenv, load_dotenv
from transformers import AutoTokenizer, pipeline

# Load environment variables. The HF key is only needed if the model
# requires authenticated access to the Hugging Face Hub; it is not used
# by the local pipeline below, so a missing key should not be fatal.
_ = load_dotenv(find_dotenv())
hf_api_key = os.environ.get('HF_API_KEY')

model_name = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
text_gen_pipeline = pipeline(
    "text-generation",
    model=model_name,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)


class Client:
    """Thin wrapper around a transformers text-generation pipeline."""

    def __init__(self, pipeline):
        self.pipeline = pipeline

    def generate_text(self, prompt, max_new_tokens, temperature):
        sequences = self.pipeline(
            prompt,
            max_new_tokens=max_new_tokens,  # was max_length, which also counts the prompt tokens
            do_sample=True,
            temperature=temperature,  # was accepted but never forwarded to the pipeline
            top_k=10,
            num_return_sequences=1,
            eos_token_id=self.pipeline.tokenizer.eos_token_id,
            return_full_text=False,  # return only the completion, not prompt + completion
        )
        return sequences[0]['generated_text']


client = Client(text_gen_pipeline)


def format_chat_prompt(message, chat_history, instruction):
    """Flatten the system instruction and prior turns into a single prompt string."""
    prompt = f"System: {instruction}"
    for user_message, bot_message in chat_history:
        prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
    prompt = f"{prompt}\nUser: {message}\nAssistant:"
    return prompt


def respond(message, chat_history, instruction, temperature=0.7):
    prompt = format_chat_prompt(message, chat_history, instruction)
    output_text = client.generate_text(prompt, max_new_tokens=1024, temperature=temperature)
    # Append the completed turn directly instead of appending a placeholder
    # and patching it afterwards; the behavior is the same.
    chat_history = chat_history + [[message, output_text]]
    return "", chat_history


iface = gr.Interface(
    fn=respond,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Chatbot(label="Chat History", height=240),
        gr.Textbox(
            label="System message",
            lines=2,
            value=(
                "A conversation between a user and an LLM-based AI assistant. "
                "The assistant gives helpful and honest answers."
            ),
        ),
        gr.Slider(label="temperature", minimum=0.1, maximum=1, value=0.7, step=0.1),
    ],
    outputs=[
        gr.Textbox(label="Prompt"),
        gr.Chatbot(label="Chat History", height=240),
    ],
)

if __name__ == "__main__":
    iface.launch()
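
# A minimal sketch of exercising the chat loop without launching the UI
# (assumes the Falcon pipeline above loaded successfully; the question and
# system message below are illustrative placeholders, not part of the app):
#
#   system_msg = "A conversation between a user and an AI assistant."
#   _, history = respond("What is the capital of France?", [], system_msg)
#   print(history[-1][1])  # the assistant's reply for the last turn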