import gradio as gr
import torch
from transformers import AutoTokenizer, LlamaForCausalLM
import spaces
import subprocess

# Install flash-attn at startup; the CUDA build step is skipped via the env flag.
subprocess.run(
    'pip install flash-attn --no-build-isolation',
    env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
    shell=True,
)

# Initialize model and tokenizer
model_id = 'akjindal53244/Llama-3.1-Storm-8B'
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = LlamaForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float32,
    device_map="auto",
    low_cpu_mem_usage=True,
)


# Format a list of chat messages into the Llama 3.1 prompt template
def format_prompt(messages):
    prompt = "<|begin_of_text|>"
    for message in messages:
        prompt += f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content']}<|eot_id|>"
    prompt += "<|start_header_id|>assistant<|end_header_id|>\n\n"
    return prompt


# Generate a response to the latest user message, given the chat history
@spaces.GPU(duration=300)  # Increased duration due to potentially slower processing
def generate_response(message, history):
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    prompt = format_prompt(messages)
    # The prompt already starts with <|begin_of_text|>, so skip the tokenizer's own BOS token
    # to avoid a duplicated begin-of-text token at the start of the sequence.
    input_ids = tokenizer(
        prompt, return_tensors="pt", add_special_tokens=False
    ).input_ids.to(model.device)
    generated_ids = model.generate(
        input_ids,
        max_new_tokens=256,
        temperature=0.7,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, excluding the prompt
    response = tokenizer.decode(
        generated_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
    )
    return response.strip()


# Create Gradio chat interface
iface = gr.ChatInterface(
    generate_response,
    title="Llama-3.1-Storm-8B Chatbot",
    description="Chat with the Llama-3.1-Storm-8B model. Type your message and press Enter to send.",
)

# Launch the app
iface.launch()