import gradio as gr
import os
import threading
import time
from pathlib import Path
from huggingface_hub import login

# Try to import llama-cpp-python; fall back to install instructions if it is missing
try:
    from llama_cpp import Llama
    LLAMA_CPP_AVAILABLE = True
except ImportError:
    LLAMA_CPP_AVAILABLE = False
    print("llama-cpp-python not installed. Please install it with: pip install llama-cpp-python")

# Log in to Hugging Face only when a token is provided
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)

# Global variables for the model
model = None
model_loaded = False


def find_gguf_file(directory="."):
    """Find GGUF files in the specified directory."""
    gguf_files = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith('.gguf'):
                gguf_files.append(os.path.join(root, file))
    return gguf_files


def get_optimal_settings():
    """Get optimal CPU thread count and GPU layer count automatically."""
    # Auto-detect CPU threads (use all available cores)
    n_threads = os.cpu_count()

    # Auto-detect GPU layers (offload to the GPU if one is available)
    n_gpu_layers = 0
    try:
        # Try to detect an NVIDIA GPU via nvidia-smi
        import subprocess
        result = subprocess.run(['nvidia-smi'], capture_output=True, text=True)
        if result.returncode == 0:
            # NVIDIA GPU detected, offload more layers
            n_gpu_layers = 35  # Good default for Llama-3-8B
    except Exception:
        # No GPU or CUDA not available
        n_gpu_layers = 0

    return n_threads, n_gpu_layers


def load_model_from_huggingface(repo_id, filename, n_ctx=2048):
    """Load the model from a Hugging Face repository."""
    global model, model_loaded

    if not LLAMA_CPP_AVAILABLE:
        return False, "llama-cpp-python not installed. Please install it with: pip install llama-cpp-python"

    try:
        print(f"Loading model from Hugging Face: {repo_id}/{filename}")

        # Get optimal settings automatically
        n_threads, n_gpu_layers = get_optimal_settings()
        print(f"Auto-detected settings: {n_threads} CPU threads, {n_gpu_layers} GPU layers")

        # Load the model from Hugging Face with optimized settings
        model = Llama.from_pretrained(
            repo_id=repo_id,
            filename=filename,
            n_ctx=n_ctx,                # Context window (configurable)
            n_threads=n_threads,        # CPU threads (auto-detected)
            n_gpu_layers=n_gpu_layers,  # Number of layers to offload to GPU (auto-detected)
            verbose=False,
            chat_format="llama-3",      # Use Llama-3 chat format (matches the local loader)
            n_batch=512,                # Batch size for prompt processing
            use_mlock=True,             # Keep model in memory
            use_mmap=True,              # Use memory mapping
        )

        model_loaded = True
        print("Model loaded successfully!")
        return True, f"✅ Model loaded successfully from {repo_id}/{filename}\nContext: {n_ctx} tokens\nCPU Threads: {n_threads}\nGPU Layers: {n_gpu_layers}"

    except Exception as e:
        model_loaded = False
        error_msg = f"Error loading model: {str(e)}"
        print(error_msg)
        return False, f"❌ {error_msg}"


def load_model_from_gguf(gguf_path=None, n_ctx=2048):
    """Load the model from a local GGUF file with automatic optimization."""
    global model, model_loaded

    if not LLAMA_CPP_AVAILABLE:
        return False, "llama-cpp-python not installed. Please install it with: pip install llama-cpp-python"
    try:
        # If no path was provided, look for GGUF files in the repository
        if gguf_path is None:
            gguf_files = find_gguf_file()
            if not gguf_files:
                return False, "No GGUF files found in the repository"
            gguf_path = gguf_files[0]  # Use the first one found
            print(f"Found GGUF file: {gguf_path}")

        # Check that the file exists
        if not os.path.exists(gguf_path):
            return False, f"GGUF file not found: {gguf_path}"

        print(f"Loading model from: {gguf_path}")

        # Get optimal settings automatically
        n_threads, n_gpu_layers = get_optimal_settings()
        print(f"Auto-detected settings: {n_threads} CPU threads, {n_gpu_layers} GPU layers")

        # Load the model with optimized settings
        model = Llama(
            model_path=gguf_path,
            n_ctx=n_ctx,                # Context window (configurable)
            n_threads=n_threads,        # CPU threads (auto-detected)
            n_gpu_layers=n_gpu_layers,  # Number of layers to offload to GPU (auto-detected)
            verbose=False,
            chat_format="llama-3",      # Use Llama-3 chat format
            n_batch=512,                # Batch size for prompt processing
            use_mlock=True,             # Keep model in memory
            use_mmap=True,              # Use memory mapping
        )

        model_loaded = True
        print("Model loaded successfully!")
        return True, f"✅ Model loaded successfully from {os.path.basename(gguf_path)}\nContext: {n_ctx} tokens\nCPU Threads: {n_threads}\nGPU Layers: {n_gpu_layers}"

    except Exception as e:
        model_loaded = False
        error_msg = f"Error loading model: {str(e)}"
        print(error_msg)
        return False, f"❌ {error_msg}"


def generate_response_stream(message, history, max_tokens=512, temperature=0.7, top_p=0.9, repeat_penalty=1.1):
    """Generate a response from the model, streaming partial output."""
    global model, model_loaded

    if not model_loaded or model is None:
        yield "Error: Model not loaded. Please load the model first."
        return

    try:
        # Format the conversation history as Llama-3 chat messages
        conversation = []
        for human, assistant in history:
            conversation.append({"role": "user", "content": human})
            if assistant:  # Only add the assistant turn if a response exists
                conversation.append({"role": "assistant", "content": assistant})

        # Add the current message
        conversation.append({"role": "user", "content": message})

        # Generate the response with streaming
        response = ""
        stream = model.create_chat_completion(
            messages=conversation,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            repeat_penalty=repeat_penalty,
            stream=True,
            stop=["<|eot_id|>", "<|end_of_text|>"]
        )

        for chunk in stream:
            if chunk['choices'][0]['delta'].get('content'):
                new_text = chunk['choices'][0]['delta']['content']
                response += new_text
                yield response

    except Exception as e:
        yield f"Error generating response: {str(e)}"


def chat_interface(message, history, max_tokens, temperature, top_p, repeat_penalty):
    """Main chat interface function."""
    # This is a generator, so early exits must yield their result rather than return it
    if not message.strip():
        yield history, ""
        return

    if not model_loaded:
        history.append((message, "Please load the model first using the 'Load Model' button."))
        yield history, ""
        return

    # Add the user message to the history with an empty placeholder response
    history = history + [(message, "")]

    # Stream the response into the last history entry
    for response in generate_response_stream(message, history[:-1], max_tokens, temperature, top_p, repeat_penalty):
        history[-1] = (message, response)
        yield history, ""


def clear_chat():
    """Clear the chat history."""
    return [], ""


def load_model_interface(source_type, gguf_path, repo_id, filename, context_size):
    """Interface function to load the model with a configurable context size."""
    if source_type == "Hugging Face":
        success, message = load_model_from_huggingface(repo_id, filename, n_ctx=int(context_size))
    else:  # Local file
        success, message = load_model_from_gguf(gguf_path, n_ctx=int(context_size))
    return message


def get_available_gguf_files():
    """Get a list of available GGUF files."""
    gguf_files = find_gguf_file()
    if not gguf_files:
        return ["No GGUF files found"]
    return [os.path.basename(f) for f in gguf_files]


# Create the Gradio interface
def create_interface():
    # Get available GGUF files
    gguf_files = find_gguf_file()
    gguf_choices = [os.path.basename(f) for f in gguf_files] if gguf_files else ["No GGUF files found"]

    with gr.Blocks(title="Llama-3-8B GGUF Chatbot", theme=gr.themes.Soft()) as demo:
        gr.HTML("""
            <p>Chat with the MMed-Llama-Alpaca model (Q4_K_M quantized) for medical assistance!</p>
""") with gr.Row(): with gr.Column(scale=4): # Chat interface chatbot = gr.Chatbot( height=500, show_copy_button=True, bubble_full_width=False, show_label=False, placeholder="Model not loaded. Please load the model first to start chatting." ) with gr.Row(): msg = gr.Textbox( placeholder="Type your message here...", container=False, scale=7, show_label=False ) submit_btn = gr.Button("Send", variant="primary", scale=1) clear_btn = gr.Button("Clear", variant="secondary", scale=1) with gr.Column(scale=1): # Model loading section gr.HTML("Format: GGUF (optimized)
                    Backend: llama-cpp-python<br>
                    Features: CPU/GPU support, streaming<br>
                    Memory: Optimized usage<br>
                    Auto-Optimization: CPU threads & GPU layers detected automatically<br>
                    Sources: Hugging Face Hub or Local Files
""") if not LLAMA_CPP_AVAILABLE: gr.HTML("""ā ļø Missing Dependency
Install llama-cpp-python:
pip install llama-cpp-python