import gradio as gr
import os
import threading
import time
from pathlib import Path

from huggingface_hub import hf_hub_download, login


# Handle HuggingFace authentication
def setup_hf_auth():
    """Setup HuggingFace authentication if token is available"""
    hf_token = os.environ.get("HF_TOKEN")
    if hf_token and hf_token.strip():
        try:
            login(token=hf_token, add_to_git_credential=False)
            print("✅ Successfully logged in to HuggingFace Hub")
            return True
        except Exception as e:
            print(f"⚠️ HuggingFace login failed: {e}")
            return False
    else:
        print("ℹ️ No HF_TOKEN found, proceeding without authentication")
        return False


# Setup authentication on import
setup_hf_auth()

# Try to import llama-cpp-python, fall back to instructions if not available
try:
    from llama_cpp import Llama
    LLAMA_CPP_AVAILABLE = True
except ImportError:
    LLAMA_CPP_AVAILABLE = False
    print("llama-cpp-python not installed. Please install it with: pip install llama-cpp-python")

# Global variables for model
model = None
model_loaded = False

# HuggingFace repository information
HF_REPO_ID = "Axcel1/MMed-llama-alpaca-Q4_K_M-GGUF"
HF_FILENAME = "mmed-llama-alpaca-q4_k_m.gguf"


def find_gguf_file(directory="."):
    """Find GGUF files in the specified directory"""
    gguf_files = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".gguf"):
                gguf_files.append(os.path.join(root, file))
    return gguf_files


def download_model_from_hf(repo_id=HF_REPO_ID, filename=HF_FILENAME):
    """Download GGUF model from HuggingFace Hub"""
    try:
        print(f"Downloading model from {repo_id}/{filename}...")
        gguf_path = hf_hub_download(
            repo_id=repo_id,
            filename=filename,
            cache_dir="./models",
            resume_download=True,  # Resume partial downloads
        )
        print(f"Model downloaded to: {gguf_path}")
        return gguf_path, None
    except Exception as e:
        error_msg = f"Error downloading model: {str(e)}"
        print(error_msg)
        return None, error_msg
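
# Optional smoke test for the download helper above (a hedged sketch, not part
# of the original app; DEBUG_DOWNLOAD is a hypothetical env-var gate so a
# normal import stays side-effect free):
if os.environ.get("DEBUG_DOWNLOAD"):
    _path, _err = download_model_from_hf()
    print(f"[debug] gguf path={_path!r}, error={_err!r}")
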
def get_optimal_settings():
    """Get optimal CPU threads and GPU layers automatically"""
    # Auto-detect CPU threads (use all available cores)
    n_threads = os.cpu_count()

    # For Hugging Face Spaces, limit threads to avoid resource issues
    if n_threads and n_threads > 4:
        n_threads = 4

    # Auto-detect GPU layers (try to use GPU if available)
    n_gpu_layers = 0
    try:
        # Try to detect if CUDA is available
        import subprocess
        result = subprocess.run(["nvidia-smi"], capture_output=True, text=True)
        if result.returncode == 0:
            # NVIDIA GPU detected, offload more layers
            n_gpu_layers = 35  # Good default for Llama-3-8B
    except Exception:
        # No GPU or CUDA not available
        n_gpu_layers = 0

    return n_threads, n_gpu_layers


def load_model_from_gguf(gguf_path=None, n_ctx=2048, use_hf_download=True):
    """Load the model from a GGUF file with automatic optimization"""
    global model, model_loaded

    if not LLAMA_CPP_AVAILABLE:
        return False, "llama-cpp-python not installed. Please install it with: pip install llama-cpp-python"

    try:
        # If no path provided, try different approaches
        if gguf_path is None:
            if use_hf_download:
                # Try to download from HuggingFace first
                gguf_path, error = download_model_from_hf()
                if error:
                    return False, f"❌ Failed to download from HuggingFace: {error}"
            else:
                # Try to find local GGUF files
                gguf_files = find_gguf_file()
                if not gguf_files:
                    return False, "No GGUF files found in the repository"
                gguf_path = gguf_files[0]  # Use the first one found
                print(f"Found local GGUF file: {gguf_path}")

        # Check if file exists
        if not os.path.exists(gguf_path):
            return False, f"GGUF file not found: {gguf_path}"

        print(f"Loading model from: {gguf_path}")

        # Get optimal settings automatically
        n_threads, n_gpu_layers = get_optimal_settings()
        print(f"Auto-detected settings: {n_threads} CPU threads, {n_gpu_layers} GPU layers")

        # Load model with optimized settings for Hugging Face Spaces
        model = Llama(
            model_path=gguf_path,
            n_ctx=n_ctx,                # Context window (configurable)
            n_threads=n_threads,        # CPU threads (limited for Spaces)
            n_gpu_layers=n_gpu_layers,  # Number of layers to offload to GPU
            verbose=False,
            chat_format="llama-3",      # Use Llama-3 chat format
            n_batch=256,                # Smaller batch size for Spaces
            use_mlock=False,            # Disabled for Spaces compatibility
            use_mmap=True,              # Use memory mapping
        )

        model_loaded = True
        print("Model loaded successfully!")
        return True, (
            f"✅ Model loaded successfully from {os.path.basename(gguf_path)}\n"
            f"📊 Context: {n_ctx} tokens\n"
            f"🖥️ CPU Threads: {n_threads}\n"
            f"🎮 GPU Layers: {n_gpu_layers}\n"
            f"📦 Source: {HF_REPO_ID}"
        )
    except Exception as e:
        model_loaded = False
        error_msg = f"Error loading model: {str(e)}"
        print(error_msg)
        return False, f"❌ {error_msg}"
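
# Optional end-to-end check: load the model and run one non-streaming
# completion to confirm the chat template works (a hedged sketch;
# DEBUG_COMPLETION is a hypothetical gate, since loading an 8B model on
# import would be far too heavy for a Space):
if os.environ.get("DEBUG_COMPLETION"):
    ok, status = load_model_from_gguf(n_ctx=2048)
    print(status)
    if ok:
        out = model.create_chat_completion(
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=32,
        )
        print(out["choices"][0]["message"]["content"])
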
def generate_response_stream(message, history, max_tokens=512, temperature=0.7, top_p=0.9, repeat_penalty=1.1):
    """Generate response from the model with streaming"""
    global model, model_loaded

    if not model_loaded or model is None:
        yield "Error: Model not loaded. Please load the model first."
        return

    try:
        # Format the conversation history for Llama-3
        conversation = []

        # Add conversation history
        for human, assistant in history:
            conversation.append({"role": "user", "content": human})
            if assistant:  # Only add if assistant response exists
                conversation.append({"role": "assistant", "content": assistant})

        # Add current message
        conversation.append({"role": "user", "content": message})

        # Generate response with streaming
        response = ""
        stream = model.create_chat_completion(
            messages=conversation,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            repeat_penalty=repeat_penalty,
            stream=True,
            stop=["<|eot_id|>", "<|end_of_text|>"],
        )

        for chunk in stream:
            if chunk["choices"][0]["delta"].get("content"):
                new_text = chunk["choices"][0]["delta"]["content"]
                response += new_text
                yield response
    except Exception as e:
        yield f"Error generating response: {str(e)}"


def chat_interface(message, history, max_tokens, temperature, top_p, repeat_penalty):
    """Main chat interface function"""
    if not message.strip():
        # This function is a generator, so early exits must yield; a bare
        # `return history, ""` here would never reach Gradio
        yield history, ""
        return

    if not model_loaded:
        history.append((message, "Please load the model first using the 'Load Model' button."))
        yield history, ""
        return

    # Add user message to history
    history = history + [(message, "")]

    # Generate response
    for response in generate_response_stream(message, history[:-1], max_tokens, temperature, top_p, repeat_penalty):
        history[-1] = (message, response)
        yield history, ""


def clear_chat():
    """Clear the chat history"""
    return [], ""


def load_model_interface(context_size, use_hf_download):
    """Interface function to load model with configurable context size"""
    success, message = load_model_from_gguf(gguf_path=None, n_ctx=int(context_size), use_hf_download=use_hf_download)
    return message


def get_available_gguf_files():
    """Get list of available GGUF files"""
    gguf_files = find_gguf_file()
    if not gguf_files:
        return ["No local GGUF files found"]
    return [os.path.basename(f) for f in gguf_files]


def check_model_availability():
    """Check if model is available locally or needs to be downloaded"""
    local_files = find_gguf_file()
    if local_files:
        return f"Local GGUF files found: {len(local_files)}"
    else:
        return "No local GGUF files found. Will download from HuggingFace."
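
# Optional terminal demo of the streaming generator without the Gradio UI
# (a hedged sketch; DEBUG_STREAM is a hypothetical gate, and the model must
# already be loaded or the generator yields an error string instead):
if os.environ.get("DEBUG_STREAM"):
    for partial in generate_response_stream("What is hypertension?", history=[]):
        print(partial)  # prints the accumulating reply as chunks arrive
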
# Create the Gradio interface
def create_interface():
    # Check for available models
    availability_status = check_model_availability()

    with gr.Blocks(title="MMed-Llama-Alpaca GGUF Chatbot", theme=gr.themes.Soft()) as demo:
        gr.HTML("""
            <p>Chat with the MMed-Llama-Alpaca model (Q4_K_M quantized) for medical assistance!</p>
            <p>⚠️ This is for educational purposes only. Always consult healthcare professionals for medical advice.</p>
        """)
        gr.HTML(f"<b>Repository:</b> {HF_REPO_ID}<br>")
        gr.HTML(f"<b>File:</b> {HF_FILENAME}<br>")
") load_btn = gr.Button("Load Model", variant="primary", size="lg") model_status = gr.Textbox( label="Status", value=f"Model not loaded.\n{availability_status}\n⚙️ Auto-optimized: CPU threads & GPU layers auto-detected\n📝 Context size can be configured below", interactive=False, max_lines=6 ) # Generation parameters gr.HTML("Model: MMed-Llama-Alpaca
            <b>Quantization:</b> Q4_K_M<br>
            <b>Format:</b> GGUF (optimized)<br>
            <b>Backend:</b> llama-cpp-python<br>
            <b>Features:</b> CPU/GPU support, streaming<br>
            <b>Specialty:</b> Medical assistance<br>
            <b>Auto-Optimization:</b> CPU threads & GPU layers detected automatically<br>
""") if not LLAMA_CPP_AVAILABLE: gr.HTML("""⚠️ Missing Dependency
Install llama-cpp-python:
pip install llama-cpp-python