| """ | |
| Flexible version: Works on both ZeroGPU and CPU Upgrade hardware | |
| Automatically detects hardware and adjusts accordingly | |
| """ | |
| # Try to import spaces for ZeroGPU support | |
| try: | |
| import spaces | |
| ZEROGPU_AVAILABLE = True | |
| print("✅ ZeroGPU support enabled") | |
| except ImportError: | |
| ZEROGPU_AVAILABLE = False | |
| print("ℹ️ ZeroGPU not available, using standard mode") | |
import os

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load environment variables from .env file
try:
    from dotenv import load_dotenv

    load_dotenv()  # Load .env file into environment
    print("✅ .env file loaded")
except ImportError:
    print("⚠️ python-dotenv not installed, using system environment variables only")

# Get HF token from environment
HF_TOKEN = os.getenv("HF_TOKEN", None)
if HF_TOKEN:
    print(f"✅ HF_TOKEN loaded (length: {len(HF_TOKEN)} chars)")
else:
    print("⚠️ HF_TOKEN not found in environment - some models may not be accessible")
# Model configurations (10 public + 3 gated models = 13 total)
# Note: gated models require HF access approval at https://huggingface.co/[model-name]
MODEL_CONFIGS = [
    {
        "MODEL_NAME": "LGAI-EXAONE/EXAONE-3.5-7.8B-Instruct",
        "MODEL_CONFIG": {
            "name": "EXAONE 3.5 7.8B Instruct ⭐ (파라미터 대비 최고 효율)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
        "MODEL_CONFIG": {
            "name": "EXAONE 3.5 2.4B Instruct ⚡ (초경량, 빠른 응답)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "beomi/Llama-3-Open-Ko-8B",
        "MODEL_CONFIG": {
            "name": "Llama-3 Open-Ko 8B 🔥 (Llama 3 생태계)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "Qwen/Qwen2.5-7B-Instruct",
        "MODEL_CONFIG": {
            "name": "Qwen2.5 7B Instruct (한글 지시응답 우수)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "Qwen/Qwen2.5-14B-Instruct",
        "MODEL_CONFIG": {
            "name": "Qwen2.5 14B Instruct (다국어·한글 강점, 여유 GPU 권장)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "meta-llama/Llama-3.1-8B-Instruct",
        "MODEL_CONFIG": {
            "name": "Llama 3.1 8B Instruct 🔒 (커뮤니티 Ko 튜닝 활발, 승인 필요)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "meta-llama/Llama-3.1-70B-Instruct",
        "MODEL_CONFIG": {
            "name": "Llama 3.1 70B Instruct 🔒 (대규모·한글 품질 우수, 승인 필요)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "01-ai/Yi-1.5-9B-Chat",
        "MODEL_CONFIG": {
            "name": "Yi 1.5 9B Chat (다국어/한글 안정적 대화)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "01-ai/Yi-1.5-34B-Chat",
        "MODEL_CONFIG": {
            "name": "Yi 1.5 34B Chat (긴 문맥·한글 생성 강점)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "mistralai/Mistral-7B-Instruct-v0.3",
        "MODEL_CONFIG": {
            "name": "Mistral 7B Instruct v0.3 (경량·한글 커뮤니티 튜닝 多)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "upstage/SOLAR-10.7B-Instruct-v1.0",
        "MODEL_CONFIG": {
            "name": "Solar 10.7B Instruct v1.0 (한국어 강점, 실전 지시응답)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "EleutherAI/polyglot-ko-5.8b",
        "MODEL_CONFIG": {
            "name": "Polyglot-Ko 5.8B (한국어 중심 베이스)",
            "max_length": 150,
        },
    },
    {
        "MODEL_NAME": "CohereForAI/aya-23-8B",
        "MODEL_CONFIG": {
            "name": "Aya-23 8B 🔒 (다국어·한국어 지원 양호, 승인 필요)",
            "max_length": 150,
        },
    },
]
# Default model
current_model_index = 0
loaded_model_name = None  # Track which model is currently loaded

# Global model cache
model = None
tokenizer = None
def check_model_cached(model_name):
    """Check whether the model is already downloaded to the HF cache."""
    try:
        from huggingface_hub import scan_cache_dir

        cache_info = scan_cache_dir()
        # Check if the model exists in the cache
        for repo in cache_info.repos:
            if repo.repo_id == model_name:
                return True
        return False
    except Exception as e:
        # If the cache can't be scanned, assume the model is not cached
        print(f" ⚠️ Unable to check cache: {e}")
        return False
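# Usage sketch (assumption; not called by the app): scan_cache_dir() also
# reports per-repo disk usage, which can help when logging cache pressure on
# the Space's ephemeral disk:
#
#   from huggingface_hub import scan_cache_dir
#   for repo in scan_cache_dir().repos:
#       print(repo.repo_id, repo.size_on_disk)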
def load_model_once(model_index=None):
    """Load the model and tokenizer for the selected index (lazy loading)."""
    global model, tokenizer, current_model_index, loaded_model_name

    if model_index is None:
        model_index = current_model_index

    # Get model config
    model_name = MODEL_CONFIGS[model_index]["MODEL_NAME"]

    # Check if we need to reload (different model, or nothing loaded yet)
    if loaded_model_name != model_name:
        print(f"🔄 Loading model: {model_name}")
        print(f" Previous model: {loaded_model_name or 'None'}")

        # Check if the model is already cached
        is_cached = check_model_cached(model_name)
        if is_cached:
            print(" ✅ Model found in cache, loading from disk...")
        else:
            print(" 📥 Model not in cache, will download (~4-14GB depending on model)...")

        # Clear the previous model before loading a new one
        if model is not None:
            print(" 🗑️ Unloading previous model from memory...")
            del model
            del tokenizer
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

        # Load tokenizer
        print(" 📝 Loading tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            token=HF_TOKEN,
            trust_remote_code=True,
        )
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        # Detect device
        device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"📍 Using device: {device}")

        # Load model with appropriate settings
        if is_cached:
            print(" 📀 Loading model from disk cache (15-30 seconds)...")
        else:
            print(" 🌐 Downloading model from network (5-20 minutes, first time only)...")

        if device == "cuda":
            # GPU available (CPU Upgrade with GPU, or ZeroGPU)
            model = AutoModelForCausalLM.from_pretrained(
                model_name,
                token=HF_TOKEN,
                torch_dtype=torch.float16,  # float16 on GPU
                low_cpu_mem_usage=True,
                trust_remote_code=True,
                device_map="auto",  # accelerate places the weights; no .to() needed
            )
        else:
            # CPU only
            model = AutoModelForCausalLM.from_pretrained(
                model_name,
                token=HF_TOKEN,
                torch_dtype=torch.float32,  # float32 on CPU
                low_cpu_mem_usage=True,
                trust_remote_code=True,
            )
            model.to(device)

        model.eval()
        current_model_index = model_index
        loaded_model_name = model_name
        print(f"✅ Model {model_name} loaded successfully")
    else:
        print(f"ℹ️ Model {model_name} already loaded, reusing...")

    return model, tokenizer
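# Smoke-test sketch (assumption; not executed by the app): load the default
# model once and confirm the tokenizer round-trips a short Korean string.
#
#   m, t = load_model_once(0)
#   ids = t("안녕하세요", return_tensors="pt")["input_ids"]
#   print(t.decode(ids[0], skip_special_tokens=True))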
def generate_response_impl(message, history):
    """Core generation logic (shared by the ZeroGPU and CPU paths)."""
    if not message or not message.strip():
        return history

    try:
        # Ensure the model is loaded
        current_model, current_tokenizer = load_model_once()
        if current_model is None or current_tokenizer is None:
            return history + [{"role": "assistant", "content": "❌ 모델을 로드할 수 없습니다."}]

        # Get device
        device = next(current_model.parameters()).device

        # Build conversation context. `history` already ends with the current
        # user message (appended by chat_wrapper), so take the last 3 turns
        # *before* it to avoid repeating the message in the prompt.
        conversation = ""
        for msg in history[-7:-1]:  # Up to 6 prior messages: 3 user + 3 assistant
            if msg["role"] == "user":
                conversation += f"사용자: {msg['content']}\n"
            elif msg["role"] == "assistant":
                conversation += f"어시스턴트: {msg['content']}\n"
        conversation += f"사용자: {message}\n어시스턴트:"

        # Tokenize with an attention mask
        encoded = current_tokenizer(
            conversation,
            return_tensors="pt",
            truncation=True,
            max_length=512,
            padding=True,
        )
        inputs = encoded["input_ids"].to(device)
        attention_mask = encoded["attention_mask"].to(device)

        # Get the current model config
        model_config = MODEL_CONFIGS[current_model_index]["MODEL_CONFIG"]

        # Generate response
        with torch.no_grad():
            outputs = current_model.generate(
                inputs,
                attention_mask=attention_mask,
                max_new_tokens=model_config["max_length"],
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                pad_token_id=current_tokenizer.pad_token_id,
                eos_token_id=current_tokenizer.eos_token_id,
            )

        # Decode response
        full_response = current_tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Extract only the assistant's response
        if "어시스턴트:" in full_response:
            response = full_response.split("어시스턴트:")[-1].strip()
        else:
            response = full_response[len(conversation):].strip()

        if not response:
            response = "죄송합니다. 응답을 생성할 수 없었습니다."

        return history + [{"role": "assistant", "content": response}]

    except Exception as e:
        import traceback

        error_msg = str(e)
        print("=" * 50)
        print(f"Error: {error_msg}")
        print(traceback.format_exc())
        print("=" * 50)
        return history + [{"role": "assistant", "content": f"❌ 오류: {error_msg[:200]}"}]
# Conditionally apply the ZeroGPU decorator; @spaces.GPU requests a GPU slice
# for the duration of the call when running on ZeroGPU hardware
if ZEROGPU_AVAILABLE:
    @spaces.GPU
    def generate_response(message, history):
        """GPU-accelerated response generation (ZeroGPU mode)"""
        return generate_response_impl(message, history)
else:
    def generate_response(message, history):
        """Standard response generation (CPU Upgrade mode)"""
        return generate_response_impl(message, history)
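# Note (assumption): for generations that exceed ZeroGPU's default time window,
# the decorator accepts a duration hint, e.g. @spaces.GPU(duration=120).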
def chat_wrapper(message, history):
    """Wrapper used by the Gradio submit handler."""
    # With type="messages", history is a list of {"role", "content"} dicts.
    # Append the new user message, then generate the assistant response.
    updated_history = history + [{"role": "user", "content": message}]
    response_history = generate_response(message, updated_history)
    return response_history
# Determine hardware info for the UI
hardware_info = "NVIDIA H200 (ZeroGPU)" if ZEROGPU_AVAILABLE else "CPU Upgrade (32GB RAM)"
print(f"✅ App initialized - Hardware: {hardware_info}")
# Create Gradio interface
with gr.Blocks(title="🤖 Multi-Model Chatbot") as demo:
    # Dynamic header based on hardware
    if ZEROGPU_AVAILABLE:
        header = """
# 🤖 다중 모델 챗봇 (ZeroGPU)

**하드웨어**: NVIDIA H200 (ZeroGPU - 자동 할당)

**특징**:
- ⚡ GPU 가속으로 빠른 응답 (3-5초)
- 🎯 13가지 한글 최적화 모델 선택 가능
- 🔄 모델 전환 시 자동 재로딩
- 💰 PRO 구독 시 하루 25분 무료 사용
"""
    else:
        header = """
# 🤖 다중 모델 챗봇 (CPU Upgrade)

**하드웨어**: CPU Upgrade (8 vCPU / 32 GB RAM)

**특징**:
- 🎯 13가지 한글 최적화 모델 선택 가능
- 🔄 모델 전환 시 자동 재로딩
- ⏳ CPU 환경이므로 응답이 다소 느립니다 (30초~1분)
- 💰 시간당 $0.03 (월 약 $22)
"""
    gr.Markdown(header)

    # Model selector
    model_choices = [cfg["MODEL_CONFIG"]["name"] for cfg in MODEL_CONFIGS]
    model_dropdown = gr.Dropdown(
        choices=model_choices,
        value=model_choices[0],
        label="🤖 모델 선택",
        interactive=True,
    )

    chatbot = gr.Chatbot(height=400, type="messages", show_label=False)

    with gr.Row():
        msg = gr.Textbox(
            placeholder="한글로 메시지를 입력하세요...",
            show_label=False,
            scale=9,
        )
        btn = gr.Button("전송", scale=1, variant="primary")

    clear = gr.Button("🗑️ 대화 초기화", size="sm")
    def change_model(selected_model):
        """Handle model change"""
        global current_model_index
        # Find the index of the selected model
        for idx, cfg in enumerate(MODEL_CONFIGS):
            if cfg["MODEL_CONFIG"]["name"] == selected_model:
                current_model_index = idx
                break
        # Clear chat history when changing models
        return []
    def submit(message, history):
        global loaded_model_name, current_model_index
        # Immediately show the user message
        updated_history = history + [{"role": "user", "content": message}]
        yield updated_history, ""

        # Check whether the model needs to be (re)loaded
        selected_model_name = MODEL_CONFIGS[current_model_index]["MODEL_NAME"]
        if loaded_model_name != selected_model_name:
            # Check whether the model is cached
            is_cached = check_model_cached(selected_model_name)
            if is_cached:
                # Model is cached; loading from disk only
                loading_history = updated_history + [{"role": "assistant", "content": "💾 캐시된 모델 디스크에서 로딩 중... (15-30초, 다운로드 안 함)"}]
            else:
                # Model needs to be downloaded first
                loading_history = updated_history + [{"role": "assistant", "content": "📥 모델 다운로드 시작... (4-14GB, 첫 사용 시 5-20분 소요)"}]
            yield loading_history, ""
        else:
            # Show a "thinking" indicator
            thinking_history = updated_history + [{"role": "assistant", "content": "🤔 응답 생성 중..."}]
            yield thinking_history, ""

        # Generate and append the bot response (this loads the model if needed)
        final_history = chat_wrapper(message, history)
        yield final_history, ""
    # Event handlers
    model_dropdown.change(change_model, inputs=[model_dropdown], outputs=[chatbot])
    btn.click(submit, [msg, chatbot], [chatbot, msg])
    msg.submit(submit, [msg, chatbot], [chatbot, msg])
    clear.click(lambda: [], outputs=chatbot)
    # Dynamic footer based on hardware
    if ZEROGPU_AVAILABLE:
        footer = """
---
**참고사항 (ZeroGPU 모드)**:
- 🤖 13가지 모델 중 선택 가능 (드롭다운에서 선택)
- ⚡ ZeroGPU는 요청 시 자동으로 GPU를 할당합니다
- 💰 PRO 구독자는 하루 25분 무료 사용
- 🔄 모델 변경 시 대화 내역이 초기화됩니다
- ⏱️ 첫 응답은 모델 로딩 시간 포함 (~10-15초)

**테스트 예시**:
- "안녕하세요"
- "인공지능에 대해 설명해주세요"
- "한국의 수도는 어디인가요?"
"""
    else:
        footer = """
---
**참고사항 (CPU Upgrade 모드)**:
- 🤖 13가지 모델 중 선택 가능 (드롭다운에서 선택)
- 🔄 모델 변경 시 대화 내역이 초기화됩니다
- ⏳ CPU 환경이므로 응답이 느립니다 (30초~1분)
- ⏱️ 첫 응답은 모델 로딩 시간 포함 (~1-2분)
- 💰 24시간 무제한 사용 (시간당 $0.03)

**테스트 예시**:
- "안녕하세요"
- "인공지능에 대해 설명해주세요"
- "한국의 수도는 어디인가요?"
"""
    gr.Markdown(footer)
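# Note (assumption): `submit` is a generator, which relies on Gradio's queue.
# The queue is on by default in Gradio 4+, but it can be configured explicitly:
#
#   demo.queue(max_size=16).launch()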
if __name__ == "__main__":
    demo.launch()