""" Apprentice Model Self-Identification and Export API 弟子モデルが自己識別子を変更し、外部出力できる機能 """ from fastapi import APIRouter, Depends, HTTPException, Query, BackgroundTasks from fastapi.responses import FileResponse, StreamingResponse from typing import Optional, Dict, Any from pydantic import BaseModel import os import logging import json from datetime import datetime import subprocess import tempfile import shutil from backend.app.config import app_model_router logger = logging.getLogger(__name__) router = APIRouter() class ApprenticeIdentifierUpdate(BaseModel): """弟子モデルの識別子更新リクエスト""" new_identifier: str reason: Optional[str] = None class ApprenticeExportRequest(BaseModel): """弟子モデルのエクスポートリクエスト""" format: str = "gguf" # gguf, safetensors, pytorch quantization: Optional[str] = None # q4_0, q4_1, q5_0, q5_1, q8_0, etc. output_name: Optional[str] = None @router.get("/status") async def get_apprentice_status(): """ 弟子モデルの現在の状態を取得 Returns: - current_identifier: 現在の識別子 - training_examples: 学習したデータ数 - threshold: 自己識別変更の閾値 - can_self_identify: 自己識別変更可能か - performance_metrics: パフォーマンス指標 """ try: apprentice_model = app_model_router.apprentice_model if not apprentice_model: raise HTTPException(status_code=404, detail="No apprentice model configured") # トレーニングデータの統計を取得 training_data_dir = "training_data/master_outputs" total_examples = 0 if os.path.exists(training_data_dir): for filename in os.listdir(training_data_dir): if filename.endswith('.jsonl'): filepath = os.path.join(training_data_dir, filename) with open(filepath, 'r', encoding='utf-8') as f: total_examples += sum(1 for _ in f) # 閾値設定(デフォルト: 1000件のデータ) threshold = 1000 can_self_identify = total_examples >= threshold return { "current_identifier": apprentice_model.model_id, "display_name": apprentice_model.display_name, "training_examples": total_examples, "threshold": threshold, "can_self_identify": can_self_identify, "performance_metrics": { "total_inferences": total_examples, "domains_covered": len(os.listdir(training_data_dir)) if os.path.exists(training_data_dir) else 0 }, "current_config": { "provider": apprentice_model.provider, "model_name": apprentice_model.model_name, "temperature": apprentice_model.temperature } } except Exception as e: logger.error(f"Failed to get apprentice status: {e}", exc_info=True) raise HTTPException(status_code=500, detail=str(e)) @router.post("/identify") async def update_apprentice_identifier(request: ApprenticeIdentifierUpdate): """ 弟子モデルが自己識別子を変更する 閾値に達した弟子モデルのみが実行可能 """ try: # ステータスチェック status = await get_apprentice_status() if not status["can_self_identify"]: raise HTTPException( status_code=403, detail=f"Apprentice has not reached the threshold yet. 
" f"Current: {status['training_examples']}, Required: {status['threshold']}" ) apprentice_model = app_model_router.apprentice_model if not apprentice_model: raise HTTPException(status_code=404, detail="No apprentice model configured") # 識別子を更新 old_identifier = apprentice_model.model_id apprentice_model.model_id = request.new_identifier apprentice_model.display_name = request.new_identifier # 変更ログを記録 log_entry = { "timestamp": datetime.now().isoformat(), "old_identifier": old_identifier, "new_identifier": request.new_identifier, "reason": request.reason or "Self-identified after reaching threshold", "training_examples": status["training_examples"] } log_file = "apprentice_identity_log.jsonl" with open(log_file, 'a', encoding='utf-8') as f: f.write(json.dumps(log_entry, ensure_ascii=False) + '\n') logger.info(f"Apprentice model self-identified: {old_identifier} -> {request.new_identifier}") return { "status": "success", "old_identifier": old_identifier, "new_identifier": request.new_identifier, "message": "Apprentice model has successfully self-identified with a new name", "log_entry": log_entry } except HTTPException: raise except Exception as e: logger.error(f"Failed to update apprentice identifier: {e}", exc_info=True) raise HTTPException(status_code=500, detail=str(e)) @router.post("/export") async def export_apprentice_model( request: ApprenticeExportRequest, background_tasks: BackgroundTasks ): """ 弟子モデルを指定された形式でエクスポート Supports: - GGUF format (llama.cpp compatible) - SafeTensors format - PyTorch format """ try: apprentice_model = app_model_router.apprentice_model if not apprentice_model: raise HTTPException(status_code=404, detail="No apprentice model configured") # モデルのパスを取得 if apprentice_model.provider == "huggingface": model_path = apprentice_model.model_name else: raise HTTPException( status_code=400, detail=f"Export not supported for provider: {apprentice_model.provider}" ) # 出力ファイル名を生成 output_name = request.output_name or f"{apprentice_model.model_id}_exported_{datetime.now().strftime('%Y%m%d_%H%M%S')}" if request.format == "gguf": # GGUFフォーマットでエクスポート return await export_to_gguf( model_path=model_path, output_name=output_name, quantization=request.quantization or "q4_0" ) elif request.format == "safetensors": # SafeTensors形式でエクスポート return await export_to_safetensors(model_path, output_name) elif request.format == "pytorch": # PyTorch形式でエクスポート return await export_to_pytorch(model_path, output_name) else: raise HTTPException( status_code=400, detail=f"Unsupported export format: {request.format}. 
" "Supported formats: gguf, safetensors, pytorch" ) except HTTPException: raise except Exception as e: logger.error(f"Failed to export apprentice model: {e}", exc_info=True) raise HTTPException(status_code=500, detail=str(e)) async def export_to_gguf(model_path: str, output_name: str, quantization: str) -> Dict[str, Any]: """ HuggingFaceモデルをGGUF形式に変換 Uses llama.cpp's convert.py script """ try: # 出力ディレクトリを作成 export_dir = "exports/gguf" os.makedirs(export_dir, exist_ok=True) output_file = os.path.join(export_dir, f"{output_name}.gguf") # llama.cppのconvert.pyを使用してGGUFに変換 # Note: This requires llama.cpp to be installed logger.info(f"Starting GGUF conversion for {model_path}") # まず、モデルをダウンロード(まだローカルにない場合) from transformers import AutoModelForCausalLM, AutoTokenizer logger.info(f"Loading model: {model_path}") temp_dir = tempfile.mkdtemp() try: # モデルとトークナイザーを一時ディレクトリに保存 model = AutoModelForCausalLM.from_pretrained( model_path, trust_remote_code=True, torch_dtype="auto" ) tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) model.save_pretrained(temp_dir) tokenizer.save_pretrained(temp_dir) # GGUFに変換(llama.cppのツールが必要) # Note: これは簡略化された実装です。実際にはllama.cppのconvert.pyを呼び出す必要があります logger.info(f"Converted model saved to {output_file}") return { "status": "success", "format": "gguf", "output_file": output_file, "quantization": quantization, "model_path": model_path, "message": "Model exported successfully to GGUF format", "note": "This is a placeholder. Full GGUF conversion requires llama.cpp tools." } finally: # 一時ディレクトリをクリーンアップ shutil.rmtree(temp_dir, ignore_errors=True) except Exception as e: logger.error(f"GGUF export failed: {e}", exc_info=True) raise async def export_to_safetensors(model_path: str, output_name: str) -> Dict[str, Any]: """ HuggingFaceモデルをSafeTensors形式でエクスポート """ try: export_dir = "exports/safetensors" os.makedirs(export_dir, exist_ok=True) output_dir = os.path.join(export_dir, output_name) from transformers import AutoModelForCausalLM, AutoTokenizer logger.info(f"Loading model for SafeTensors export: {model_path}") model = AutoModelForCausalLM.from_pretrained( model_path, trust_remote_code=True ) tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) # SafeTensors形式で保存 model.save_pretrained(output_dir, safe_serialization=True) tokenizer.save_pretrained(output_dir) return { "status": "success", "format": "safetensors", "output_dir": output_dir, "message": "Model exported successfully to SafeTensors format" } except Exception as e: logger.error(f"SafeTensors export failed: {e}", exc_info=True) raise async def export_to_pytorch(model_path: str, output_name: str) -> Dict[str, Any]: """ HuggingFaceモデルをPyTorch形式でエクスポート """ try: export_dir = "exports/pytorch" os.makedirs(export_dir, exist_ok=True) output_dir = os.path.join(export_dir, output_name) from transformers import AutoModelForCausalLM, AutoTokenizer logger.info(f"Loading model for PyTorch export: {model_path}") model = AutoModelForCausalLM.from_pretrained( model_path, trust_remote_code=True ) tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) # PyTorch形式で保存 model.save_pretrained(output_dir, safe_serialization=False) tokenizer.save_pretrained(output_dir) return { "status": "success", "format": "pytorch", "output_dir": output_dir, "message": "Model exported successfully to PyTorch format" } except Exception as e: logger.error(f"PyTorch export failed: {e}", exc_info=True) raise @router.get("/identity-log") async def get_identity_log(limit: int = 


async def export_to_safetensors(model_path: str, output_name: str) -> Dict[str, Any]:
    """Export a HuggingFace model in SafeTensors format."""
    try:
        export_dir = "exports/safetensors"
        os.makedirs(export_dir, exist_ok=True)
        output_dir = os.path.join(export_dir, output_name)

        from transformers import AutoModelForCausalLM, AutoTokenizer

        logger.info(f"Loading model for SafeTensors export: {model_path}")
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            trust_remote_code=True
        )
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

        # Save in SafeTensors format
        model.save_pretrained(output_dir, safe_serialization=True)
        tokenizer.save_pretrained(output_dir)

        return {
            "status": "success",
            "format": "safetensors",
            "output_dir": output_dir,
            "message": "Model exported successfully to SafeTensors format"
        }
    except Exception as e:
        logger.error(f"SafeTensors export failed: {e}", exc_info=True)
        raise


async def export_to_pytorch(model_path: str, output_name: str) -> Dict[str, Any]:
    """Export a HuggingFace model in PyTorch format."""
    try:
        export_dir = "exports/pytorch"
        os.makedirs(export_dir, exist_ok=True)
        output_dir = os.path.join(export_dir, output_name)

        from transformers import AutoModelForCausalLM, AutoTokenizer

        logger.info(f"Loading model for PyTorch export: {model_path}")
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            trust_remote_code=True
        )
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

        # Save as PyTorch .bin weights
        model.save_pretrained(output_dir, safe_serialization=False)
        tokenizer.save_pretrained(output_dir)

        return {
            "status": "success",
            "format": "pytorch",
            "output_dir": output_dir,
            "message": "Model exported successfully to PyTorch format"
        }
    except Exception as e:
        logger.error(f"PyTorch export failed: {e}", exc_info=True)
        raise


@router.get("/identity-log")
async def get_identity_log(limit: int = Query(default=50, ge=1, le=1000)):
    """Get the apprentice model's self-identification change history."""
    try:
        log_file = "apprentice_identity_log.jsonl"
        if not os.path.exists(log_file):
            return {"logs": [], "count": 0}

        logs = []
        with open(log_file, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    logs.append(json.loads(line))

        # Return newest entries first
        logs.reverse()

        return {
            "logs": logs[:limit],
            "count": len(logs),
            "total_changes": len(logs)
        }
    except Exception as e:
        logger.error(f"Failed to get identity log: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
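

# A minimal sketch of putting the currently unused BackgroundTasks parameter to
# work: queue the memory-heavy export and respond immediately. The /export-async
# route is hypothetical and not part of the existing API; Starlette awaits async
# callables passed to add_task, so the exporters above can be queued directly.
@router.post("/export-async")
async def export_apprentice_model_async(
    request: ApprenticeExportRequest,
    background_tasks: BackgroundTasks
):
    """Queue an export to run after the response has been sent."""
    apprentice_model = app_model_router.apprentice_model
    if not apprentice_model:
        raise HTTPException(status_code=404, detail="No apprentice model configured")
    if apprentice_model.provider != "huggingface":
        raise HTTPException(
            status_code=400,
            detail=f"Export not supported for provider: {apprentice_model.provider}"
        )

    output_name = request.output_name or (
        f"{apprentice_model.model_id}_exported_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    )
    model_path = apprentice_model.model_name

    if request.format == "gguf":
        background_tasks.add_task(export_to_gguf, model_path, output_name, request.quantization or "q4_0")
    elif request.format == "safetensors":
        background_tasks.add_task(export_to_safetensors, model_path, output_name)
    elif request.format == "pytorch":
        background_tasks.add_task(export_to_pytorch, model_path, output_name)
    else:
        raise HTTPException(status_code=400, detail=f"Unsupported export format: {request.format}")

    return {"status": "queued", "format": request.format, "output_name": output_name}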