{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 🚀 Stack 2.9 - Colab Training Notebook\n",
    "\n",
    "**Zero-cost training on Google Colab free tier with T4 GPU**\n",
    "\n",
    "⏱️ **Expected runtime:** 3-5 hours\n",
    "💾 **VRAM needed:** ~12GB (fits in T4's 15GB)\n",
    "\n",
    "---\n",
    "\n",
    "**CRITICAL:** Run cells in order from the top!\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# STEP 1: Setup - Mount Drive and define root directory\n",
    "from google.colab import drive\n",
    "drive.mount('/content/drive')\n",
    "\n",
    "import os\n",
    "ROOT_DIR = \"/content/drive/MyDrive/stack-2.9\"\n",
    "os.makedirs(ROOT_DIR, exist_ok=True)\n",
    "os.chdir(ROOT_DIR)\n",
    "\n",
    "# Define all paths once so every later cell agrees on them\n",
    "REPO_DIR = os.path.join(ROOT_DIR, \"stack-2.9\")\n",
    "MODEL_DIR = os.path.join(REPO_DIR, \"base_model_qwen7b\")\n",
    "OUTPUT_DIR = os.path.join(ROOT_DIR, \"training_output\")\n",
    "\n",
    "print(f\"✅ ROOT_DIR: {ROOT_DIR}\")\n",
    "print(f\"✅ REPO_DIR: {REPO_DIR}\")\n",
    "print(f\"✅ MODEL_DIR: {MODEL_DIR}\")\n",
    "print(f\"✅ OUTPUT_DIR: {OUTPUT_DIR}\")\n",
    "!ls -la"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# STEP 2: Clone repo (with retry logic)\n",
    "import shutil\n",
    "import subprocess\n",
    "import time\n",
    "\n",
    "REPO_URL = \"https://github.com/my-ai-stack/stack-2.9.git\"\n",
    "MAX_RETRIES = 3\n",
    "\n",
    "# A `!git clone` shell command never raises a Python exception on failure,\n",
    "# so a try/except around it cannot retry anything - check the exit code\n",
    "# explicitly via subprocess instead.\n",
    "for attempt in range(1, MAX_RETRIES + 1):\n",
    "    if os.path.exists('stack-2.9'):\n",
    "        print(f\"Attempt {attempt}: Removing old stack-2.9...\")\n",
    "        shutil.rmtree('stack-2.9')\n",
    "\n",
    "    print(f\"Attempt {attempt}: Cloning repository...\")\n",
    "    result = subprocess.run(['git', 'clone', REPO_URL], capture_output=True, text=True)\n",
    "\n",
    "    if result.returncode == 0 and os.path.exists('stack-2.9'):\n",
    "        print(\"✅ Clone successful!\")\n",
    "        break\n",
    "\n",
    "    print(f\"⚠️ Attempt {attempt} failed: {result.stderr.strip()}\")\n",
    "    if attempt < MAX_RETRIES:\n",
    "        print(\"Retrying in 5 seconds...\")\n",
    "        time.sleep(5)\n",
    "else:\n",
    "    # for/else: runs only if no attempt broke out of the loop\n",
    "    raise RuntimeError(f\"Failed to clone repository after {MAX_RETRIES} attempts\")\n",
    "\n",
    "os.chdir(REPO_DIR)\n",
    "print(f\"✅ In: {os.getcwd()}\")\n",
    "!ls -la"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# STEP 3: Install dependencies\n",
    "# %pip (not !pip) guarantees the packages land in the running kernel's env\n",
    "%pip install -q torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118\n",
    "%pip install -q transformers peft accelerate datasets pyyaml tqdm scipy bitsandbytes\n",
    "print(\"✅ Dependencies installed\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# STEP 4: Verify Base Model (Qwen2.5-Coder-7B) is present on Drive\n",
    "# The model is deliberately NOT downloaded here: pulling 7B weights on the\n",
    "# free tier tends to crash the session, so the files are uploaded manually.\n",
    "MODEL_NAME = \"Qwen/Qwen2.5-Coder-7B\"\n",
    "\n",
    "if os.path.exists(os.path.join(MODEL_DIR, \"config.json\")):\n",
    "    print(f\"✅ Model already exists at: {MODEL_DIR}\")\n",
    "elif os.path.exists(os.path.join(MODEL_DIR, \"model.safetensors\")):\n",
    "    # Weights without config.json means the manual upload is incomplete\n",
    "    print(\"⚠️ Model weights found but config.json is missing - upload may be incomplete\")\n",
    "else:\n",
    "    print(f\"⚠️ Model not found at: {MODEL_DIR}\")\n",
    "    print(\"⏭️ SKIPPING model download to avoid crash...\")\n",
    "    print(\"   To train, you'll need to:\")\n",
    "    print(\"   1. Download model locally using Ollama\")\n",
    "    print(\"   2. Upload model files to Drive manually\")\n",
    "    print(\"   OR use a smaller model\")\n",
    "\n",
    "# Continue even without model - training step will handle it\n",
    "print(f\"\\nModel dir check: {os.path.exists(MODEL_DIR)}\")\n",
    "if os.path.exists(MODEL_DIR):\n",
    "    !ls -lh {MODEL_DIR} | head -5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# STEP 5: Find training data (fail fast if missing)\n",
    "import json\n",
    "\n",
    "DATA_PATH = None\n",
    "\n",
    "# Check multiple possible locations\n",
    "possible_paths = [\n",
    "    os.path.join(REPO_DIR, \"data/final/train.jsonl\"),\n",
    "    os.path.join(REPO_DIR, \"training-data/final/train.jsonl\"),\n",
    "    os.path.join(REPO_DIR, \"data_mini/train_mini.jsonl\"),\n",
    "]\n",
    "\n",
    "for path in possible_paths:\n",
    "    if os.path.exists(path):\n",
    "        DATA_PATH = path\n",
    "        print(f\"✅ Found data at: {path}\")\n",
    "        break\n",
    "\n",
    "if DATA_PATH is None:\n",
    "    # Stop here rather than letting STEP 6 write a null data path into the config\n",
    "    raise FileNotFoundError(\n",
    "        \"No training data found. Checked:\\n\" + \"\\n\".join(possible_paths)\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# STEP 6: Prepare Training Configuration\n",
    "import yaml\n",
    "\n",
    "config_path = os.path.join(REPO_DIR, \"stack/training/train_config_local.yaml\")\n",
    "\n",
    "if not os.path.exists(config_path):\n",
    "    raise FileNotFoundError(f\"Config not found at: {config_path}\")\n",
    "\n",
    "with open(config_path, 'r') as f:\n",
    "    config = yaml.safe_load(f)\n",
    "\n",
    "# Update config with absolute paths\n",
    "config['model']['name'] = MODEL_DIR\n",
    "config['data']['input_path'] = DATA_PATH\n",
    "config['output']['lora_dir'] = os.path.join(OUTPUT_DIR, \"lora\")\n",
    "config['output']['merged_dir'] = os.path.join(OUTPUT_DIR, \"merged\")\n",
    "config['hardware']['device'] = \"cuda\"\n",
    "config['hardware']['num_gpus'] = 1\n",
    "\n",
    "os.makedirs(OUTPUT_DIR, exist_ok=True)\n",
    "updated_config_path = os.path.join(OUTPUT_DIR, \"train_config.yaml\")\n",
    "\n",
    "with open(updated_config_path, 'w') as f:\n",
    "    yaml.dump(config, f)\n",
    "\n",
    "print(f\"✅ Config saved to: {updated_config_path}\")\n",
    "print(f\"   Model: {config['model']['name']}\")\n",
    "print(f\"   Data: {config['data']['input_path']}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# STEP 7: Train LoRA Adapter\n",
    "import sys\n",
    "\n",
    "# Fail fast: starting training without the base model would crash mid-run\n",
    "if not os.path.exists(os.path.join(MODEL_DIR, \"config.json\")):\n",
    "    print(\"❌ Model not found! Cannot train without base model.\")\n",
    "    print(f\"Expected at: {MODEL_DIR}\")\n",
    "    raise RuntimeError(\"Model missing - please upload base model to Drive first\")\n",
    "\n",
    "sys.path.insert(0, os.path.join(REPO_DIR, \"stack/training\"))\n",
    "\n",
    "print(\"=\"*60)\n",
    "print(\"STARTING TRAINING\")\n",
    "print(\"=\"*60)\n",
    "\n",
    "from train_lora import train_lora\n",
    "trainer = train_lora(updated_config_path)\n",
    "\n",
    "print(\"=\"*60)\n",
    "print(\"TRAINING COMPLETED\")\n",
    "print(\"=\"*60)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# STEP 8: Verify LoRA output before merging\n",
    "lora_dir = os.path.join(OUTPUT_DIR, \"lora\")\n",
    "print(f\"Checking LoRA: {lora_dir}\")\n",
    "if os.path.exists(lora_dir):\n",
    "    !ls -lh {lora_dir}\n",
    "else:\n",
    "    print(\"❌ No LoRA output found\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# STEP 9: Merge LoRA into the base model\n",
    "import sys\n",
    "import yaml\n",
    "sys.path.insert(0, os.path.join(REPO_DIR, \"stack/training\"))\n",
    "from merge_adapter import merge_adapter\n",
    "\n",
    "# NOTE(review): the original notebook imported merge_adapter but never called\n",
    "# it, so the merged model promised below was never produced. Assuming it takes\n",
    "# the config path like train_lora does - confirm against merge_adapter.py.\n",
    "merge_adapter(updated_config_path)\n",
    "print(f\"✅ Merged model at: {os.path.join(OUTPUT_DIR, 'merged')}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 🔚 Training Complete!\n",
    "\n",
    "Your model is ready at:\n",
    "`/content/drive/MyDrive/stack-2.9/training_output/merged/`\n",
    "\n",
    "Download it from Google Drive!"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "name": "Stack 2.9 Training",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}