#!/usr/bin/env python3
"""
Production Installation Script for AI Text Humanizer
Ensures all advanced features are properly installed and working
"""
import subprocess
import sys
import os
import time
def run_command(cmd, description, critical=True):
    """Run a shell command, printing progress and the outcome.

    Args:
        cmd: Shell command string (executed with ``shell=True``).
        description: Human-readable label shown in progress output.
        critical: When True, a failing command makes this return False;
            when False, failures are reported but still return True.

    Returns:
        bool: True on success (or on a non-critical failure).
    """
    print(f"π {description}...")
    try:
        # NOTE(review): shell=True is acceptable here because every cmd is a
        # fixed internal string; never pass untrusted input through `cmd`.
        result = subprocess.run(cmd, shell=True, check=True,
                                capture_output=True, text=True)
        print(f"β {description} - SUCCESS")
        if result.stdout.strip():
            print(f"   Output: {result.stdout.strip()}")
        return True
    except subprocess.CalledProcessError as e:
        print(f"β {description} - FAILED")
        print(f"   Error: {e.stderr.strip()}")
        # Non-critical failures do not abort the caller's install flow.
        return not critical
def check_gpu_availability():
    """Check if CUDA/GPU is available for better performance.

    Returns:
        bool: True when ``nvidia-smi`` runs and exits 0, else False.
    """
    try:
        result = subprocess.run(["nvidia-smi"], capture_output=True, text=True)
        if result.returncode == 0:
            print("π NVIDIA GPU detected - will install CUDA support")
            return True
    except OSError:
        # OSError covers FileNotFoundError (no nvidia-smi on PATH) as well
        # as PermissionError (binary present but not executable).
        pass
    print("π» No NVIDIA GPU detected - using CPU versions")
    return False
def _pip_install_all(commands, critical=True):
    """Run each ``pip install <spec>`` command in order.

    The progress label uses the package spec (third token of the command).
    The original code used ``cmd.split()[1]``, which is always the literal
    word "install", so every label read "Installing install".

    Args:
        commands: List of ``pip install <spec>`` command strings.
        critical: Forwarded to run_command; when True, stop at the first
            failure.

    Returns:
        bool: False on the first critical failure, otherwise True.
    """
    for cmd in commands:
        if not run_command(cmd, f"Installing {cmd.split()[2]}", critical=critical):
            return False
    return True


def production_install():
    """Install production-grade AI Text Humanizer with all features.

    Walks through ten ordered steps: cleanup, build tools, PyTorch,
    HuggingFace stack, sentence-transformers, ML libraries, web frameworks,
    optional production libraries, NLTK data, and model pre-download.

    Returns:
        bool: True when every critical step succeeded.
    """
    print("π AI TEXT HUMANIZER - PRODUCTION INSTALLATION")
    print("=" * 55)
    print("π This will install ALL advanced features:")
    print("   β¨ Advanced semantic similarity (sentence-transformers)")
    print("   π§  AI paraphrasing capabilities (transformers)")
    print("   π GPU acceleration (if available)")
    print("   π Full API and web interfaces")
    print("")

    # Check system for a usable NVIDIA GPU before choosing a torch backend.
    has_gpu = check_gpu_availability()

    print("π§ Starting production installation...")
    print("-" * 40)

    # Step 1: Clean existing installation (best-effort; failures ignored).
    print("\nπ¦ STEP 1: Cleaning existing installation")
    cleanup_commands = [
        "pip uninstall -y sentence-transformers transformers huggingface_hub torch torchvision torchaudio",
        "pip cache purge",
    ]
    for cmd in cleanup_commands:
        run_command(cmd, "Cleaning previous installation", critical=False)

    # Step 2: Upgrade pip and install build tools.
    print("\nπ¨ STEP 2: Installing build tools")
    build_commands = [
        "pip install --upgrade pip setuptools wheel",
        "pip install --upgrade packaging",
    ]
    for cmd in build_commands:
        if not run_command(cmd, "Installing build tools"):
            return False

    # Step 3: Install PyTorch from the CUDA or CPU wheel index.
    print("\nπ§  STEP 3: Installing PyTorch")
    if has_gpu:
        torch_cmd = "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121"
    else:
        torch_cmd = "pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu"
    if not run_command(torch_cmd, "Installing PyTorch with proper backend"):
        print("β οΈ PyTorch installation failed, trying alternative...")
        if not run_command("pip install torch==2.1.0", "Installing PyTorch (fallback)"):
            return False

    # Step 4: Install HuggingFace ecosystem with mutually compatible pins.
    print("\nπ€ STEP 4: Installing HuggingFace ecosystem")
    hf_commands = [
        "pip install huggingface_hub==0.17.3",
        "pip install tokenizers==0.14.1",
        "pip install transformers==4.35.0",
        "pip install accelerate==0.24.1",
    ]
    if not _pip_install_all(hf_commands):
        return False

    # Step 5: Install sentence-transformers, with an older-pin fallback.
    print("\nπ€ STEP 5: Installing Sentence Transformers")
    if not run_command("pip install sentence-transformers==2.2.2", "Installing Sentence Transformers"):
        print("β οΈ Trying alternative installation...")
        if not run_command("pip install sentence-transformers==2.1.0", "Installing Sentence Transformers (fallback)"):
            return False

    # Step 6: Install additional ML libraries.
    print("\nπ STEP 6: Installing ML libraries")
    ml_commands = [
        "pip install scikit-learn==1.3.2",
        "pip install numpy==1.25.2",
        "pip install pandas==2.1.3",
        "pip install nltk==3.8.1",
    ]
    if not _pip_install_all(ml_commands):
        return False

    # Step 7: Install web frameworks.
    print("\nπ STEP 7: Installing web frameworks")
    web_commands = [
        "pip install fastapi==0.104.1",
        "pip install uvicorn[standard]==0.24.0",
        "pip install gradio==4.7.1",
        "pip install python-multipart==0.0.6",
        "pip install aiofiles==23.2.1",
        "pip install requests==2.31.0",
    ]
    if not _pip_install_all(web_commands):
        return False

    # Step 8: Optional production libraries (failures tolerated).
    print("\nβ‘ STEP 8: Installing production libraries")
    prod_commands = [
        "pip install redis==5.0.1",
        "pip install psutil",
        "pip install python-dotenv",
    ]
    _pip_install_all(prod_commands, critical=False)

    # Step 9: Download NLTK corpora (best-effort).
    print("\nπ STEP 9: Downloading NLTK data")
    nltk_downloads = [
        "python -c \"import nltk; nltk.download('punkt', quiet=True)\"",
        "python -c \"import nltk; nltk.download('wordnet', quiet=True)\"",
        "python -c \"import nltk; nltk.download('omw-1.4', quiet=True)\"",
        "python -c \"import nltk; nltk.download('stopwords', quiet=True)\"",
    ]
    for cmd in nltk_downloads:
        run_command(cmd, "Downloading NLTK data", critical=False)

    # Step 10: Warm the model caches so first use is fast (best-effort).
    print("\nπ€ STEP 10: Pre-downloading models")
    model_downloads = [
        "python -c \"from sentence_transformers import SentenceTransformer; SentenceTransformer('all-MiniLM-L6-v2')\"",
        "python -c \"from transformers import pipeline; pipeline('text2text-generation', model='google/flan-t5-small')\"",
    ]
    for cmd in model_downloads:
        run_command(cmd, "Pre-downloading models", critical=False)

    print("\nπ INSTALLATION COMPLETED!")
    return True
def test_installation():
    """Smoke-test that the installed components import and load.

    Returns:
        dict: Mapping of component name -> bool success flag. Keys cover
        the tested modules plus 'sentence_model', 'paraphrase_model',
        and 'gpu'.
    """
    # Local import: only this function needs dynamic importing.
    import importlib

    print("\nπ§ͺ TESTING INSTALLATION")
    print("=" * 30)
    test_results = {}

    # (module, attribute) pairs; attribute None means a plain import check.
    # importlib + getattr replaces the original exec() calls, which are an
    # anti-pattern (opaque to linters and unnecessary here).
    imports_to_test = [
        ("sentence_transformers", "SentenceTransformer"),
        ("transformers", "pipeline"),
        ("torch", None),
        ("sklearn", None),
        ("nltk", None),
        ("gradio", None),
        ("fastapi", None),
    ]
    for module, component in imports_to_test:
        try:
            mod = importlib.import_module(module)
            if component:
                getattr(mod, component)  # raises AttributeError if missing
            print(f"β {module}: Import successful")
            test_results[module] = True
        except Exception as e:
            print(f"β {module}: Import failed - {e}")
            test_results[module] = False

    # Model loading checks (may hit the network on a cold cache).
    print("\nπ€ Testing model loading...")
    try:
        from sentence_transformers import SentenceTransformer
        model = SentenceTransformer('all-MiniLM-L6-v2')
        print("β Sentence transformer: Model loaded successfully")
        test_results['sentence_model'] = True
    except Exception as e:
        print(f"β Sentence transformer: Model loading failed - {e}")
        test_results['sentence_model'] = False

    try:
        from transformers import pipeline
        paraphraser = pipeline("text2text-generation", model="google/flan-t5-small")
        print("β Paraphrasing model: Model loaded successfully")
        test_results['paraphrase_model'] = True
    except Exception as e:
        print(f"β Paraphrasing model: Model loading failed - {e}")
        test_results['paraphrase_model'] = False

    # GPU availability check; narrow except Exception replaces the original
    # bare `except:` (which would even swallow KeyboardInterrupt).
    try:
        import torch
        if torch.cuda.is_available():
            print(f"β CUDA: {torch.cuda.device_count()} GPU(s) available")
            test_results['gpu'] = True
        else:
            print("π» CUDA: Not available (using CPU)")
            test_results['gpu'] = False
    except Exception:
        test_results['gpu'] = False

    return test_results
def create_production_requirements():
    """Write requirements-production.txt with the pinned, compatible versions.

    The pins mirror exactly what production_install() installs, so the file
    can reproduce the environment with a single `pip install -r`.
    """
    requirements = """# AI Text Humanizer - Production Requirements
# All features enabled with compatible versions

# Core ML frameworks
torch>=2.1.0
transformers==4.35.0
sentence-transformers==2.2.2
huggingface_hub==0.17.3
accelerate==0.24.1

# NLP libraries
nltk==3.8.1
scikit-learn==1.3.2
numpy==1.25.2
pandas==2.1.3

# Web frameworks
fastapi==0.104.1
uvicorn[standard]==0.24.0
gradio==4.7.1
python-multipart==0.0.6
aiofiles==23.2.1
requests==2.31.0

# Production libraries
redis==5.0.1
psutil
python-dotenv

# Build tools
setuptools
wheel
packaging
"""
    # Explicit encoding keeps the write deterministic across platforms.
    with open("requirements-production.txt", "w", encoding="utf-8") as f:
        f.write(requirements)
    print("β Created requirements-production.txt")
def main():
    """Main installation process: environment checks, install, test, summary.

    Prompts interactively when run outside a virtual environment.

    Returns:
        bool: True when at most two (optional) components failed testing.
    """
    print("π AI TEXT HUMANIZER - PRODUCTION SETUP")
    print("======================================")

    # Require a minimum Python version before doing any work.
    if sys.version_info < (3, 7):
        print("β Python 3.7+ required")
        return False
    print(f"π Python {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro} detected")

    # Detect a virtual environment: `real_prefix` covers legacy virtualenv,
    # `base_prefix != prefix` covers venv.
    in_venv = hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)
    if not in_venv:
        print("β οΈ Warning: Not in virtual environment")
        response = input("Continue? (y/n): ").lower().strip()
        if response != 'y':
            print("π Please create a virtual environment first")
            return False
    else:
        print("β Virtual environment detected")

    # Start installation; abort on any critical failure.
    if not production_install():
        print("\nβ Installation failed!")
        return False

    # Test installation and emit the pinned requirements file.
    test_results = test_installation()
    create_production_requirements()

    # Summary of which components came up.
    print("\nπ INSTALLATION SUMMARY")
    print("=" * 30)
    success_count = sum(1 for v in test_results.values() if v)
    total_count = len(test_results)
    print(f"β {success_count}/{total_count} components working")

    if test_results.get('sentence_model') and test_results.get('paraphrase_model'):
        print("π ALL ADVANCED FEATURES ENABLED!")
        print("   β’ Advanced semantic similarity β")
        print("   β’ AI paraphrasing capabilities β")
        print("   β’ Production-ready performance β")
    elif test_results.get('sentence_model'):
        print("β οΈ Advanced similarity enabled, paraphrasing needs attention")
    elif test_results.get('paraphrase_model'):
        print("β οΈ Paraphrasing enabled, similarity needs attention")
    else:
        print("β Advanced features need troubleshooting")

    print("\nπ― NEXT STEPS:")
    print("1. Test: python text_humanizer_robust.py")
    print("2. Run API: python fastapi_server.py")
    print("3. Run web UI: python gradio_app.py")

    # Allow up to two optional components (e.g. GPU, redis) to fail.
    return success_count >= total_count - 2
if __name__ == "__main__":
    # Top-level boundary: report outcomes and convert any unexpected
    # exception into a message instead of a raw traceback.
    try:
        success = main()
        if success:
            print("\nπ Production installation successful!")
        else:
            print("\nβ Production installation needs attention")
    except KeyboardInterrupt:
        print("\nπ Installation cancelled")
    except Exception as e:
        print(f"\nβ Unexpected error: {e}")