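"""Smoke test: run openai/gpt-oss-20b on an HTTP-backed virtual GPU.

The model is loaded through a transformers pipeline onto a torch_vgpu
device backed by HTTPGPUStorage / VirtualVRAM, one text-generation pass
is executed, and timing and memory statistics are logged.
"""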
import logging
import time
from contextlib import contextmanager

import torch
from transformers import pipeline

from virtual_vram import VirtualVRAM
from http_storage import HTTPGPUStorage
from torch_vgpu import init_vgpu_backend, VGPUDevice


def setup_vgpu():
    """Initialize the vGPU backend and return a device handle."""
    try:
        # Initialize the backend before creating any device
        if not init_vgpu_backend():
            raise RuntimeError("Failed to initialize vGPU backend")

        # Create the vGPU device and return its handle to the caller
        vgpu = VGPUDevice()
        return vgpu.device()
    except Exception as e:
        logging.error(f"vGPU setup failed: {e}")
        raise
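

# Assumption: VGPUDevice().device() returns a torch.device-compatible handle
# that transformers accepts inside `device_map` (see the pipeline call below).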

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


@contextmanager
def gpu_context():
    """Context manager for vGPU storage resources."""
    storage = None
    try:
        storage = HTTPGPUStorage()
        yield storage
    finally:
        if storage:
            storage.close()
            logger.info("vGPU resources cleaned up")


def get_model_size(model):
    """Return the model's memory footprint in bytes (parameters plus buffers)."""
    param_size = sum(p.nelement() * p.element_size() for p in model.parameters())
    buffer_size = sum(b.nelement() * b.element_size() for b in model.buffers())
    return param_size + buffer_size


def prepare_prompt(instruction: str) -> str:
    """Prepare a prompt in the Llama-2 chat format (unused by the gpt-oss
    test below, which sends a plain prompt)."""
    # Format: <s>[INST] instruction [/INST] assistant response </s>[INST] ...
    return f"<s>[INST] {instruction} [/INST]"


def test_ai_integration_http():
    """Test GPT-OSS model on vGPU with text generation."""
    logger.info("Starting vGPU text generation test")
    status = {
        'pipeline_loaded': False,
        'model_on_vgpu': False,
        'generation_complete': False,
        'cleanup_success': False,
    }
    with gpu_context() as storage:
        initial_mem = 0  # safe default in case the storage query below fails
        try:
            # Record the baseline memory usage so deltas can be reported later
            if hasattr(storage, 'get_used_memory'):
                initial_mem = storage.get_used_memory()
            # Virtual VRAM backed by the HTTP storage; kept alive for the test
            vram = VirtualVRAM(size_gb=None, storage=storage)

            # Initialize the vGPU device
            device = setup_vgpu()
            logger.info(f"vGPU initialized with device {device}")

            # Load the model through a transformers pipeline
            model_id = "openai/gpt-oss-20b"
            logger.info(f"Loading {model_id}")
            try:
                # Silence transformers logging while the model loads
                transformers_logger = logging.getLogger("transformers")
                original_level = transformers_logger.level
                transformers_logger.setLevel(logging.ERROR)
                try:
                    # Create the pipeline with the model placed directly on the
                    # vGPU. device_map={"": device} maps the root module (and
                    # therefore every submodule) to that device; `device=` is
                    # not passed alongside it, since recent transformers
                    # releases reject the combination.
                    pipe = pipeline(
                        "text-generation",
                        model=model_id,
                        model_kwargs={
                            "torch_dtype": torch.float32,  # full precision
                            "device_map": {"": device},    # map all modules to the vGPU
                            "use_safetensors": True,
                        },
                        trust_remote_code=True,
                    )
status["pipeline_loaded"] = True
status['model_on_vgpu'] = True
# Log model details
logger.info(f"Pipeline created with model: {model_id}")
# Log model size
model_size = get_model_size(pipe.model)
logger.info(f"Model loaded: {model_size/1e9:.2f} GB in parameters")
logger.info(f"Model architecture: {pipe.model.__class__.__name__}")
# Verify model location
with torch.device(device):
current_mem = storage.get_used_memory() if hasattr(storage, 'get_used_memory') else 0
logger.info(f"Model memory usage: {(current_mem - initial_mem)/1e9:.2f} GB")
finally:
# Restore original logging level
transformers_logger.setLevel(original_level)
            except Exception as e:
                logger.error(f"Model loading / transfer to vGPU failed: {e}")
                raise

            # Run text generation
            logger.info("Running text generation...")
            start = time.time()
            peak_mem = initial_mem
            try:
                # Prepare the input prompt
                prompt = "Explain how virtual GPUs work in simple terms."
                with torch.no_grad():
                    outputs = pipe(
                        prompt,
                        max_new_tokens=256,
                        temperature=0.7,
                        top_p=0.95,
                        top_k=40,
                        num_beams=1,
                        do_sample=True,
                        return_full_text=True,
                    )
                if hasattr(storage, 'get_used_memory'):
                    peak_mem = max(peak_mem, storage.get_used_memory())
                inference_time = time.time() - start
                status['generation_complete'] = True

                # Log performance metrics
                logger.info("\nGeneration stats:")
                logger.info(f"- Time: {inference_time:.4f}s")
                logger.info(f"- Memory peak: {(peak_mem - initial_mem) / 1e9:.2f} GB")
                logger.info(f"- Generated text: {outputs[0]['generated_text']}")
            except Exception as e:
                logger.error(f"Text generation failed: {e}")
                raise
        except Exception as e:
            logger.error(f"Test failed: {e}")
            raise
        finally:
            # Cleanup and status report
            try:
                if 'pipe' in locals():
                    del pipe
                if 'outputs' in locals():
                    del outputs
                # torch.cuda always exists as a module, so guard on actual
                # CUDA availability rather than hasattr(torch, 'cuda')
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()
                status['cleanup_success'] = True
            except Exception as e:
                logger.error(f"Cleanup error: {e}")

            logger.info("\nTest Summary:")
            for key, value in status.items():
                logger.info(f"- {key}: {'✓' if value else '✗'}")

            final_mem = storage.get_used_memory() if hasattr(storage, 'get_used_memory') else 0
            if final_mem > initial_mem:
                logger.warning(f"Memory leak detected: {(final_mem - initial_mem) / 1e6:.2f} MB")


if __name__ == "__main__":
    test_ai_integration_http()