# Source: adaptai/aiml/04_data/etl_pipelines/processing/gcp_vertex_integration.py
# Uploaded by ADAPT-Chase using the upload-large-folder tool (commit 2021f39, verified).
#!/usr/bin/env python3
"""
GCP VERTEX AI INTEGRATION PIPELINE
Advanced corpus processing integration with Google Cloud Platform
$1600 credits allocation for initial processing
"""
import os
import json
import logging
import asyncio
import aiohttp
from datetime import datetime
from typing import List, Dict, Any, Optional
from pathlib import Path
import pandas as pd
import numpy as np
from google.cloud import aiplatform
from google.cloud import dialogflowcx_v3beta1 as dialogflowcx
from google.oauth2 import service_account
# Setup advanced logging (file + console).
# logging.FileHandler raises FileNotFoundError when the parent directory is
# missing, and nothing else in this file creates the etl_pipelines/logs dir
# (setup_directories only creates dirs under gcp_processed) — so ensure it
# exists before configuring handlers.
_LOG_FILE = '/data/adaptai/aiml/04_data/etl_pipelines/logs/gcp_vertex_integration.log'
os.makedirs(os.path.dirname(_LOG_FILE), exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(_LOG_FILE),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)
class GCPVertexIntegration:
    """Advanced GCP Vertex AI integration for corpus processing.

    Creates the local output directory layout, initializes GCP clients
    (Vertex AI + Dialogflow CX) when service-account credentials are present,
    enriches documents in batches, persists results as JSONL, and tracks
    estimated credit consumption against the initial allocation.

    The Vertex AI / Dialogflow analysis methods are currently simulated
    placeholders (see ``_analyze_text_with_vertex`` / ``_semantic_analysis``);
    real API calls are sketched in comments for later wiring.
    """

    # Initial GCP credit allocation in USD ($1600, per the module docstring).
    INITIAL_CREDITS_USD = 1600

    def __init__(self):
        self.corpus_dir = '/data/adaptai/aiml/04_data/corpora'
        self.processed_dir = '/data/adaptai/aiml/04_data/etl_pipelines/processing/gcp_processed'
        self.credentials_path = '/data/adaptai/aiml/04_data/etl_pipelines/config/gcp_credentials.json'
        self.setup_directories()
        self.setup_gcp_clients()

    def setup_directories(self):
        """Create the GCP processing output directory tree (idempotent)."""
        directories = [
            self.processed_dir,
            f'{self.processed_dir}/vertex_ai',
            f'{self.processed_dir}/dialogflow_cx',
            f'{self.processed_dir}/quality_reports',
            f'{self.processed_dir}/logs'
        ]
        for directory in directories:
            os.makedirs(directory, exist_ok=True)
        logger.info("🚀 GCP processing directories setup complete")

    def setup_gcp_clients(self):
        """Initialize GCP clients with service account credentials.

        Sets ``self.gcp_available`` so callers can degrade gracefully to
        local-only processing when credentials are missing or invalid.
        Never raises: any initialization failure is logged and recorded
        as ``gcp_available = False``.
        """
        try:
            if os.path.exists(self.credentials_path):
                credentials = service_account.Credentials.from_service_account_file(
                    self.credentials_path,
                    scopes=['https://www.googleapis.com/auth/cloud-platform']
                )
                # Initialize the Vertex AI SDK; env vars override the
                # project/region defaults.
                aiplatform.init(
                    credentials=credentials,
                    project=os.getenv('GCP_PROJECT_ID', 'adaptai-nova'),
                    location=os.getenv('GCP_LOCATION', 'us-central1')
                )
                # Async client so detect_intent can be awaited from the pipeline.
                self.dialogflow_client = dialogflowcx.SessionsAsyncClient(credentials=credentials)
                logger.info("✅ GCP clients initialized successfully")
                self.gcp_available = True
            else:
                logger.warning("GCP credentials not found, running in local mode")
                self.gcp_available = False
        except Exception as e:
            logger.error(f"Failed to initialize GCP clients: {e}")
            self.gcp_available = False

    async def process_with_vertex_ai(self, documents: List[Dict]) -> List[Dict]:
        """Process documents using Vertex AI Text Analysis.

        Documents are processed in batches of 50 for efficiency. Returns the
        input unchanged when GCP is unavailable or the list is empty; on a
        batch-level failure the original (unprocessed) documents are returned
        so the pipeline never loses data.
        """
        if not self.gcp_available or not documents:
            return documents
        processed_docs = []
        try:
            batch_size = 50
            for i in range(0, len(documents), batch_size):
                batch = documents[i:i + batch_size]
                processed_batch = await self._process_batch_vertex_ai(batch)
                processed_docs.extend(processed_batch)
                logger.info(f"Processed batch {i//batch_size + 1}: {len(processed_batch)} documents")
        except Exception as e:
            logger.error(f"Vertex AI processing failed: {e}")
            # Fallback to local processing: hand back the untouched input.
            processed_docs = documents
        return processed_docs

    async def _process_batch_vertex_ai(self, batch: List[Dict]) -> List[Dict]:
        """Process a batch of documents with Vertex AI.

        Each document must carry a ``'text'`` key. Per-document failures are
        logged and the original document is kept (tagged
        ``vertex_ai_processed: False``) rather than dropped.
        """
        processed_batch = []
        for doc in batch:
            try:
                # Enhanced text analysis with Vertex AI.
                enhanced_text = await self._analyze_text_with_vertex(doc['text'])
                # Semantic enrichment via Dialogflow CX.
                semantic_analysis = await self._semantic_analysis(doc['text'])
                processed_doc = {
                    **doc,
                    'processed_text': enhanced_text,
                    'semantic_analysis': semantic_analysis,
                    'vertex_ai_processed': True,
                    'processing_timestamp': datetime.now().isoformat(),
                    'gcp_credits_used': self._estimate_credits_used(doc['text'])
                }
                processed_batch.append(processed_doc)
            except Exception as e:
                logger.warning(f"Failed to process document with Vertex AI: {e}")
                # Keep original document if processing fails.
                processed_batch.append({**doc, 'vertex_ai_processed': False})
        return processed_batch

    async def _analyze_text_with_vertex(self, text: str) -> Dict:
        """Analyze text using Vertex AI Natural Language Processing.

        Placeholder: returns a fixed-shape simulated analysis. The real
        implementation would use:
            from google.cloud import language_v1
            client = language_v1.LanguageServiceClient()
        """
        # Simulate Vertex AI analysis for now.
        return {
            'original_text': text,
            'entities': [],
            'sentiment': {'score': 0.8, 'magnitude': 0.5},
            'syntax_analysis': {},
            'classification': {}
        }

    async def _semantic_analysis(self, text: str) -> Dict:
        """Perform semantic analysis using Dialogflow CX.

        Placeholder: returns a fixed simulated response. The real call is
        sketched below for when an agent is provisioned.
        """
        try:
            # session_path = self.dialogflow_client.session_path(
            #     project=os.getenv('GCP_PROJECT_ID'),
            #     location=os.getenv('GCP_LOCATION'),
            #     agent=os.getenv('DIALOGFLOW_AGENT_ID'),
            #     session='test-session'
            # )
            # text_input = dialogflowcx.TextInput(text=text)
            # query_input = dialogflowcx.QueryInput(text=text_input, language_code='en')
            #
            # request = dialogflowcx.DetectIntentRequest(
            #     session=session_path,
            #     query_input=query_input
            # )
            #
            # response = await self.dialogflow_client.detect_intent(request=request)
            # Simulate Dialogflow response.
            return {
                'intent_detection': {'confidence': 0.85, 'intent': 'general_conversation'},
                'entities': [],
                'sentiment_analysis': {'score': 0.7, 'magnitude': 0.6},
                'dialogflow_processed': True
            }
        except Exception as e:
            logger.warning(f"Dialogflow CX analysis failed: {e}")
            return {'dialogflow_processed': False, 'error': str(e)}

    def _estimate_credits_used(self, text: str) -> float:
        """Estimate GCP credits (USD) used to process *text*.

        Rough estimate: $0.0001 per 1000 characters.
        """
        text_length = len(text)
        return (text_length / 1000) * 0.0001

    def save_gcp_processed_corpus(self, processed_docs: List[Dict]) -> str:
        """Save the GCP-processed corpus as timestamped JSONL.

        Returns the path written, or ``""`` on failure.
        """
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f"gcp_vertex_corpus_{timestamp}.jsonl"
        # BUGFIX: the path previously embedded the literal "(unknown)" instead
        # of the computed filename, so every run overwrote one unnamed file.
        filepath = f"{self.processed_dir}/vertex_ai/{filename}"
        try:
            with open(filepath, 'w', encoding='utf-8') as f:
                for doc in processed_docs:
                    f.write(json.dumps(doc, ensure_ascii=False) + '\n')
            logger.info(f"Saved {len(processed_docs)} GCP-processed documents to {filepath}")
            return filepath
        except Exception as e:
            logger.error(f"Failed to save GCP corpus: {e}")
            return ""

    def generate_credit_report(self, processed_docs: List[Dict]) -> Dict:
        """Generate and persist a GCP credit usage report.

        Returns ``{}`` for an empty input; otherwise writes a JSON report
        under quality_reports/ and returns the report dict.
        """
        if not processed_docs:
            return {}
        total_credits = sum(doc.get('gcp_credits_used', 0) for doc in processed_docs)
        processed_count = sum(1 for doc in processed_docs if doc.get('vertex_ai_processed', False))
        report = {
            'processing_date': datetime.now().isoformat(),
            'total_documents': len(processed_docs),
            'processed_with_gcp': processed_count,
            'total_credits_used': total_credits,
            # Credits are estimated directly in USD, so cost == credits.
            'estimated_cost_usd': total_credits,
            'remaining_credits': self.INITIAL_CREDITS_USD - total_credits,
            'credit_utilization_percent': (total_credits / self.INITIAL_CREDITS_USD) * 100,
            'processing_efficiency': processed_count / len(processed_docs) if processed_docs else 0
        }
        # Persist the report alongside other quality artifacts.
        report_path = f"{self.processed_dir}/quality_reports/credit_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
        with open(report_path, 'w') as f:
            json.dump(report, f, indent=2)
        logger.info(f"Credit report saved to {report_path}")
        return report
async def main():
    """Entry point: run the GCP Vertex AI integration pipeline end to end."""
    logger.info("🚀 Starting GCP Vertex AI Integration Pipeline")
    pipeline = GCPVertexIntegration()
    try:
        # A single synthetic document keeps this smoke run cheap.
        sample_docs = [
            {
                'text': 'This is a sample document for GCP Vertex AI processing.',
                'source': 'test',
                'metadata': {'test': True}
            }
        ]

        logger.info("Step 1: Processing with Vertex AI...")
        enriched = await pipeline.process_with_vertex_ai(sample_docs)

        logger.info("Step 2: Saving GCP-processed corpus...")
        saved_path = pipeline.save_gcp_processed_corpus(enriched)

        logger.info("Step 3: Generating credit usage report...")
        report = pipeline.generate_credit_report(enriched)

        # Summarize the run for the operator.
        logger.info("✅ GCP integration complete!")
        logger.info(f" • Processed documents: {len(enriched)}")
        logger.info(f" • Corpus saved: {saved_path}")
        logger.info(f" • Credits used: ${report.get('total_credits_used', 0):.6f}")
        logger.info(f" • Remaining credits: ${report.get('remaining_credits', 1600):.2f}")
    except Exception as e:
        logger.error(f"❌ GCP integration failed: {e}")
        raise
# Script entry point: drive the async pipeline to completion.
if __name__ == "__main__":
    asyncio.run(main())