# Proofly / sidecar / gemini_sidecar.py
# (Initial clean commit for Hugging Face Space, 2c41dce)
"""
Gemini AI Sidecar
Non-authoritative assistant layer for proof explanation and guidance.
CRITICAL: This module is an OBSERVER, not a decision-maker.
- It explains proofs, never validates them
- It guides users, never creates proofs
- It fails gracefully, never blocks core operations
"""
from typing import Dict, Any, Optional
import logging
from dataclasses import dataclass
from models.proof import Proof, VerificationResult
from config.settings import settings
# Lazy import - only load if AI is enabled
try:
import google.generativeai as genai
GEMINI_AVAILABLE = True
except ImportError:
GEMINI_AVAILABLE = False
logger = logging.getLogger(__name__)
@dataclass
class AssistantResponse:
    """
    Non-authoritative AI response wrapper.

    Makes it explicit that this is guidance, not fact.
    """
    provider: str = "gemini"
    role: str = "explanatory"
    response: str = ""
    confidence: str = "non-authoritative"
    error: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        payload: Dict[str, Any] = {
            "provider": self.provider,
            "role": self.role,
            "response": self.response,
            "confidence": self.confidence,
        }
        # Only expose the "error" key when an error was actually recorded.
        if self.error:
            payload["error"] = self.error
        return payload
class GeminiSidecar:
    """
    AI Sidecar for providing explanations and guidance.

    This is NOT part of the deterministic core.
    All methods return graceful fallbacks on failure.
    """

    def __init__(self):
        """Initialize Gemini client if available and configured."""
        self.enabled = False  # flipped to True only by a successful _initialize()
        self.client = None    # genai.GenerativeModel instance when enabled
        self._initialize()

    def _initialize(self):
        """
        Initialize Gemini client.

        Fails gracefully (leaves self.enabled False) if the feature is
        disabled, dependencies are missing, or the API key is invalid.
        """
        # Check if AI is enabled in config
        if not settings.AI_ENABLED:
            logger.info("AI Sidecar is disabled in configuration")
            return
        # Check if dependencies available
        if not GEMINI_AVAILABLE:
            logger.warning(
                "Gemini dependencies not available. "
                "Install with: pip install google-generativeai"
            )
            return
        # Check if API key is configured
        if not settings.validate_ai():
            logger.warning(
                "Gemini API key not configured. "
                "Set GEMINI_API_KEY environment variable to enable AI features."
            )
            return
        try:
            # Configure Gemini
            genai.configure(api_key=settings.GEMINI_API_KEY)
            self.client = genai.GenerativeModel(settings.GEMINI_MODEL)
            self.enabled = True
            # Lazy %-args: formatting is deferred until the record is emitted.
            logger.info("AI Sidecar initialized with model: %s", settings.GEMINI_MODEL)
        except Exception:
            # Broad catch is deliberate: initialization failure must never
            # block core operations. logger.exception records the traceback.
            logger.exception("Failed to initialize Gemini client")
            self.enabled = False

    def explain_proof(self, proof: Proof) -> AssistantResponse:
        """
        Explain what a proof means in plain language.

        Args:
            proof: Proof object to explain

        Returns:
            AssistantResponse with explanation or fallback
        """
        if not self.enabled:
            return self._fallback_explain_proof(proof)
        try:
            prompt = self._build_proof_explanation_prompt(proof)
            response = self._call_gemini(prompt)
            return AssistantResponse(
                response=response
            )
        except Exception as e:
            logger.exception("AI explanation failed: %s", e)
            return self._fallback_explain_proof(proof)

    def explain_verification(
        self,
        verification_result: VerificationResult,
        proof: Optional[Proof] = None
    ) -> AssistantResponse:
        """
        Explain what a verification result means.

        Args:
            verification_result: Verification result to explain
            proof: Optional original proof for context

        Returns:
            AssistantResponse with explanation or fallback
        """
        if not self.enabled:
            return self._fallback_explain_verification(verification_result)
        try:
            prompt = self._build_verification_explanation_prompt(
                verification_result,
                proof
            )
            response = self._call_gemini(prompt)
            return AssistantResponse(
                response=response
            )
        except Exception as e:
            logger.exception("AI verification explanation failed: %s", e)
            return self._fallback_explain_verification(verification_result)

    def summarize_content(self, extracted_text: str) -> AssistantResponse:
        """
        Summarize extracted text content.

        Args:
            extracted_text: Text extracted from OCR or document

        Returns:
            AssistantResponse with summary or fallback
        """
        if not self.enabled:
            return self._fallback_summarize(extracted_text)
        # Empty / whitespace-only input short-circuits before any API call.
        if not extracted_text or not extracted_text.strip():
            return AssistantResponse(
                response="No text content available to summarize."
            )
        try:
            prompt = self._build_summarization_prompt(extracted_text)
            response = self._call_gemini(prompt)
            return AssistantResponse(
                response=response
            )
        except Exception as e:
            logger.exception("AI summarization failed: %s", e)
            return self._fallback_summarize(extracted_text)

    def answer_question(
        self,
        question: str,
        proof: Optional[Proof] = None,
        context: Optional[str] = None
    ) -> AssistantResponse:
        """
        Answer user questions about proofs or content.

        Args:
            question: User's question
            proof: Optional proof for context
            context: Optional additional context

        Returns:
            AssistantResponse with answer or fallback
        """
        if not self.enabled:
            return self._fallback_answer()
        try:
            prompt = self._build_question_prompt(question, proof, context)
            response = self._call_gemini(prompt)
            return AssistantResponse(
                response=response
            )
        except Exception as e:
            logger.exception("AI question answering failed: %s", e)
            return self._fallback_answer()

    def _call_gemini(self, prompt: str) -> str:
        """
        Make API call to Gemini.

        NOTE(review): the original docstring claimed a timeout was applied,
        but no request timeout is configured here — the SDK default applies.
        TODO: confirm whether an explicit request timeout should be set.

        Args:
            prompt: Prompt to send to Gemini

        Returns:
            Generated response text (whitespace-stripped)

        Raises:
            RuntimeError: If the client was never initialized
            Exception: Propagated from the SDK if the API call fails
        """
        if not self.client:
            # RuntimeError (an Exception subclass) is still caught by the
            # public methods' broad handlers, so callers are unaffected.
            raise RuntimeError("Gemini client not initialized")
        response = self.client.generate_content(
            prompt,
            generation_config={
                "temperature": 0.7,
                "max_output_tokens": 500,
            }
        )
        return response.text.strip()

    def _build_proof_explanation_prompt(self, proof: Proof) -> str:
        """Build prompt for proof explanation."""
        return f"""You are an assistant explaining a cryptographic proof of existence.
Proof Details:
- Proof ID: {proof.proof_id}
- Content Type: {proof.content_type}
- Content Size: {proof.content_size} bytes
- Hash: {proof.content_hash[:16]}...
- Timestamp: {proof.timestamp}
- OCR Status: {proof.ocr_status or 'not applicable'}
Explain in 2-3 sentences what this proof means and why it's useful.
Focus on:
1. What was proven (that content existed at a point in time)
2. How it works (cryptographic hash)
3. Why it's trustworthy (immutable)
Keep it simple and non-technical."""

    def _build_verification_explanation_prompt(
        self,
        result: VerificationResult,
        proof: Optional[Proof]
    ) -> str:
        """Build prompt for verification explanation."""
        status = "VALID" if result.is_valid else "INVALID"
        prompt = f"""You are an assistant explaining a proof verification result.
Verification Result:
- Status: {status}
- Original Hash: {result.original_hash[:16]}...
- Computed Hash: {result.computed_hash[:16]}...
- Match: {result.is_valid}
Explain in 2-3 sentences what this verification result means.
If valid: explain that the content hasn't changed.
If invalid: explain that the content has been modified."""
        if proof and proof.extracted_text:
            prompt += f"\n\nOriginal content was: {proof.content_type}"
        return prompt + "\n\nKeep it simple and actionable."

    def _build_summarization_prompt(self, text: str) -> str:
        """Build prompt for content summarization."""
        # Truncate if very long — keeps the prompt within a small token budget.
        max_chars = 2000
        truncated = text[:max_chars]
        if len(text) > max_chars:
            truncated += "... (truncated)"
        return f"""Summarize the following text in 2-3 sentences:
{truncated}
Focus on:
- Main topic or purpose
- Key information
- Document type (if identifiable)
Keep it concise and factual."""

    def _build_question_prompt(
        self,
        question: str,
        proof: Optional[Proof],
        context: Optional[str]
    ) -> str:
        """Build prompt for question answering."""
        prompt = f"""You are an assistant helping users understand cryptographic proofs.
User Question: {question}
"""
        if proof:
            prompt += f"""
Proof Context:
- Type: {proof.content_type}
- Size: {proof.content_size} bytes
- Created: {proof.timestamp}
- Hash: {proof.content_hash[:16]}...
"""
            if proof.extracted_text:
                prompt += f"- Extracted Text: {proof.extracted_text[:200]}...\n"
        if context:
            prompt += f"\nAdditional Context: {context}\n"
        prompt += """
Provide a helpful answer in 2-3 sentences.
Remember: You are explaining, not validating.
Focus on helping the user understand proofs and their uses."""
        return prompt

    # Fallback methods - used when AI is unavailable

    def _fallback_explain_proof(self, proof: Proof) -> AssistantResponse:
        """Fallback explanation when AI is unavailable."""
        response = (
            f"This is a cryptographic proof that {proof.content_type} content "
            f"({proof.content_size} bytes) existed at {proof.timestamp}. "
            f"The proof uses a SHA-256 hash to ensure the content cannot be altered "
            f"without detection."
        )
        return AssistantResponse(
            response=response,
            error="AI assistant unavailable, using fallback explanation"
        )

    def _fallback_explain_verification(
        self,
        result: VerificationResult
    ) -> AssistantResponse:
        """Fallback verification explanation when AI is unavailable."""
        if result.is_valid:
            response = (
                "Verification successful: The content matches the original proof. "
                "The cryptographic hash is identical, confirming no modifications "
                "have been made."
            )
        else:
            response = (
                "Verification failed: The content does not match the original proof. "
                "The cryptographic hashes differ, indicating the content has been "
                "modified since the proof was created."
            )
        return AssistantResponse(
            response=response,
            error="AI assistant unavailable, using fallback explanation"
        )

    def _fallback_summarize(self, text: str) -> AssistantResponse:
        """Fallback summarization when AI is unavailable."""
        word_count = len(text.split())
        char_count = len(text)
        preview = text[:150] + "..." if len(text) > 150 else text
        response = (
            f"Content preview ({word_count} words, {char_count} characters): "
            f"{preview}"
        )
        return AssistantResponse(
            response=response,
            error="AI assistant unavailable, showing content preview"
        )

    def _fallback_answer(self) -> AssistantResponse:
        """Fallback answer when AI is unavailable."""
        return AssistantResponse(
            response=(
                "AI assistant is currently unavailable. "
                "For questions about proofs, refer to the documentation or "
                "contact support."
            ),
            error="AI assistant unavailable"
        )