| | """ |
| | Nomination Assistant for Award Identification |
| | |
| | AI-assisted preparation of award nominations and grant applications. |
| | |
| | FEATURES (Planned): |
| | ------------------ |
| | 1. DOCUMENT GENERATION: |
| | - Executive summaries |
| | - Project descriptions |
| | - Budget justifications |
| | - Team CVs and bios |
| | |
| | 2. TEMPLATE MATCHING: |
| | - Match to funder templates |
| | - Format compliance checking |
| | - Character/word limit validation |
| | |
| | 3. QUALITY ASSURANCE: |
| | - CriticAgent validation |
| | - Reviewer simulation |
| | - Gap identification |
| | |
| | 4. COLLABORATION: |
| | - Multi-author editing |
| | - Comment and review workflows |
| | - Version control |
| | |
| | HUMAN-IN-THE-LOOP: |
| | ----------------- |
| | Document preparation requires extensive human input: |
| | - Initial content drafting |
| | - Review and revision cycles |
| | - Final approval before submission |
| | |
| | This assistant accelerates the process but doesn't replace |
| | human expertise in grant writing. |
| | |
| | Author: SPARKNET Team |
| | Project: VISTA/Horizon EU |
| | Status: Placeholder - In Development |
| | """ |
| |
|
| | from typing import Optional, Dict, Any, List |
| | from dataclasses import dataclass, field |
| | from datetime import datetime |
| | from enum import Enum |
| | from loguru import logger |
| |
|
| |
|
class DocumentTemplate(str, Enum):
    """Standard document templates.

    Inherits from ``str`` so members compare equal to their string
    values and serialize cleanly (e.g. ``template.value`` in dicts,
    as used by ``check_format_compliance``).
    """
    HORIZON_PROPOSAL = "horizon_proposal"    # Horizon Europe proposal format
    ERC_APPLICATION = "erc_application"      # ERC grant application
    NATIONAL_GRANT = "national_grant"        # national funding scheme
    AWARD_NOMINATION = "award_nomination"    # award nomination document
    LETTER_OF_INTENT = "letter_of_intent"    # letter of intent / expression of interest
    BUDGET_TEMPLATE = "budget_template"      # budget document
    CV_EUROPASS = "cv_europass"              # Europass-style CV
    CUSTOM = "custom"                        # user-supplied template (see template_library)
| |
|
| |
|
class ReviewStatus(str, Enum):
    """Document review status.

    Workflow states a document moves through, roughly in this order
    (``REVISION_NEEDED`` may loop back to ``DRAFT``).
    Inherits from ``str`` for easy serialization/comparison.
    """
    DRAFT = "draft"                      # initial authoring state
    INTERNAL_REVIEW = "internal_review"  # under review by the team / AI
    REVISION_NEEDED = "revision_needed"  # review found issues; revise and resubmit
    APPROVED = "approved"                # cleared for submission
    SUBMITTED = "submitted"              # sent to the funder / award body
| |
|
| |
|
@dataclass
class DocumentSection:
    """
    Section of a nomination document.

    Represents a structured section with content and metadata.
    Instances are mutable: status, word counts, and review feedback are
    presumably updated in place as the section moves through the
    review workflow (TODO confirm against the eventual workflow code).
    """
    section_id: str                   # unique identifier; generate_section uses "sec_<timestamp>"
    title: str                        # human-readable section title
    content: str                      # section body text
    word_limit: Optional[int] = None  # funder-imposed word limit, None if unlimited
    current_words: int = 0            # current word count of `content`
    status: str = "draft"             # free-form workflow state string (starts as "draft")
    ai_generated: bool = False        # True when content was produced by the LLM
    human_reviewed: bool = False      # True once a human has signed off on the section
    reviewer_comments: List[str] = field(default_factory=list)  # feedback collected from reviewers
    suggestions: List[str] = field(default_factory=list)        # actionable improvement suggestions
| |
|
| |
|
@dataclass
class DocumentReview:
    """
    Review of a nomination document.

    Contains feedback from AI and human reviewers; ``simulate_review``
    produces instances with ``reviewer_type == "ai"``.
    """
    review_id: str                            # unique identifier; simulate_review uses "rev_<timestamp>"
    document_id: str                          # ID of the document being reviewed
    reviewer_type: str                        # origin of the review, e.g. "ai" or "human"
    reviewer_name: Optional[str] = None       # display name of the reviewer, if known
    overall_score: Optional[float] = None     # aggregate score; None until scored
    section_scores: Dict[str, float] = field(default_factory=dict)  # per-section scores keyed by section id/type
    strengths: List[str] = field(default_factory=list)    # identified strengths
    weaknesses: List[str] = field(default_factory=list)   # identified weaknesses
    suggestions: List[str] = field(default_factory=list)  # actionable improvement suggestions
    decision: str = "pending"                 # review outcome; starts as "pending"
    # NOTE(review): naive local timestamp (datetime.now, no tz). If reviews
    # cross timezones, consider datetime.now(timezone.utc) — verify callers first.
    created_at: datetime = field(default_factory=datetime.now)
| |
|
| |
|
class NominationAssistant:
    """
    AI assistant for preparing nominations and applications.

    This component:
    - Generates document sections
    - Checks format compliance
    - Simulates reviewer feedback
    - Manages revision workflows

    INTEGRATION WITH CRITICAGENT:
    -----------------------------
    Uses CriticAgent for:
    - Document quality validation
    - Format compliance checking
    - Reviewer perspective simulation
    - Gap and weakness identification

    CONFIDENCE SCORING:
    ------------------
    All AI-generated content includes:
    - Confidence score (0.0-1.0)
    - Source references where applicable
    - Suggestions for improvement
    - Flag for human review

    Generated content with a confidence score below
    ``confidence_threshold`` is automatically flagged for human review.

    Status: placeholder — every ``async`` method currently returns stub
    data and is marked with a TODO for the real implementation.
    """

    # Section types that always require human review regardless of the AI
    # confidence score (matched case-insensitively). Hoisted to a class-level
    # frozenset so the collection is built once and membership tests are O(1),
    # instead of rebuilding a list on every requires_human_review() call.
    CRITICAL_SECTIONS: frozenset = frozenset({
        "executive_summary",
        "budget",
        "team_qualifications",
        "methodology",
    })

    def __init__(
        self,
        llm_client: Optional[Any] = None,
        critic_agent: Optional[Any] = None,
        template_library: Optional[Dict[str, Any]] = None,
        confidence_threshold: float = 0.7,
    ):
        """
        Initialize Nomination Assistant.

        Args:
            llm_client: LangChain LLM client for content generation
            critic_agent: CriticAgent for validation
            template_library: Library of document templates
            confidence_threshold: Minimum AI confidence (0.0-1.0) below which
                generated content is flagged for human review. Defaults to
                0.7, the previously hard-coded value, so existing callers
                are unaffected.
        """
        self.llm_client = llm_client
        self.critic_agent = critic_agent
        self.template_library = template_library or {}
        self.name = "NominationAssistant"
        self.confidence_threshold = confidence_threshold

        logger.info(f"Initialized {self.name} (placeholder)")

    async def generate_section(
        self,
        document_id: str,
        section_type: str,
        context: Dict[str, Any],
        word_limit: Optional[int] = None,
    ) -> DocumentSection:
        """
        Generate a document section using AI.

        Args:
            document_id: Parent document ID
            section_type: Type of section to generate (snake_case; used to
                derive the section title)
            context: Context information for generation (currently unused
                by the placeholder)
            word_limit: Optional word limit

        Returns:
            Generated section with ``ai_generated=True``; content is a
            placeholder string until LLM generation is implemented.

        TODO: Implement actual LLM generation
        """
        logger.info(f"Generating {section_type} section for document: {document_id}")

        return DocumentSection(
            section_id=f"sec_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
            title=section_type.replace("_", " ").title(),
            content="[AI-generated content placeholder]",
            word_limit=word_limit,
            current_words=0,
            status="draft",
            ai_generated=True,
            human_reviewed=False,
            suggestions=["Complete implementation with actual LLM generation"],
        )

    async def check_format_compliance(
        self,
        document_id: str,
        template: DocumentTemplate,
    ) -> Dict[str, Any]:
        """
        Check document compliance with template requirements.

        Args:
            document_id: Document to check
            template: Template to check against

        Returns:
            Compliance report dict with keys: ``document_id``, ``template``,
            ``compliant``, ``issues``, ``word_counts``, ``missing_sections``.
            The placeholder always reports non-compliance with an
            informational issue.

        TODO: Implement actual compliance checking
        """
        logger.info(f"Checking format compliance for document: {document_id}")

        return {
            "document_id": document_id,
            "template": template.value,
            "compliant": False,
            "issues": [
                {
                    "type": "placeholder",
                    "message": "Compliance checking not yet implemented",
                    "severity": "info",
                }
            ],
            "word_counts": {},
            "missing_sections": [],
        }

    async def simulate_review(
        self,
        document_id: str,
        reviewer_perspective: str = "general",
    ) -> DocumentReview:
        """
        Simulate reviewer feedback using AI.

        Generates feedback from the perspective of a grant reviewer to
        identify potential weaknesses before submission.

        Args:
            document_id: Document to review
            reviewer_perspective: Type of reviewer to simulate (embedded in
                the returned ``reviewer_name``)

        Returns:
            Simulated review with scores and feedback; the placeholder
            returns a zero score and "pending" decision.

        TODO: Implement actual review simulation
        """
        logger.info(f"Simulating {reviewer_perspective} review for document: {document_id}")

        return DocumentReview(
            review_id=f"rev_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
            document_id=document_id,
            reviewer_type="ai",
            reviewer_name=f"AI ({reviewer_perspective})",
            overall_score=0.0,
            strengths=["Review simulation not yet implemented"],
            weaknesses=["Cannot assess without implementation"],
            suggestions=["Complete the AI review simulation feature"],
            decision="pending",
        )

    async def suggest_improvements(
        self,
        section: DocumentSection,
    ) -> List[str]:
        """
        Suggest improvements for a document section.

        Intended to use CriticAgent to analyze the section and generate
        actionable improvement suggestions.

        Args:
            section: Section to analyze

        Returns:
            List of improvement suggestions (placeholder strings for now)

        TODO: Implement CriticAgent integration
        """
        logger.info(f"Generating improvement suggestions for section: {section.section_id}")

        return [
            "Improvement suggestions not yet implemented",
            "Will integrate with CriticAgent for validation",
        ]

    async def validate_with_critic(
        self,
        document_id: str,
    ) -> Dict[str, Any]:
        """
        Validate document using CriticAgent.

        Performs comprehensive validation including:
        - Content quality assessment
        - Format compliance
        - Logical consistency
        - Citation verification

        Args:
            document_id: Document to validate

        Returns:
            Validation result dict with overall and per-dimension scores,
            issues, suggestions, a ``human_review_required`` flag, and a
            ``confidence`` value. The placeholder returns all-zero scores
            and ``valid=False``.

        TODO: Implement CriticAgent integration
        """
        logger.info(f"Validating document with CriticAgent: {document_id}")

        return {
            "document_id": document_id,
            "valid": False,
            "overall_score": 0.0,
            "dimension_scores": {
                "completeness": 0.0,
                "clarity": 0.0,
                "accuracy": 0.0,
                "compliance": 0.0,
            },
            "issues": ["CriticAgent validation not yet implemented"],
            "suggestions": ["Complete CriticAgent integration"],
            "human_review_required": True,
            "confidence": 0.0,
        }

    def requires_human_review(
        self,
        confidence_score: float,
        section_type: str,
    ) -> bool:
        """
        Determine if content requires human review.

        Human review is required when:
        - Confidence is strictly below ``self.confidence_threshold``
        - Section type is in ``CRITICAL_SECTIONS`` (executive summary,
          budget, team qualifications, methodology; case-insensitive)

        A third criterion — content making claims about capabilities —
        is planned but not yet implemented here.

        Args:
            confidence_score: AI confidence score (0.0-1.0)
            section_type: Type of section

        Returns:
            True if human review is required
        """
        return (
            confidence_score < self.confidence_threshold
            or section_type.lower() in self.CRITICAL_SECTIONS
        )
| |
|