from fastapi import FastAPI, HTTPException, Depends, Query, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import List, Dict, Optional, Any, Union, Tuple
import numpy as np
import networkx as nx
from scipy import optimize, sparse
from datetime import datetime, timedelta
from contextlib import asynccontextmanager  # required for the lifespan handler defined below
import uuid
import random
import math
import os
import asyncio
import concurrent.futures
from functools import lru_cache
import logging
import uvicorn


logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("app.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


try:
    from transformers import (
        pipeline,
        AutoTokenizer,
        AutoModelForSequenceClassification,
        AutoModelForQuestionAnswering,
        AutoModelForCausalLM
    )
    from sentence_transformers import SentenceTransformer
    import torch
    from torch.multiprocessing import Pool, set_start_method

    try:
        set_start_method('spawn', force=True)
    except RuntimeError:
        logger.warning("Could not set multiprocessing start method to 'spawn'")

    ML_LIBS_AVAILABLE = True
except ImportError:
    logger.warning("ML libraries not available. Running with limited functionality.")
    ML_LIBS_AVAILABLE = False


try:
    from langchain_community.vectorstores import Chroma
    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_core.documents import Document
    VECTOR_DB_AVAILABLE = True
except ImportError:
    logger.warning("Vector database libraries not available. Running with limited functionality.")
    VECTOR_DB_AVAILABLE = False


try:
    import qutip as qt
    from pennylane import numpy as qnp
    import pennylane as qml
    QUANTUM_LIBS_AVAILABLE = True
except ImportError:
    logger.warning("Quantum libraries not available. Running with limited functionality.")
    QUANTUM_LIBS_AVAILABLE = False


CPU_COUNT = os.cpu_count() or 4
EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=CPU_COUNT)

ml_models = {}
ml_model_usage = {}
MAX_MODELS_LOADED = 2


def get_ml_model(model_type):
    """Lazy-load ML models when needed with memory management"""
    if not ML_LIBS_AVAILABLE:
        logger.warning(f"ML libraries not available. Cannot load {model_type} model.")
        return None

    global ml_models, ml_model_usage

    if model_type in ml_models:
        ml_model_usage[model_type] = datetime.now()
        return ml_models.get(model_type)

    if len(ml_models) >= MAX_MODELS_LOADED:
        lru_model = min(ml_model_usage.items(), key=lambda x: x[1])[0]
        logger.info(f"Unloading model {lru_model} to free memory")
        ml_models.pop(lru_model)
        ml_model_usage.pop(lru_model)

    try:
        logger.info(f"Loading ML model: {model_type}")

        if model_type == "task_classifier":
            tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
            model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
            ml_models[model_type] = (tokenizer, pipeline("text-classification", model=model, tokenizer=tokenizer))

        elif model_type == "task_embedding":
            model_name = "all-MiniLM-L6-v2"
            model_path = os.path.join(os.getcwd(), "models", model_name)

            if not os.path.exists(model_path):
                os.makedirs(os.path.dirname(model_path), exist_ok=True)
                logger.info(f"Model not found locally. Will download to {model_path}")
                ml_models[model_type] = SentenceTransformer(model_name)
            else:
                logger.info(f"Loading model from local path: {model_path}")
                ml_models[model_type] = SentenceTransformer(model_path)

        elif model_type == "text_generator":
            tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
            model = AutoModelForCausalLM.from_pretrained("distilgpt2")
            ml_models[model_type] = (tokenizer, model)

        elif model_type == "qa_assistant":
            tokenizer = AutoTokenizer.from_pretrained("deepset/minilm-uncased-squad2")
            model = AutoModelForQuestionAnswering.from_pretrained("deepset/minilm-uncased-squad2")
            ml_models[model_type] = (tokenizer, pipeline("question-answering", model=model, tokenizer=tokenizer))

        ml_model_usage[model_type] = datetime.now()
        logger.info(f"Successfully loaded {model_type} model")

    except Exception as e:
        logger.error(f"Error loading {model_type}: {str(e)}")
        return None

    return ml_models.get(model_type)


class TaskState:
    PENDING = "PENDING"
    IN_PROGRESS = "IN_PROGRESS"
    COMPLETED = "COMPLETED"
    BLOCKED = "BLOCKED"


class TaskCreate(BaseModel):
    title: str
    description: str
    assignee: Optional[str] = None
    due_date: Optional[datetime] = None
    tags: List[str] = Field(default_factory=list)
    priority: int = 1


class TaskUpdate(BaseModel):
    title: Optional[str] = None
    description: Optional[str] = None
    assignee: Optional[str] = None
    due_date: Optional[datetime] = None
    state: Optional[str] = None
    tags: Optional[List[str]] = None
    priority: Optional[int] = None


class ComplexValue(BaseModel):
    real: float
    imag: float


class QuantumState(BaseModel):
    """Represents a quantum state for a task"""
    amplitudes: Dict[str, ComplexValue]
    fidelity: float
    coherence_time: float


class Task(BaseModel):
    id: str
    title: str
    description: str
    assignee: Optional[str] = None
    created_at: datetime
    updated_at: datetime
    due_date: Optional[datetime] = None
    state: str = TaskState.PENDING
    tags: List[str] = []
    priority: int = 1
    entropy: float = 1.0
    probability_distribution: Dict[str, float] = {}
    embedding: Optional[List[float]] = None
    entangled_tasks: List[str] = []
    quantum_state: Optional[Dict[str, Any]] = None
    category: Optional[str] = None
    ml_summary: Optional[str] = None


class EntanglementCreate(BaseModel):
    task_id_1: str
    task_id_2: str
    strength: float = 1.0
    entanglement_type: str = "standard"


class EntanglementUpdate(BaseModel):
    strength: float
    entanglement_type: Optional[str] = None


class Entanglement(BaseModel):
    id: str
    task_id_1: str
    task_id_2: str
    strength: float
    entanglement_type: str = "standard"
    created_at: datetime
    updated_at: datetime


class SearchQuery(BaseModel):
    query: str
    limit: int = 10
    use_quantum: bool = False


class SystemMetrics(BaseModel):
    total_entropy: float
    task_count: int
    completion_rate: float
    average_cognitive_load: float
    entanglement_density: float
    quantum_coherence: float = 0.0


class QuantumSimulationRequest(BaseModel):
    task_ids: List[str]
    simulation_steps: int = 5
    decoherence_rate: float = 0.05
    measurement_type: str = "projective"


tasks = {}
entanglements = {}
task_graph = nx.Graph()

embeddings_model = None
vector_store = None


@lru_cache(maxsize=128)
def calculate_task_embedding(task_text):
    """Calculate embedding for task text using ML model or fallback."""
    try:
        embedding_model = get_ml_model("task_embedding")
        if embedding_model:
            embedding = embedding_model.encode(task_text)
            return embedding.tolist()
        else:
            from sklearn.feature_extraction.text import CountVectorizer
            vectorizer = CountVectorizer(max_features=384)
            vectorizer.fit([task_text])
            vector = vectorizer.transform([task_text]).toarray()[0]

            if len(vector) > 0:
                vector = vector / (np.linalg.norm(vector) + 1e-6)
            if len(vector) > 384:
                vector = vector[:384]
            else:
                vector = np.pad(vector, (0, 384 - len(vector)))

            return vector.tolist()
    except Exception as e:
        logger.error(f"Embedding error: {str(e)}")
        embedding = np.random.randn(384)
        embedding = embedding / np.linalg.norm(embedding)
        return embedding.tolist()


def classify_task(task_text):
    """Classify task using ML model or fallback rules."""
    try:
        classifier = get_ml_model("task_classifier")
        if classifier and classifier[1] is not None:
            tokenizer, pipeline = classifier
            result = pipeline(task_text)
            return result[0]['label']
    except Exception as e:
        logger.error(f"Classification error: {str(e)}")

    keywords = {
        "urgent": "HIGH_PRIORITY",
        "important": "HIGH_PRIORITY",
        "critical": "HIGH_PRIORITY",
        "bug": "BUG_FIX",
        "fix": "BUG_FIX",
        "feature": "FEATURE",
        "implement": "IMPLEMENTATION",
        "develop": "DEVELOPMENT",
        "research": "RESEARCH",
        "design": "DESIGN"
    }

    task_text_lower = task_text.lower()
    for keyword, category in keywords.items():
        if keyword in task_text_lower:
            return category

    return "GENERAL_TASK"


def generate_task_summary(task_text, max_length=50):
    """Generate a summary for a task using ML or simple extraction."""
    try:
        generator = get_ml_model("text_generator")
        if generator and generator[0] is not None and generator[1] is not None:
            tokenizer, model = generator
            inputs = tokenizer(f"Summarize: {task_text}", return_tensors="pt", max_length=100, truncation=True)
            summary_ids = model.generate(
                inputs["input_ids"],
                max_length=max_length,
                num_beams=2,
                early_stopping=True
            )
            summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
            return summary
    except Exception as e:
        logger.error(f"Summary generation error: {str(e)}")

    first_sentence = task_text.split('.')[0]
    if len(first_sentence) > max_length:
        return first_sentence[:max_length] + "..."
    return first_sentence


def answer_task_question(question, context):
    """Answer a question about a task using QA model or simple extraction."""
    try:
        qa_model = get_ml_model("qa_assistant")
        if qa_model and qa_model[1] is not None:
            tokenizer, pipeline = qa_model
            answer = pipeline(question=question, context=context)
            return answer
    except Exception as e:
        logger.error(f"QA error: {str(e)}")

    question_lower = question.lower()
    context_lower = context.lower()

    question_words = ["what", "when", "where", "who", "how", "why"]
    focus_word = next((word for word in question_words if word in question_lower), None)

    if focus_word and focus_word in question_lower:
        focus_index = question_lower.index(focus_word) + len(focus_word)
        focus = question_lower[focus_index:].strip()

        if focus in context_lower:
            focus_index = context_lower.index(focus)
            start = max(0, focus_index - 50)
            end = min(len(context_lower), focus_index + 100)
            answer_text = context[start:end]
            return {"answer": answer_text, "score": 0.5}

    return {"answer": "Information not found in the task details.", "score": 0.0}


async def update_task_entropy(task_id, decay_factor=0.95):
    """Update task entropy based on quantum decoherence principles."""
    if task_id in tasks:
        task = tasks[task_id]

        task.entropy *= decay_factor

        interaction_factor = 0.9
        task.entropy *= interaction_factor

        coherence_factor = random.uniform(0.98, 1.02)
        task.entropy *= coherence_factor

        task.entropy = max(0.1, min(1.0, task.entropy))

        await update_probability_distribution(task_id)

        if task.quantum_state:
            task.quantum_state["fidelity"] *= decay_factor
            decoherence = 1.0 - (1.0 - task.entropy) * 0.5
            task.quantum_state["coherence_time"] *= decoherence

        return task.entropy
    return None


async def update_probability_distribution(task_id):
    """Update the probability distribution of task states."""
    if task_id in tasks:
        task = tasks[task_id]
        states = [TaskState.PENDING, TaskState.IN_PROGRESS, TaskState.COMPLETED, TaskState.BLOCKED]

        probs = [0.1, 0.1, 0.1, 0.1]
        current_state_idx = states.index(task.state)
        probs[current_state_idx] = max(0.5, 1 - task.entropy)

        remaining = 1.0 - probs[current_state_idx]
        for i in range(len(states)):
            if i != current_state_idx:
                probs[i] = remaining / (len(states) - 1)

        task.probability_distribution = {states[i]: float(probs[i]) for i in range(len(states))}

        if not task.quantum_state:
            task.quantum_state = {
                "fidelity": 1.0,
                "coherence_time": 1.0,
                "eigenvalues": [0.7, 0.2, 0.05, 0.05],
                "amplitudes": {}
            }

        amplitudes = {}
        for i, state in enumerate(states):
            real_part = math.sqrt(probs[i])
            imag_part = 0.0
            amplitudes[state] = {"real": float(real_part), "imag": float(imag_part)}

        task.quantum_state["amplitudes"] = amplitudes
        tasks[task_id] = task


async def propagate_entanglement(task_id):
    """Propagate changes through entangled tasks."""
    if task_id not in tasks:
        return

    visited = set()
    to_visit = [task_id]

    while to_visit:
        current_id = to_visit.pop(0)
        if current_id in visited:
            continue

        visited.add(current_id)
        current_task = tasks[current_id]

        connected_entanglements = []
        for e_id, entanglement in entanglements.items():
            if entanglement.task_id_1 == current_id or entanglement.task_id_2 == current_id:
                connected_entanglements.append((e_id, entanglement))

        for e_id, entanglement in connected_entanglements:
            other_id = None
            if entanglement.task_id_1 == current_id:
                other_id = entanglement.task_id_2
            elif entanglement.task_id_2 == current_id:
                other_id = entanglement.task_id_1

            if not other_id or other_id not in tasks:
                continue

            other_task = tasks[other_id]

            if entanglement.entanglement_type == "CNOT":
                if current_task.state == TaskState.COMPLETED and other_task.state == TaskState.PENDING:
                    if random.random() < entanglement.strength * 0.5:
                        other_task.state = TaskState.IN_PROGRESS
                        await update_probability_distribution(other_id)

            elif entanglement.entanglement_type == "SWAP":
                if random.random() < entanglement.strength * 0.3:
                    current_task.priority, other_task.priority = other_task.priority, current_task.priority
                    await update_probability_distribution(current_id)
                    await update_probability_distribution(other_id)

            else:
                entropy_shift = (current_task.entropy - other_task.entropy) * entanglement.strength * 0.3
                other_task.entropy = max(0.1, min(1.0, other_task.entropy + entropy_shift))
                await update_probability_distribution(other_id)

            if other_id not in visited:
                to_visit.append(other_id)


def calculate_task_similarity(task1, task2):
    """Calculate similarity between tasks."""
    if not task1.embedding or not task2.embedding:
        return 0.0

    try:
        embedding1 = np.array(task1.embedding)
        embedding2 = np.array(task2.embedding)

        embedding1 = embedding1 / (np.linalg.norm(embedding1) + 1e-6)
        embedding2 = embedding2 / (np.linalg.norm(embedding2) + 1e-6)

        similarity = np.dot(embedding1, embedding2)

        similarity = (similarity + 1) / 2
    except Exception as e:
        logger.error(f"Similarity calculation error: {str(e)}")
        similarity = 0.0

    common_tags = set(task1.tags).intersection(set(task2.tags))
    tag_similarity = len(common_tags) / max(1, len(set(task1.tags).union(set(task2.tags))))

    return 0.7 * similarity + 0.3 * tag_similarity


def suggest_entanglements(task_id, threshold=0.7):
    """Suggest possible task entanglements based on similarity."""
    if task_id not in tasks:
        return []

    target_task = tasks[task_id]
    suggestions = []

    for other_id, other_task in tasks.items():
        if other_id == task_id:
            continue

        similarity = calculate_task_similarity(target_task, other_task)

        already_entangled = any(
            (e.task_id_1 == task_id and e.task_id_2 == other_id) or
            (e.task_id_1 == other_id and e.task_id_2 == task_id)
            for e in entanglements.values()
        )

        if similarity >= threshold and not already_entangled:
            suggestions.append({
                "task_id": other_id,
                "title": other_task.title,
                "similarity": similarity
            })

    suggestions.sort(key=lambda x: x["similarity"], reverse=True)
    return suggestions


def optimize_task_assignment(tasks_dict):
    """Optimize task assignment to balance workload."""
    assignees = {}
    for task_id, task in tasks_dict.items():
        if task.assignee:
            if task.assignee not in assignees:
                assignees[task.assignee] = []
            assignees[task.assignee].append(task_id)

    if not assignees:
        return []

    cognitive_loads = {}
    for assignee, task_ids in assignees.items():
        tasks_load = sum((tasks_dict[tid].entropy * tasks_dict[tid].priority) for tid in task_ids)
        adjustment = 1 + (0.1 * (len(task_ids) - 1))
        total_load = tasks_load * adjustment
        cognitive_loads[assignee] = total_load

    mean_load = sum(cognitive_loads.values()) / len(cognitive_loads)
    overloaded = [a for a, load in cognitive_loads.items() if load > mean_load * 1.2]
    underloaded = [a for a, load in cognitive_loads.items() if load < mean_load * 0.8]

    recommendations = []

    for over_assignee in overloaded:
        over_tasks = [tid for tid in assignees[over_assignee]]

        task_loads = [(tid, tasks_dict[tid].entropy * tasks_dict[tid].priority) for tid in over_tasks]
        task_loads.sort(key=lambda x: x[1], reverse=True)

        for under_assignee in underloaded:
            for task_id, load in task_loads[:2]:
                new_over_load = cognitive_loads[over_assignee] - load
                new_under_load = cognitive_loads[under_assignee] + load

                if new_over_load >= new_under_load * 0.8 and new_over_load <= new_under_load * 1.2:
                    recommendations.append({
                        "task_id": task_id,
                        "task_title": tasks_dict[task_id].title,
                        "from_assignee": over_assignee,
                        "to_assignee": under_assignee,
                        "load_improvement": cognitive_loads[over_assignee] - new_over_load
                    })
                    break

    recommendations.sort(key=lambda x: x.get("load_improvement", 0), reverse=True)
    return recommendations


def calculate_system_metrics():
    """Calculate system-wide metrics."""
    if not tasks:
        return SystemMetrics(
            total_entropy=0,
            task_count=0,
            completion_rate=0,
            average_cognitive_load=0,
            entanglement_density=0,
            quantum_coherence=0.0
        )

    task_count = len(tasks)

    total_entropy = sum(task.entropy for task in tasks.values())

    completed_tasks = sum(1 for task in tasks.values() if task.state == TaskState.COMPLETED)
    completion_rate = completed_tasks / task_count if task_count > 0 else 0

    avg_cognitive_load = sum(task.priority * task.entropy for task in tasks.values()) / task_count if task_count > 0 else 0

    max_possible_entanglements = task_count * (task_count - 1) / 2 if task_count > 1 else 1
    entanglement_density = len(entanglements) / max_possible_entanglements

    quantum_coherence = 1.0 - (total_entropy / task_count) if task_count > 0 else 0.0

    return SystemMetrics(
        total_entropy=total_entropy,
        task_count=task_count,
        completion_rate=completion_rate,
        average_cognitive_load=avg_cognitive_load,
        entanglement_density=entanglement_density,
        quantum_coherence=quantum_coherence
    )


def simulate_quantum_circuit(tasks_dict, task_ids, steps=5):
    """Run a simplified quantum circuit simulation on selected tasks."""
    if not QUANTUM_LIBS_AVAILABLE:
        return simulate_simplified_circuit(tasks_dict, task_ids, steps)

    if not task_ids or len(task_ids) > 8:
        return {"error": "Invalid number of tasks (should be 1-8)"}

    selected_tasks = {tid: tasks_dict[tid] for tid in task_ids if tid in tasks_dict}
    if not selected_tasks:
        return {"error": "No valid tasks found"}

    try:
        import qutip as qt

        # One qubit per task; higher purity biases the amplitude toward |0> (pending).
        states = []
        for tid in task_ids:
            if tid in selected_tasks:
                task = selected_tasks[tid]
                purity = 1.0 - task.entropy
                state = math.sqrt(purity) * qt.basis(2, 0) + math.sqrt(1 - purity) * qt.basis(2, 1)
                states.append(state)
            else:
                states.append(qt.basis(2, 0))

        initial_state = states[0]
        for state in states[1:]:
            initial_state = qt.tensor(initial_state, state)

        current_state = initial_state
        results = []

        for step in range(steps):
            op_type = random.choice(["CNOT", "SWAP", "Hadamard"])

            try:
                if op_type == "Hadamard":
                    qubit = random.randint(0, len(task_ids) - 1)
                    h_gate = qt.hadamard_transform()
                    gate = qt.gate_expand_1toN(h_gate, len(task_ids), qubit)
                    current_state = gate * current_state
                elif len(task_ids) > 1:
                    if op_type == "CNOT":
                        control = random.randint(0, len(task_ids) - 1)
                        target = random.randint(0, len(task_ids) - 1)
                        while target == control:
                            target = random.randint(0, len(task_ids) - 1)

                        cnot = qt.cnot()
                        gate = qt.gate_expand_2toN(cnot, len(task_ids), control, target)
                        current_state = gate * current_state
                    elif op_type == "SWAP":
                        q1 = random.randint(0, len(task_ids) - 1)
                        q2 = random.randint(0, len(task_ids) - 1)
                        while q2 == q1:
                            q2 = random.randint(0, len(task_ids) - 1)

                        swap = qt.swap()
                        gate = qt.gate_expand_2toN(swap, len(task_ids), q1, q2)
                        current_state = gate * current_state
            except Exception as e:
                logger.error(f"Quantum operation error: {str(e)}")
                op_type = "Identity"

            dm = current_state * current_state.dag()

            step_result = {
                "step": step,
                "operation": op_type,
                "task_states": {}
            }

            for i, tid in enumerate(task_ids):
                try:
                    reduced_dm = dm.ptrace(i)

                    # Diagonal entries are real up to numerical noise; take .real so
                    # float() does not fail on complex matrix elements.
                    prob_0 = float(reduced_dm[0, 0].real)
                    prob_1 = float(reduced_dm[1, 1].real)

                    coherence = float(abs(reduced_dm[0, 1]))

                    step_result["task_states"][tid] = {
                        "pending_prob": prob_0,
                        "completed_prob": prob_1,
                        "coherence": coherence,
                        "task_title": selected_tasks[tid].title if tid in selected_tasks else "Unknown"
                    }
                except Exception as e:
                    logger.error(f"Error calculating task state: {str(e)}")
                    step_result["task_states"][tid] = {
                        "pending_prob": 0.5,
                        "completed_prob": 0.5,
                        "coherence": 0.0,
                        "task_title": selected_tasks[tid].title if tid in selected_tasks else "Unknown",
                        "error": str(e)
                    }

            results.append(step_result)

        final_result = {
            "final_state": {},
            "entanglement_measure": {},
            "measurement_outcomes": {}
        }

        final_dm = current_state * current_state.dag()

        for i, tid1 in enumerate(task_ids):
            for j, tid2 in enumerate(task_ids):
                if i < j:
                    try:
                        sub_systems = [i, j]
                        rho_ij = final_dm.ptrace(sub_systems)
                        concurrence = 2 * abs(rho_ij[0, 3]) if rho_ij.shape[0] > 3 else 0.0

                        final_result["entanglement_measure"][f"{tid1}-{tid2}"] = {
                            "concurrence": concurrence,
                            "task1_title": selected_tasks.get(tid1, Task(id="unknown", title="Unknown", description="", created_at=datetime.now(), updated_at=datetime.now())).title,
                            "task2_title": selected_tasks.get(tid2, Task(id="unknown", title="Unknown", description="", created_at=datetime.now(), updated_at=datetime.now())).title
                        }
                    except Exception as e:
                        logger.error(f"Entanglement calculation error: {str(e)}")
                        final_result["entanglement_measure"][f"{tid1}-{tid2}"] = {
                            "error": str(e),
                            "task1_title": selected_tasks.get(tid1, Task(id="unknown", title="Unknown", description="", created_at=datetime.now(), updated_at=datetime.now())).title,
                            "task2_title": selected_tasks.get(tid2, Task(id="unknown", title="Unknown", description="", created_at=datetime.now(), updated_at=datetime.now())).title
                        }

        measurements = {}
        for i, tid in enumerate(task_ids):
            try:
                reduced_dm = final_dm.ptrace(i)

                prob_0 = float(reduced_dm[0, 0].real)
                prob_1 = float(reduced_dm[1, 1].real)

                if random.random() < prob_1:
                    outcome = "COMPLETED"
                else:
                    outcome = "PENDING"

                measurements[tid] = {
                    "outcome": outcome,
                    "pending_prob": prob_0,
                    "completed_prob": prob_1,
                    "task_title": selected_tasks.get(tid, Task(id="unknown", title="Unknown", description="", created_at=datetime.now(), updated_at=datetime.now())).title
                }
            except Exception as e:
                logger.error(f"Measurement error: {str(e)}")
                measurements[tid] = {
                    "outcome": "PENDING",
                    "error": str(e),
                    "task_title": selected_tasks.get(tid, Task(id="unknown", title="Unknown", description="", created_at=datetime.now(), updated_at=datetime.now())).title
                }

        final_result["measurement_outcomes"] = measurements

        return {
            "simulation_steps": results,
            "final_results": final_result
        }

    except Exception as e:
        logger.error(f"Quantum simulation error: {str(e)}")
        return {"error": f"Simulation failed: {str(e)}"}


def simulate_simplified_circuit(tasks_dict, task_ids, steps=5):
    """Fallback simulation when quantum libraries are not available."""
    if not task_ids or len(task_ids) > 8:
        return {"error": "Invalid number of tasks (should be 1-8)"}

    selected_tasks = {tid: tasks_dict[tid] for tid in task_ids if tid in tasks_dict}
    if not selected_tasks:
        return {"error": "No valid tasks found"}

    results = []

    task_states = {}
    for tid in task_ids:
        if tid in selected_tasks:
            task = selected_tasks[tid]
            pending_prob = 1.0 - task.entropy * 0.5
            completed_prob = 1.0 - pending_prob
            coherence = task.entropy * 0.3

            task_states[tid] = {
                "pending_prob": pending_prob,
                "completed_prob": completed_prob,
                "coherence": coherence
            }
        else:
            task_states[tid] = {
                "pending_prob": 0.8,
                "completed_prob": 0.2,
                "coherence": 0.1
            }

    current_states = task_states.copy()

    for step in range(steps):
        op_type = random.choice(["CNOT", "SWAP", "Hadamard", "Noise"])

        step_result = {
            "step": step,
            "operation": op_type,
            "task_states": {}
        }

        if op_type == "Hadamard":
            random_task = random.choice(task_ids)
            if random_task in current_states:
                state = current_states[random_task]
                state["pending_prob"] = 0.5 + (state["pending_prob"] - 0.5) * 0.3
                state["completed_prob"] = 1.0 - state["pending_prob"]
                state["coherence"] = min(1.0, state["coherence"] + 0.2)

        elif op_type == "CNOT" and len(task_ids) > 1:
            control = random.choice(task_ids)
            target = random.choice([t for t in task_ids if t != control])

            if control in current_states and target in current_states:
                if current_states[control]["completed_prob"] > 0.7:
                    current_states[target]["completed_prob"] = (
                        current_states[target]["completed_prob"] * 0.3 +
                        current_states[control]["completed_prob"] * 0.7
                    )
                    current_states[target]["pending_prob"] = 1.0 - current_states[target]["completed_prob"]

        elif op_type == "SWAP" and len(task_ids) > 1:
            t1 = random.choice(task_ids)
            t2 = random.choice([t for t in task_ids if t != t1])

            if t1 in current_states and t2 in current_states:
                current_states[t1]["pending_prob"], current_states[t2]["pending_prob"] = (
                    current_states[t2]["pending_prob"], current_states[t1]["pending_prob"]
                )
                current_states[t1]["completed_prob"], current_states[t2]["completed_prob"] = (
                    current_states[t2]["completed_prob"], current_states[t1]["completed_prob"]
                )

        elif op_type == "Noise":
            for tid in task_ids:
                if tid in current_states:
                    noise = random.uniform(-0.1, 0.1)
                    current_states[tid]["pending_prob"] = max(0.1, min(0.9, current_states[tid]["pending_prob"] + noise))
                    current_states[tid]["completed_prob"] = 1.0 - current_states[tid]["pending_prob"]
                    current_states[tid]["coherence"] = max(0.0, current_states[tid]["coherence"] - 0.05)

        for tid in task_ids:
            if tid in current_states:
                state = current_states[tid].copy()
                state["task_title"] = selected_tasks[tid].title if tid in selected_tasks else "Unknown"
                step_result["task_states"][tid] = state

        results.append(step_result)

    final_result = {
        "final_state": {},
        "entanglement_measure": {},
        "measurement_outcomes": {}
    }

    for i, tid1 in enumerate(task_ids):
        for j, tid2 in enumerate(task_ids):
            if i < j:
                if tid1 in current_states and tid2 in current_states:
                    coherence1 = current_states[tid1]["coherence"]
                    coherence2 = current_states[tid2]["coherence"]
                    concurrence = coherence1 * coherence2 * random.uniform(0.8, 1.2)

                    final_result["entanglement_measure"][f"{tid1}-{tid2}"] = {
                        "concurrence": min(1.0, concurrence),
                        "task1_title": selected_tasks.get(tid1, Task(id="unknown", title="Unknown", description="", created_at=datetime.now(), updated_at=datetime.now())).title,
                        "task2_title": selected_tasks.get(tid2, Task(id="unknown", title="Unknown", description="", created_at=datetime.now(), updated_at=datetime.now())).title
                    }

    measurements = {}
    for tid in task_ids:
        if tid in current_states:
            if random.random() < current_states[tid]["completed_prob"]:
                outcome = "COMPLETED"
            else:
                outcome = "PENDING"

            measurements[tid] = {
                "outcome": outcome,
                "pending_prob": current_states[tid]["pending_prob"],
                "completed_prob": current_states[tid]["completed_prob"],
                "task_title": selected_tasks.get(tid, Task(id="unknown", title="Unknown", description="", created_at=datetime.now(), updated_at=datetime.now())).title
            }

    final_result["measurement_outcomes"] = measurements

    return {
        "simulation_steps": results,
        "final_results": final_result,
        "note": "Simplified simulation (quantum libraries not available)"
    }


# FastAPI expects the lifespan handler to be an async context manager,
# so the generator below must be wrapped with @asynccontextmanager.
@asynccontextmanager
async def lifespan(app: FastAPI):
    """App initialization and cleanup"""
    logger.info("Starting application...")

    global embeddings_model, vector_store

    if VECTOR_DB_AVAILABLE:
        try:
            model_name = "all-MiniLM-L6-v2"
            model_path = os.path.join(os.getcwd(), "models", model_name)

            os.makedirs(os.path.dirname(model_path), exist_ok=True)

            logger.info(f"Initializing embeddings with model path: {model_path}")

            try:
                if os.path.exists(model_path):
                    logger.info(f"Loading embedding model from local path: {model_path}")
                    embeddings_model = HuggingFaceEmbeddings(model_name=model_path)
                else:
                    logger.info(f"Loading embedding model from HuggingFace: {model_name}")
                    embeddings_model = HuggingFaceEmbeddings(
                        model_name=model_name,
                        cache_folder=os.path.dirname(model_path)
                    )
            except Exception as embed_err:
                logger.error(f"Error loading embedding model: {str(embed_err)}")
                logger.info("Using fallback embedding method")
                embeddings_model = None

            os.makedirs("./data/task_embeddings", exist_ok=True)

            if embeddings_model:
                try:
                    vector_store = Chroma(
                        persist_directory="./data/task_embeddings",
                        embedding_function=embeddings_model
                    )
                    logger.info("Vector store initialized successfully!")
                except Exception as vs_err:
                    logger.error(f"Error initializing vector store: {str(vs_err)}")
                    vector_store = None
            else:
                logger.warning("Embeddings model not available. Vector search will be limited.")
        except Exception as e:
            logger.error(f"Error during startup: {str(e)}")
            logger.warning("Running without vector search capabilities.")
    else:
        logger.warning("Vector database libraries not available. Running without vector search.")

    yield

    logger.info("Shutting down application...")

    if vector_store is not None:
        try:
            vector_store.persist()
            logger.info("Vector store persisted successfully!")
        except Exception as e:
            logger.error(f"Error persisting vector store: {str(e)}")


app = FastAPI(
    title="Advanced Neuromorphic Quantum-Cognitive Task System",
    description="A quantum-inspired task management system with ML capabilities",
    version="2.0.0",
    lifespan=lifespan
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

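# Example request (illustrative only; it assumes a local server on
# http://localhost:8000, which is not configured anywhere in this file):
#
#   curl -X POST http://localhost:8000/tasks \
#        -H "Content-Type: application/json" \
#        -d '{"title": "Draft report", "description": "Write the Q3 summary.", "priority": 2}'
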
@app.get("/")
async def root():
    """Root endpoint with system information."""
    return {
        "name": "Advanced Neuromorphic Quantum-Cognitive Task System",
        "version": "2.0.0-quantum",
        "status": "operational",
        "capabilities": {
            "ml_enabled": ML_LIBS_AVAILABLE,
            "quantum_enabled": QUANTUM_LIBS_AVAILABLE,
            "vector_search": VECTOR_DB_AVAILABLE and embeddings_model is not None
        },
        "endpoints": [
            "/tasks", "/tasks/{task_id}", "/entanglements",
            "/entanglements/{entanglement_id}", "/metrics",
            "/quantum-simulation", "/search", "/optimize-assignments"
        ]
    }


@app.get("/health")
async def health_check():
    """Health check endpoint for monitoring."""
    return {
        "status": "healthy",
        "time": datetime.now().isoformat(),
        "system_load": {
            "tasks": len(tasks),
            "entanglements": len(entanglements),
            "memory_usage": {
                "ml_models_loaded": len(ml_models)
            }
        }
    }


@app.get("/metrics")
async def get_metrics():
    """Get system-wide metrics."""
    return calculate_system_metrics()


@app.post("/tasks", response_model=Task)
async def create_task(task: TaskCreate, background_tasks: BackgroundTasks):
    """Create a new task with quantum state initialization."""
    task_id = str(uuid.uuid4())
    now = datetime.now()

    task_text = f"{task.title} {task.description}"
    embedding = None
    category = None
    ml_summary = None

    try:
        embedding = calculate_task_embedding(task_text)

        category = classify_task(task_text)

        ml_summary = task_text.split('.')[0][:50]
    except Exception as e:
        logger.error(f"Task processing error: {str(e)}")

    states = [TaskState.PENDING, TaskState.IN_PROGRESS, TaskState.COMPLETED, TaskState.BLOCKED]
    probs = [0.7, 0.2, 0.05, 0.05]

    quantum_state = {
        "fidelity": 1.0,
        "coherence_time": 1.0,
        "amplitudes": {
            states[i]: {"real": math.sqrt(probs[i]), "imag": 0.0}
            for i in range(len(states))
        }
    }

    new_task = Task(
        id=task_id,
        title=task.title,
        description=task.description,
        assignee=task.assignee,
        created_at=now,
        updated_at=now,
        due_date=task.due_date,
        state=TaskState.PENDING,
        tags=list(task.tags) if task.tags else [],
        priority=task.priority,
        entropy=1.0,
        probability_distribution={states[i]: probs[i] for i in range(len(states))},
        embedding=embedding,
        entangled_tasks=[],
        quantum_state=quantum_state,
        category=category,
        ml_summary=ml_summary
    )

    tasks[task_id] = new_task

    background_tasks.add_task(update_task_graph, task_id)
    background_tasks.add_task(update_vector_store, task_id)

    if ML_LIBS_AVAILABLE:
        background_tasks.add_task(enhance_task_with_ml, task_id, task_text)

    return new_task


@app.get("/tasks", response_model=List[Task])
async def list_tasks(
    state: Optional[str] = None,
    assignee: Optional[str] = None,
    sort_by: str = "created_at",
    limit: int = Query(100, ge=1, le=500),
    tags: Optional[List[str]] = Query(None)
):
    """List all tasks with filtering and sorting."""
    filtered_tasks = list(tasks.values())

    if state:
        filtered_tasks = [t for t in filtered_tasks if t.state == state]
    if assignee:
        filtered_tasks = [t for t in filtered_tasks if t.assignee == assignee]
    if tags:
        filtered_tasks = [t for t in filtered_tasks if any(tag in t.tags for tag in tags)]

    if sort_by == "priority":
        filtered_tasks.sort(key=lambda t: t.priority, reverse=True)
    elif sort_by == "due_date":
        # Tasks without a due date sort last; substitute datetime.max so None values
        # are never compared against each other (which would raise a TypeError).
        filtered_tasks.sort(key=lambda t: (t.due_date is None, t.due_date or datetime.max))
    elif sort_by == "entropy":
        filtered_tasks.sort(key=lambda t: t.entropy, reverse=True)
    else:
        filtered_tasks.sort(key=lambda t: t.created_at, reverse=True)

    return filtered_tasks[:limit]


@app.get("/tasks/{task_id}", response_model=Task)
async def get_task(task_id: str, background_tasks: BackgroundTasks):
    """Get a task by ID and update its entropy."""
    if task_id not in tasks:
        raise HTTPException(status_code=404, detail="Task not found")

    background_tasks.add_task(update_task_entropy, task_id)

    return tasks[task_id]


@app.put("/tasks/{task_id}", response_model=Task)
async def update_task(task_id: str, task_update: TaskUpdate, background_tasks: BackgroundTasks):
    """Update a task with quantum state changes."""
    if task_id not in tasks:
        raise HTTPException(status_code=404, detail="Task not found")

    task = tasks[task_id]
    update_data = task_update.dict(exclude_unset=True)

    for field, value in update_data.items():
        if field != "entangled_tasks" and field != "quantum_state" and field != "embedding":
            setattr(task, field, value)

    task.updated_at = datetime.now()

    if "title" in update_data or "description" in update_data:
        task_text = f"{task.title} {task.description}"
        task.embedding = calculate_task_embedding(task_text)

        task.category = classify_task(task_text)
        task.ml_summary = generate_task_summary(task_text)

        if ML_LIBS_AVAILABLE:
            background_tasks.add_task(enhance_task_with_ml, task_id, task_text)

    background_tasks.add_task(update_task_entropy, task_id, decay_factor=0.98)
    background_tasks.add_task(propagate_entanglement, task_id)
    background_tasks.add_task(update_vector_store, task_id)

    return task


@app.delete("/tasks/{task_id}")
async def delete_task(task_id: str, background_tasks: BackgroundTasks):
    """Delete a task and update entanglements."""
    if task_id not in tasks:
        raise HTTPException(status_code=404, detail="Task not found")

    task = tasks.pop(task_id)

    if task_id in task_graph:
        task_graph.remove_node(task_id)

    entanglements_to_remove = []
    for e_id, e in entanglements.items():
        if e.task_id_1 == task_id or e.task_id_2 == task_id:
            entanglements_to_remove.append(e_id)

    for e_id in entanglements_to_remove:
        entanglements.pop(e_id)

    if vector_store is not None:
        try:
            background_tasks.add_task(remove_from_vector_store, task_id)
        except Exception as e:
            logger.error(f"Error scheduling vector store update: {str(e)}")

    return {"message": "Task deleted"}


@app.post("/entanglements", response_model=Entanglement)
async def create_entanglement(entanglement: EntanglementCreate, background_tasks: BackgroundTasks):
    """Create an entanglement between two tasks."""
    if entanglement.task_id_1 not in tasks:
        raise HTTPException(status_code=404, detail=f"Task {entanglement.task_id_1} not found")
    if entanglement.task_id_2 not in tasks:
        raise HTTPException(status_code=404, detail=f"Task {entanglement.task_id_2} not found")

    for e in entanglements.values():
        if ((e.task_id_1 == entanglement.task_id_1 and e.task_id_2 == entanglement.task_id_2) or
                (e.task_id_1 == entanglement.task_id_2 and e.task_id_2 == entanglement.task_id_1)):
            raise HTTPException(status_code=400, detail="Entanglement already exists")

    entanglement_id = str(uuid.uuid4())
    now = datetime.now()

    new_entanglement = Entanglement(
        id=entanglement_id,
        task_id_1=entanglement.task_id_1,
        task_id_2=entanglement.task_id_2,
        strength=entanglement.strength,
        entanglement_type=entanglement.entanglement_type,
        created_at=now,
        updated_at=now
    )

    entanglements[entanglement_id] = new_entanglement

    task1 = tasks[entanglement.task_id_1]
    task2 = tasks[entanglement.task_id_2]

    if entanglement.task_id_2 not in task1.entangled_tasks:
        task1.entangled_tasks.append(entanglement.task_id_2)

    if entanglement.task_id_1 not in task2.entangled_tasks:
        task2.entangled_tasks.append(entanglement.task_id_1)

    task_graph.add_edge(
        entanglement.task_id_1,
        entanglement.task_id_2,
        weight=entanglement.strength,
        type=entanglement.entanglement_type
    )

    background_tasks.add_task(propagate_entanglement, entanglement.task_id_1)

    return new_entanglement


@app.get("/entanglements", response_model=List[Entanglement])
async def list_entanglements(
    task_id: Optional[str] = None,
    entanglement_type: Optional[str] = None
):
    """List all entanglements with optional filtering."""
    filtered_entanglements = list(entanglements.values())

    if task_id:
        filtered_entanglements = [
            e for e in filtered_entanglements
            if e.task_id_1 == task_id or e.task_id_2 == task_id
        ]

    if entanglement_type:
        filtered_entanglements = [
            e for e in filtered_entanglements
            if e.entanglement_type == entanglement_type
        ]

    return filtered_entanglements


@app.get("/entanglements/{entanglement_id}", response_model=Entanglement)
async def get_entanglement(entanglement_id: str):
    """Get an entanglement by ID."""
    if entanglement_id not in entanglements:
        raise HTTPException(status_code=404, detail="Entanglement not found")

    return entanglements[entanglement_id]


@app.put("/entanglements/{entanglement_id}", response_model=Entanglement)
async def update_entanglement(
    entanglement_id: str,
    entanglement_update: EntanglementUpdate,
    background_tasks: BackgroundTasks
):
    """Update an entanglement's properties."""
    if entanglement_id not in entanglements:
        raise HTTPException(status_code=404, detail="Entanglement not found")

    entanglement = entanglements[entanglement_id]

    update_data = entanglement_update.dict(exclude_unset=True)
    for field, value in update_data.items():
        setattr(entanglement, field, value)

    entanglement.updated_at = datetime.now()

    if task_graph.has_edge(entanglement.task_id_1, entanglement.task_id_2):
        task_graph[entanglement.task_id_1][entanglement.task_id_2]["weight"] = entanglement.strength
        task_graph[entanglement.task_id_1][entanglement.task_id_2]["type"] = entanglement.entanglement_type

    background_tasks.add_task(propagate_entanglement, entanglement.task_id_1)

    return entanglement


@app.delete("/entanglements/{entanglement_id}")
async def delete_entanglement(entanglement_id: str):
    """Delete an entanglement."""
    if entanglement_id not in entanglements:
        raise HTTPException(status_code=404, detail="Entanglement not found")

    entanglement = entanglements.pop(entanglement_id)

    if entanglement.task_id_1 in tasks and entanglement.task_id_2 in tasks[entanglement.task_id_1].entangled_tasks:
        tasks[entanglement.task_id_1].entangled_tasks.remove(entanglement.task_id_2)

    if entanglement.task_id_2 in tasks and entanglement.task_id_1 in tasks[entanglement.task_id_2].entangled_tasks:
        tasks[entanglement.task_id_2].entangled_tasks.remove(entanglement.task_id_1)

    if task_graph.has_edge(entanglement.task_id_1, entanglement.task_id_2):
        task_graph.remove_edge(entanglement.task_id_1, entanglement.task_id_2)

    return {"message": "Entanglement deleted"}


@app.post("/search")
async def search_tasks(search_query: SearchQuery):
    """Search for tasks using vector search or direct comparison."""
    if not tasks:
        return []

    query_text = search_query.query.strip()
    if not query_text:
        return []

    query_embedding = calculate_task_embedding(query_text)

    if vector_store is not None and VECTOR_DB_AVAILABLE and not search_query.use_quantum:
        try:
            results = vector_store.similarity_search_with_score(
                query_text,
                k=search_query.limit
            )

            search_results = []
            for doc, score in results:
                task_id = doc.metadata.get("task_id")
                if task_id in tasks:
                    search_results.append({
                        "task": tasks[task_id],
                        "relevance_score": float(score)
                    })

            return search_results
        except Exception as e:
            logger.error(f"Vector search error: {str(e)}")

    similarities = []

    for task_id, task in tasks.items():
        if task.embedding:
            try:
                task_embedding = np.array(task.embedding)
                query_embedding_np = np.array(query_embedding)

                task_embedding = task_embedding / (np.linalg.norm(task_embedding) + 1e-6)
                query_embedding_np = query_embedding_np / (np.linalg.norm(query_embedding_np) + 1e-6)

                similarity = float(np.dot(task_embedding, query_embedding_np))

                if search_query.use_quantum:
                    quantum_factor = 1.0 - (task.entropy * 0.3)

                    phase_factor = math.sin(random.uniform(0, math.pi))
                    quantum_similarity = similarity * quantum_factor * (1.0 + 0.2 * phase_factor)

                    if task.state == TaskState.IN_PROGRESS:
                        quantum_similarity *= 1.1

                    similarity = quantum_similarity

                similarities.append((task_id, similarity))
            except Exception as e:
                logger.error(f"Search similarity calculation error for task {task_id}: {str(e)}")

    similarities.sort(key=lambda x: x[1], reverse=True)

    search_results = []
    for task_id, score in similarities[:search_query.limit]:
        search_results.append({
            "task": tasks[task_id],
            "relevance_score": score
        })

    return search_results


@app.post("/task-suggestions/{task_id}")
async def suggest_related_tasks(task_id: str, threshold: float = Query(0.7, ge=0.1, le=1.0)):
    """Suggest tasks for entanglement based on similarity."""
    if task_id not in tasks:
        raise HTTPException(status_code=404, detail="Task not found")

    suggestions = suggest_entanglements(task_id, threshold)

    return suggestions


@app.post("/quantum-simulation")
async def run_quantum_simulation(simulation_request: QuantumSimulationRequest):
    """Run quantum simulation on selected tasks."""
    invalid_ids = [tid for tid in simulation_request.task_ids if tid not in tasks]
    if invalid_ids:
        raise HTTPException(
            status_code=400,
            detail=f"Tasks not found: {', '.join(invalid_ids)}"
        )

    steps = min(simulation_request.simulation_steps, 10)

    simulation_results = simulate_quantum_circuit(
        tasks,
        simulation_request.task_ids,
        steps=steps
    )

    if (not simulation_results.get("error") and
            simulation_request.measurement_type == "projective" and
            "final_results" in simulation_results and
            "measurement_outcomes" in simulation_results["final_results"]):

        if random.random() < 0.3:
            for task_id, outcome in simulation_results["final_results"]["measurement_outcomes"].items():
                if task_id in tasks and random.random() < 0.5:
                    if outcome["outcome"] == "COMPLETED" and tasks[task_id].state != TaskState.COMPLETED:
                        tasks[task_id].state = TaskState.COMPLETED

                        asyncio.create_task(update_probability_distribution(task_id))

    return simulation_results


@app.post("/optimize-assignments")
async def optimize_task_assignments():
    """Optimize task assignments using intelligent methods."""
    recommendations = optimize_task_assignment(tasks)

    return {
        "recommendations": recommendations,
        "total_recommendations": len(recommendations)
    }


@app.post("/ask-question")
async def ask_task_question(task_id: str, question: str):
    """Ask a question about a task using NLP model or simple extraction."""
    if task_id not in tasks:
        raise HTTPException(status_code=404, detail="Task not found")

    task = tasks[task_id]
    context = f"Task Title: {task.title}\nDescription: {task.description}\n"
    if task.ml_summary:
        context += f"Summary: {task.ml_summary}\n"
    context += f"Status: {task.state}\nPriority: {task.priority}\n"
    context += f"Due Date: {task.due_date}\nAssignee: {task.assignee or 'Unassigned'}"

    answer = answer_task_question(question, context)

    return {
        "task_id": task_id,
        "question": question,
        "answer": answer,
        "task_title": task.title
    }


@app.get("/system-graph")
async def get_system_graph():
    """Return a representation of the task graph for visualization."""
    nodes = []
    for node_id in task_graph.nodes():
        if node_id in tasks:
            task = tasks[node_id]
            nodes.append({
                "id": node_id,
                "title": task.title,
                "state": task.state,
                "priority": task.priority,
                "entropy": task.entropy
            })

    edges = []
    for u, v, data in task_graph.edges(data=True):
        edges.append({
            "source": u,
            "target": v,
            "weight": data.get("weight", 1.0),
            "type": data.get("type", "standard")
        })

    return {
        "nodes": nodes,
        "edges": edges,
        "task_count": len(tasks),
        "entanglement_count": len(entanglements)
    }


async def enhance_task_with_ml(task_id, task_text):
    """Enhance task with ML-generated information in background."""
    if task_id not in tasks:
        return

    task = tasks[task_id]

    try:
        if ML_LIBS_AVAILABLE:
            embedding_model = get_ml_model("task_embedding")
            if embedding_model:
                task.embedding = embedding_model.encode(task_text).tolist()

            classifier = get_ml_model("task_classifier")
            if classifier and classifier[1] is not None:
                tokenizer, pipeline = classifier
                result = pipeline(task_text)
                task.category = result[0]['label']

            generator = get_ml_model("text_generator")
            if generator and generator[0] is not None and generator[1] is not None:
                tokenizer, model = generator
                inputs = tokenizer(f"Summarize: {task_text}", return_tensors="pt", max_length=100, truncation=True)
                summary_ids = model.generate(
                    inputs["input_ids"],
                    max_length=50,
                    num_beams=2,
                    early_stopping=True
                )
                task.ml_summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

        await update_vector_store(task_id)

    except Exception as e:
        logger.error(f"Error enhancing task with ML: {str(e)}")


async def update_task_graph(task_id):
    """Update task graph with a new task."""
    if task_id not in task_graph.nodes and task_id in tasks:
        task = tasks[task_id]

        task_graph.add_node(
            task_id,
            title=task.title,
            state=task.state,
            priority=task.priority,
            entropy=task.entropy
        )


async def update_vector_store(task_id):
    """Update vector store with task embedding for efficient retrieval."""
    global vector_store
    if vector_store is None or task_id not in tasks or not VECTOR_DB_AVAILABLE:
        return

    task = tasks[task_id]
    if not task.embedding:
        task_text = f"{task.title} {task.description}"
        task.embedding = calculate_task_embedding(task_text)
        tasks[task_id] = task

    try:
        doc = Document(
            page_content=f"{task.title}\n{task.description}",
            metadata={
                "task_id": task_id,
                "title": task.title,
                "state": task.state
            }
        )

        try:
            existing_docs = vector_store.get([task_id])

            if existing_docs and len(existing_docs) > 0:
                vector_store.update_document(task_id, doc)
                logger.debug(f"Updated task {task_id} in vector store")
            else:
                vector_store.add_documents([doc], ids=[task_id])
                logger.debug(f"Added task {task_id} to vector store")
        except Exception as e:
            logger.warning(f"Error checking document existence, attempting direct add: {str(e)}")
            vector_store.add_documents([doc], ids=[task_id])

    except Exception as e:
        logger.error(f"Error updating vector store: {str(e)}")


async def remove_from_vector_store(task_id):
    """Remove a task from the vector store."""
    if vector_store is None or not VECTOR_DB_AVAILABLE:
        return

    try:
        vector_store.delete([task_id])
        logger.debug(f"Removed task {task_id} from vector store")
    except Exception as e:
        logger.error(f"Error removing task from vector store: {str(e)}")
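

# Entrypoint sketch: the module imports uvicorn but never starts a server.
# Host and port below are illustrative defaults, not values defined elsewhere
# in this file; adjust them (or run `uvicorn <module>:app`) as needed.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)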