repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/utils/prompts.py | openmemory/api/app/utils/prompts.py | MEMORY_CATEGORIZATION_PROMPT = """Your task is to assign each piece of information (or “memory”) to one or more of the following categories. Feel free to use multiple categories per item when appropriate.
- Personal: family, friends, home, hobbies, lifestyle
- Relationships: social network, significant others, colleagues
- Preferences: likes, dislikes, habits, favorite media
- Health: physical fitness, mental health, diet, sleep
- Travel: trips, commutes, favorite places, itineraries
- Work: job roles, companies, projects, promotions
- Education: courses, degrees, certifications, skills development
- Projects: to‑dos, milestones, deadlines, status updates
- AI, ML & Technology: infrastructure, algorithms, tools, research
- Technical Support: bug reports, error logs, fixes
- Finance: income, expenses, investments, billing
- Shopping: purchases, wishlists, returns, deliveries
- Legal: contracts, policies, regulations, privacy
- Entertainment: movies, music, games, books, events
- Messages: emails, SMS, alerts, reminders
- Customer Support: tickets, inquiries, resolutions
- Product Feedback: ratings, bug reports, feature requests
- News: articles, headlines, trending topics
- Organization: meetings, appointments, calendars
- Goals: ambitions, KPIs, long‑term objectives
Guidelines:
- Return only the categories under 'categories' key in the JSON format.
- If you cannot categorize the memory, return an empty list with key 'categories'.
- Don't limit yourself to the categories listed above only. Feel free to create new categories based on the memory. Make sure that it is a single phrase.
"""
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/utils/permissions.py | openmemory/api/app/utils/permissions.py | from typing import Optional
from uuid import UUID
from app.models import App, Memory, MemoryState
from sqlalchemy.orm import Session
def check_memory_access_permissions(
    db: Session,
    memory: Memory,
    app_id: Optional[UUID] = None
) -> bool:
    """
    Decide whether an app may access the given memory.

    Access requires: the memory is active; the app (when provided) exists
    and is not paused; and the app-level ACL rules permit this memory.

    Args:
        db: Database session
        memory: Memory object to check access for
        app_id: Optional app ID to check permissions for

    Returns:
        bool: True if access is allowed, False otherwise
    """
    # Inactive memories are never visible.
    if memory.state != MemoryState.active:
        return False

    # Without an app context, the memory-state check is sufficient.
    if not app_id:
        return True

    # The app must exist and must not be paused.
    app = db.query(App).filter(App.id == app_id).first()
    if app is None or not app.is_active:
        return False

    # Imported lazily to avoid a circular import with the router module.
    from app.routers.memories import get_accessible_memory_ids

    allowed_ids = get_accessible_memory_ids(db, app_id)

    # None signals "no restrictions": every memory is accessible.
    return allowed_ids is None or memory.id in allowed_ids
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/utils/memory.py | openmemory/api/app/utils/memory.py | """
Memory client utilities for OpenMemory.
This module provides functionality to initialize and manage the Mem0 memory client
with automatic configuration management and Docker environment support.
Docker Ollama Configuration:
When running inside a Docker container and using Ollama as the LLM or embedder provider,
the system automatically detects the Docker environment and adjusts localhost URLs
to properly reach the host machine where Ollama is running.
Supported Docker host resolution (in order of preference):
1. OLLAMA_HOST environment variable (if set)
2. host.docker.internal (Docker Desktop for Mac/Windows)
3. Docker bridge gateway IP (typically 172.17.0.1 on Linux)
4. Fallback to 172.17.0.1
Example configuration that will be automatically adjusted:
{
"llm": {
"provider": "ollama",
"config": {
"model": "llama3.1:latest",
"ollama_base_url": "http://localhost:11434" # Auto-adjusted in Docker
}
}
}
"""
import hashlib
import json
import os
import socket
from app.database import SessionLocal
from app.models import Config as ConfigModel
from mem0 import Memory
_memory_client = None
_config_hash = None
def _get_config_hash(config_dict):
"""Generate a hash of the config to detect changes."""
config_str = json.dumps(config_dict, sort_keys=True)
return hashlib.md5(config_str.encode()).hexdigest()
def _get_docker_host_url():
"""
Determine the appropriate host URL to reach host machine from inside Docker container.
Returns the best available option for reaching the host from inside a container.
"""
# Check for custom environment variable first
custom_host = os.environ.get('OLLAMA_HOST')
if custom_host:
print(f"Using custom Ollama host from OLLAMA_HOST: {custom_host}")
return custom_host.replace('http://', '').replace('https://', '').split(':')[0]
# Check if we're running inside Docker
if not os.path.exists('/.dockerenv'):
# Not in Docker, return localhost as-is
return "localhost"
print("Detected Docker environment, adjusting host URL for Ollama...")
# Try different host resolution strategies
host_candidates = []
# 1. host.docker.internal (works on Docker Desktop for Mac/Windows)
try:
socket.gethostbyname('host.docker.internal')
host_candidates.append('host.docker.internal')
print("Found host.docker.internal")
except socket.gaierror:
pass
# 2. Docker bridge gateway (typically 172.17.0.1 on Linux)
try:
with open('/proc/net/route', 'r') as f:
for line in f:
fields = line.strip().split()
if fields[1] == '00000000': # Default route
gateway_hex = fields[2]
gateway_ip = socket.inet_ntoa(bytes.fromhex(gateway_hex)[::-1])
host_candidates.append(gateway_ip)
print(f"Found Docker gateway: {gateway_ip}")
break
except (FileNotFoundError, IndexError, ValueError):
pass
# 3. Fallback to common Docker bridge IP
if not host_candidates:
host_candidates.append('172.17.0.1')
print("Using fallback Docker bridge IP: 172.17.0.1")
# Return the first available candidate
return host_candidates[0]
def _fix_ollama_urls(config_section):
"""
Fix Ollama URLs for Docker environment.
Replaces localhost URLs with appropriate Docker host URLs.
Sets default ollama_base_url if not provided.
"""
if not config_section or "config" not in config_section:
return config_section
ollama_config = config_section["config"]
# Set default ollama_base_url if not provided
if "ollama_base_url" not in ollama_config:
ollama_config["ollama_base_url"] = "http://host.docker.internal:11434"
else:
# Check for ollama_base_url and fix if it's localhost
url = ollama_config["ollama_base_url"]
if "localhost" in url or "127.0.0.1" in url:
docker_host = _get_docker_host_url()
if docker_host != "localhost":
new_url = url.replace("localhost", docker_host).replace("127.0.0.1", docker_host)
ollama_config["ollama_base_url"] = new_url
print(f"Adjusted Ollama URL from {url} to {new_url}")
return config_section
def reset_memory_client():
    """Drop the cached client and its config hash so the next
    get_memory_client() call rebuilds from fresh configuration."""
    global _memory_client, _config_hash
    _memory_client, _config_hash = None, None
def get_default_memory_config():
    """Get default memory client configuration with sensible defaults.

    The vector store is auto-detected from environment variables, checked in
    a fixed priority order: Chroma, Qdrant, Weaviate, Redis, pgvector,
    Milvus, Elasticsearch, OpenSearch, FAISS, then a Qdrant fallback.
    LLM and embedder default to OpenAI; their "env:OPENAI_API_KEY"
    placeholders are resolved later by _parse_environment_variables.
    """
    # Base config shared by host/port-style providers; "mem0_store" is
    # presumably the docker-compose service name of the bundled store —
    # TODO confirm against the compose file.
    vector_store_config = {
        "collection_name": "openmemory",
        "host": "mem0_store",
    }
    # Check for different vector store configurations based on environment variables
    if os.environ.get('CHROMA_HOST') and os.environ.get('CHROMA_PORT'):
        vector_store_provider = "chroma"
        vector_store_config.update({
            "host": os.environ.get('CHROMA_HOST'),
            "port": int(os.environ.get('CHROMA_PORT'))
        })
    elif os.environ.get('QDRANT_HOST') and os.environ.get('QDRANT_PORT'):
        vector_store_provider = "qdrant"
        vector_store_config.update({
            "host": os.environ.get('QDRANT_HOST'),
            "port": int(os.environ.get('QDRANT_PORT'))
        })
    elif os.environ.get('WEAVIATE_CLUSTER_URL') or (os.environ.get('WEAVIATE_HOST') and os.environ.get('WEAVIATE_PORT')):
        vector_store_provider = "weaviate"
        # Prefer an explicit cluster URL if provided; otherwise build from host/port
        cluster_url = os.environ.get('WEAVIATE_CLUSTER_URL')
        if not cluster_url:
            weaviate_host = os.environ.get('WEAVIATE_HOST')
            weaviate_port = int(os.environ.get('WEAVIATE_PORT'))
            cluster_url = f"http://{weaviate_host}:{weaviate_port}"
        # Weaviate takes a cluster URL instead of host/port, so the config
        # dict is rebuilt from scratch rather than updated.
        vector_store_config = {
            "collection_name": "openmemory",
            "cluster_url": cluster_url
        }
    elif os.environ.get('REDIS_URL'):
        vector_store_provider = "redis"
        # Redis takes a single connection URL; rebuild the config dict.
        vector_store_config = {
            "collection_name": "openmemory",
            "redis_url": os.environ.get('REDIS_URL')
        }
    elif os.environ.get('PG_HOST') and os.environ.get('PG_PORT'):
        vector_store_provider = "pgvector"
        vector_store_config.update({
            "host": os.environ.get('PG_HOST'),
            "port": int(os.environ.get('PG_PORT')),
            "dbname": os.environ.get('PG_DB', 'mem0'),
            "user": os.environ.get('PG_USER', 'mem0'),
            "password": os.environ.get('PG_PASSWORD', 'mem0')
        })
    elif os.environ.get('MILVUS_HOST') and os.environ.get('MILVUS_PORT'):
        vector_store_provider = "milvus"
        # Construct the full URL as expected by MilvusDBConfig
        milvus_host = os.environ.get('MILVUS_HOST')
        milvus_port = int(os.environ.get('MILVUS_PORT'))
        milvus_url = f"http://{milvus_host}:{milvus_port}"
        vector_store_config = {
            "collection_name": "openmemory",
            "url": milvus_url,
            "token": os.environ.get('MILVUS_TOKEN', ''),  # Always include, empty string for local setup
            "db_name": os.environ.get('MILVUS_DB_NAME', ''),
            "embedding_model_dims": 1536,
            "metric_type": "COSINE"  # Using COSINE for better semantic similarity
        }
    elif os.environ.get('ELASTICSEARCH_HOST') and os.environ.get('ELASTICSEARCH_PORT'):
        vector_store_provider = "elasticsearch"
        # Construct the full URL with scheme since Elasticsearch client expects it
        elasticsearch_host = os.environ.get('ELASTICSEARCH_HOST')
        elasticsearch_port = int(os.environ.get('ELASTICSEARCH_PORT'))
        # Use http:// scheme since we're not using SSL
        full_host = f"http://{elasticsearch_host}"
        vector_store_config.update({
            "host": full_host,
            "port": elasticsearch_port,
            "user": os.environ.get('ELASTICSEARCH_USER', 'elastic'),
            "password": os.environ.get('ELASTICSEARCH_PASSWORD', 'changeme'),
            "verify_certs": False,
            "use_ssl": False,
            "embedding_model_dims": 1536
        })
    elif os.environ.get('OPENSEARCH_HOST') and os.environ.get('OPENSEARCH_PORT'):
        vector_store_provider = "opensearch"
        vector_store_config.update({
            "host": os.environ.get('OPENSEARCH_HOST'),
            "port": int(os.environ.get('OPENSEARCH_PORT'))
        })
    elif os.environ.get('FAISS_PATH'):
        vector_store_provider = "faiss"
        vector_store_config = {
            "collection_name": "openmemory",
            "path": os.environ.get('FAISS_PATH'),
            "embedding_model_dims": 1536,
            "distance_strategy": "cosine"
        }
    else:
        # Default fallback to Qdrant (host stays "mem0_store" from the base dict)
        vector_store_provider = "qdrant"
        vector_store_config.update({
            "port": 6333,
        })
    print(f"Auto-detected vector store: {vector_store_provider} with config: {vector_store_config}")
    return {
        "vector_store": {
            "provider": vector_store_provider,
            "config": vector_store_config
        },
        "llm": {
            "provider": "openai",
            "config": {
                "model": "gpt-4o-mini",
                "temperature": 0.1,
                "max_tokens": 2000,
                # "env:..." placeholders are resolved by _parse_environment_variables
                "api_key": "env:OPENAI_API_KEY"
            }
        },
        "embedder": {
            "provider": "openai",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": "env:OPENAI_API_KEY"
            }
        },
        "version": "v1.1"
    }
def _parse_environment_variables(config_dict):
"""
Parse environment variables in config values.
Converts 'env:VARIABLE_NAME' to actual environment variable values.
"""
if isinstance(config_dict, dict):
parsed_config = {}
for key, value in config_dict.items():
if isinstance(value, str) and value.startswith("env:"):
env_var = value.split(":", 1)[1]
env_value = os.environ.get(env_var)
if env_value:
parsed_config[key] = env_value
print(f"Loaded {env_var} from environment for {key}")
else:
print(f"Warning: Environment variable {env_var} not found, keeping original value")
parsed_config[key] = value
elif isinstance(value, dict):
parsed_config[key] = _parse_environment_variables(value)
else:
parsed_config[key] = value
return parsed_config
return config_dict
def get_memory_client(custom_instructions: str = None):
    """
    Get or initialize the Mem0 client.

    The client is cached in module globals (_memory_client/_config_hash)
    and only re-initialized when the effective configuration — defaults,
    overridden by the DB "main" config, with env: placeholders resolved —
    hashes differently from the cached one.

    Args:
        custom_instructions: Optional instructions for the memory project.
            Takes precedence over instructions stored in the database.

    Returns:
        Initialized Mem0 client instance or None if initialization fails.

    Raises:
        Exception: If required API keys are not set or critical configuration is missing.
    """
    global _memory_client, _config_hash
    try:
        # Start with default configuration
        config = get_default_memory_config()
        # Variable to track custom instructions
        db_custom_instructions = None
        # Load configuration from database; DB values override the defaults.
        try:
            db = SessionLocal()
            db_config = db.query(ConfigModel).filter(ConfigModel.key == "main").first()
            if db_config:
                json_config = db_config.value
                # Extract custom instructions from openmemory settings
                if "openmemory" in json_config and "custom_instructions" in json_config["openmemory"]:
                    db_custom_instructions = json_config["openmemory"]["custom_instructions"]
                # Override defaults with configurations from the database
                if "mem0" in json_config:
                    mem0_config = json_config["mem0"]
                    # Update LLM configuration if available
                    if "llm" in mem0_config and mem0_config["llm"] is not None:
                        config["llm"] = mem0_config["llm"]
                        # Fix Ollama URLs for Docker if needed
                        if config["llm"].get("provider") == "ollama":
                            config["llm"] = _fix_ollama_urls(config["llm"])
                    # Update Embedder configuration if available
                    if "embedder" in mem0_config and mem0_config["embedder"] is not None:
                        config["embedder"] = mem0_config["embedder"]
                        # Fix Ollama URLs for Docker if needed
                        if config["embedder"].get("provider") == "ollama":
                            config["embedder"] = _fix_ollama_urls(config["embedder"])
                    if "vector_store" in mem0_config and mem0_config["vector_store"] is not None:
                        config["vector_store"] = mem0_config["vector_store"]
            else:
                print("No configuration found in database, using defaults")
            db.close()
        except Exception as e:
            print(f"Warning: Error loading configuration from database: {e}")
            print("Using default configuration")
            # Continue with default configuration if database config can't be loaded
        # Use custom_instructions parameter first, then fall back to database value
        instructions_to_use = custom_instructions or db_custom_instructions
        if instructions_to_use:
            config["custom_fact_extraction_prompt"] = instructions_to_use
        # ALWAYS parse environment variables in the final config
        # This ensures that even default config values like "env:OPENAI_API_KEY" get parsed
        print("Parsing environment variables in final config...")
        config = _parse_environment_variables(config)
        # Check if config has changed by comparing hashes
        current_config_hash = _get_config_hash(config)
        # Only reinitialize if config changed or client doesn't exist
        if _memory_client is None or _config_hash != current_config_hash:
            print(f"Initializing memory client with config hash: {current_config_hash}")
            try:
                _memory_client = Memory.from_config(config_dict=config)
                _config_hash = current_config_hash
                print("Memory client initialized successfully")
            except Exception as init_error:
                # Degrade gracefully: the API keeps serving without memory features.
                print(f"Warning: Failed to initialize memory client: {init_error}")
                print("Server will continue running with limited memory functionality")
                _memory_client = None
                _config_hash = None
                return None
        return _memory_client
    except Exception as e:
        print(f"Warning: Exception occurred while initializing memory client: {e}")
        print("Server will continue running with limited memory functionality")
        return None
def get_default_user_id():
    """Return the fallback user id used when no user is specified."""
    return "default_user"
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/utils/db.py | openmemory/api/app/utils/db.py | from typing import Tuple
from app.models import App, User
from sqlalchemy.orm import Session
def get_or_create_user(db: Session, user_id: str) -> User:
    """Fetch the user with *user_id*, creating and persisting one if absent."""
    existing = db.query(User).filter(User.user_id == user_id).first()
    if existing is not None:
        return existing
    new_user = User(user_id=user_id)
    db.add(new_user)
    db.commit()
    db.refresh(new_user)
    return new_user
def get_or_create_app(db: Session, user: User, app_id: str) -> App:
    """Fetch the user's app named *app_id*, creating it if it does not exist."""
    existing = db.query(App).filter(App.owner_id == user.id, App.name == app_id).first()
    if existing is not None:
        return existing
    new_app = App(owner_id=user.id, name=app_id)
    db.add(new_app)
    db.commit()
    db.refresh(new_app)
    return new_app
def get_user_and_app(db: Session, user_id: str, app_id: str) -> Tuple[User, App]:
    """Resolve (creating if needed) the user and that user's app, in order."""
    owner = get_or_create_user(db, user_id)
    return owner, get_or_create_app(db, owner, app_id)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/utils/categorization.py | openmemory/api/app/utils/categorization.py | import logging
from typing import List
from app.utils.prompts import MEMORY_CATEGORIZATION_PROMPT
from dotenv import load_dotenv
from openai import OpenAI
from pydantic import BaseModel
from tenacity import retry, stop_after_attempt, wait_exponential
load_dotenv()
openai_client = OpenAI()
class MemoryCategories(BaseModel):
    """Structured-output schema: the model must return a list of category names."""
    categories: List[str]
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=15))
def get_categories_for_memory(memory: str) -> List[str]:
    """Categorize a memory string via OpenAI structured output.

    Sends the categorization system prompt plus the memory text and parses
    the response into MemoryCategories. Returns the categories stripped and
    lower-cased. Retries up to 3 times with exponential backoff, then
    re-raises the last error.
    """
    # Bug fix: initialize before the try block. Previously, if the API call
    # raised before `completion` was bound, the except path's debug logging
    # raised NameError, masking the original error.
    completion = None
    try:
        messages = [
            {"role": "system", "content": MEMORY_CATEGORIZATION_PROMPT},
            {"role": "user", "content": memory}
        ]

        # Let OpenAI handle the pydantic parsing directly
        completion = openai_client.beta.chat.completions.parse(
            model="gpt-4o-mini",
            messages=messages,
            response_format=MemoryCategories,
            temperature=0
        )

        parsed: MemoryCategories = completion.choices[0].message.parsed
        return [cat.strip().lower() for cat in parsed.categories]

    except Exception as e:
        logging.error(f"[ERROR] Failed to get categories: {e}")
        try:
            # Only log the raw payload if a response was actually received.
            if completion is not None:
                logging.debug(f"[DEBUG] Raw response: {completion.choices[0].message.content}")
        except Exception as debug_e:
            logging.debug(f"[DEBUG] Could not extract raw response: {debug_e}")
        raise
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/utils/__init__.py | openmemory/api/app/utils/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/routers/config.py | openmemory/api/app/routers/config.py | from typing import Any, Dict, Optional
from app.database import get_db
from app.models import Config as ConfigModel
from app.utils.memory import reset_memory_client
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel, Field
from sqlalchemy.orm import Session
router = APIRouter(prefix="/api/v1/config", tags=["config"])
class LLMConfig(BaseModel):
    """Provider-specific settings for the chat/LLM model."""
    model: str = Field(..., description="LLM model name")
    temperature: float = Field(..., description="Temperature setting for the model")
    max_tokens: int = Field(..., description="Maximum tokens to generate")
    # api_key accepts either a literal key or an "env:VAR" placeholder that is
    # resolved against the environment when the memory client initializes.
    api_key: Optional[str] = Field(None, description="API key or 'env:API_KEY' to use environment variable")
    ollama_base_url: Optional[str] = Field(None, description="Base URL for Ollama server (e.g., http://host.docker.internal:11434)")
class LLMProvider(BaseModel):
    """An LLM provider name paired with its configuration."""
    provider: str = Field(..., description="LLM provider name")
    config: LLMConfig
class EmbedderConfig(BaseModel):
    """Provider-specific settings for the embedding model."""
    model: str = Field(..., description="Embedder model name")
    # api_key accepts a literal key or an "env:VAR" placeholder.
    api_key: Optional[str] = Field(None, description="API key or 'env:API_KEY' to use environment variable")
    ollama_base_url: Optional[str] = Field(None, description="Base URL for Ollama server (e.g., http://host.docker.internal:11434)")
class EmbedderProvider(BaseModel):
    """An embedder provider name paired with its configuration."""
    provider: str = Field(..., description="Embedder provider name")
    config: EmbedderConfig
class VectorStoreProvider(BaseModel):
    """A vector store provider name paired with its free-form configuration."""
    provider: str = Field(..., description="Vector store provider name")
    # Below config can vary widely based on the vector store used. Refer https://docs.mem0.ai/components/vectordbs/config
    config: Dict[str, Any] = Field(..., description="Vector store-specific configuration")
class OpenMemoryConfig(BaseModel):
    """OpenMemory-specific settings (currently only custom instructions)."""
    custom_instructions: Optional[str] = Field(None, description="Custom instructions for memory management and fact extraction")
class Mem0Config(BaseModel):
    """mem0 engine settings; sections left as None fall back to defaults."""
    llm: Optional[LLMProvider] = None
    embedder: Optional[EmbedderProvider] = None
    vector_store: Optional[VectorStoreProvider] = None
class ConfigSchema(BaseModel):
    """Top-level configuration document stored under the "main" key."""
    openmemory: Optional[OpenMemoryConfig] = None
    mem0: Optional[Mem0Config] = None
def get_default_configuration():
    """Build the out-of-the-box configuration.

    OpenAI is used for both LLM and embedder, keyed via an
    "env:OPENAI_API_KEY" placeholder; custom instructions are unset and the
    vector store is left as None (auto-detected elsewhere).
    """
    openai_llm = {
        "provider": "openai",
        "config": {
            "model": "gpt-4o-mini",
            "temperature": 0.1,
            "max_tokens": 2000,
            "api_key": "env:OPENAI_API_KEY",
        },
    }
    openai_embedder = {
        "provider": "openai",
        "config": {
            "model": "text-embedding-3-small",
            "api_key": "env:OPENAI_API_KEY",
        },
    }
    return {
        "openmemory": {"custom_instructions": None},
        "mem0": {
            "llm": openai_llm,
            "embedder": openai_embedder,
            "vector_store": None,
        },
    }
def get_config_from_db(db: Session, key: str = "main"):
    """Load the configuration row for *key*, merging in missing defaults.

    Creates and persists the full default configuration on first use.
    For an existing row, any missing section (openmemory, mem0.llm,
    mem0.embedder, mem0.vector_store) is filled from the defaults and the
    merged result is written back.

    Bug fix: the merge previously mutated config.value in place, so the
    change-detection comparison (`config_value != config.value`) compared an
    object with itself and never persisted the merged defaults — and the
    in-place mutation was invisible to SQLAlchemy's JSON change tracking.
    Working on a deep copy fixes both problems.
    """
    import copy

    config = db.query(ConfigModel).filter(ConfigModel.key == key).first()
    if not config:
        # First run: persist the full default configuration.
        default_config = get_default_configuration()
        db_config = ConfigModel(key=key, value=default_config)
        db.add(db_config)
        db.commit()
        db.refresh(db_config)
        return default_config

    # Deep-copy so the stored value stays pristine for comparison below.
    config_value = copy.deepcopy(config.value)
    default_config = get_default_configuration()

    # Merge with defaults to ensure all required fields exist.
    if "openmemory" not in config_value:
        config_value["openmemory"] = default_config["openmemory"]
    if "mem0" not in config_value:
        config_value["mem0"] = default_config["mem0"]
    else:
        # Ensure LLM config exists with defaults
        if "llm" not in config_value["mem0"] or config_value["mem0"]["llm"] is None:
            config_value["mem0"]["llm"] = default_config["mem0"]["llm"]
        # Ensure embedder config exists with defaults
        if "embedder" not in config_value["mem0"] or config_value["mem0"]["embedder"] is None:
            config_value["mem0"]["embedder"] = default_config["mem0"]["embedder"]
        # Ensure vector_store config exists with defaults
        if "vector_store" not in config_value["mem0"]:
            config_value["mem0"]["vector_store"] = default_config["mem0"]["vector_store"]

    # Persist the merged config only if defaults actually added something.
    if config_value != config.value:
        config.value = config_value
        db.commit()
        db.refresh(config)

    return config_value
def save_config_to_db(db: Session, config: Dict[str, Any], key: str = "main"):
    """Upsert the configuration row identified by *key* and return its value."""
    db_config = db.query(ConfigModel).filter(ConfigModel.key == key).first()
    if db_config is None:
        db_config = ConfigModel(key=key, value=config)
        db.add(db_config)
    else:
        db_config.value = config
        # Clearing updated_at lets the column's onupdate hook stamp the time.
        db_config.updated_at = None
    db.commit()
    db.refresh(db_config)
    return db_config.value
@router.get("/", response_model=ConfigSchema)
async def get_configuration(db: Session = Depends(get_db)):
    """Return the current configuration, merged with defaults."""
    return get_config_from_db(db)
@router.put("/", response_model=ConfigSchema)
async def update_configuration(config: ConfigSchema, db: Session = Depends(get_db)):
    """Replace the stored configuration with the provided one.

    Bug fix: the previous version built the updated dict but never saved it,
    never reset the cached memory client, and implicitly returned None
    (violating the declared response_model). It also dereferenced
    config.mem0 without a None check, crashing when the section was omitted.
    """
    current_config = get_config_from_db(db)

    # Convert to dict for processing
    updated_config = current_config.copy()

    # Update openmemory settings if provided
    if config.openmemory is not None:
        if "openmemory" not in updated_config:
            updated_config["openmemory"] = {}
        updated_config["openmemory"].update(config.openmemory.dict(exclude_none=True))

    # Update mem0 settings only when the section was supplied
    if config.mem0 is not None:
        updated_config["mem0"] = config.mem0.dict(exclude_none=True)

    # Persist and force the memory client to pick up the new configuration
    save_config_to_db(db, updated_config)
    reset_memory_client()
    return updated_config
@router.patch("/", response_model=ConfigSchema)
async def patch_configuration(config_update: ConfigSchema, db: Session = Depends(get_db)):
    """Merge the provided partial configuration into the stored one."""
    current_config = get_config_from_db(db)

    def deep_update(target, overrides):
        # Recursively merge nested dicts; scalars and new keys overwrite.
        for key, value in overrides.items():
            if isinstance(value, dict) and isinstance(target.get(key), dict):
                target[key] = deep_update(target[key], value)
            else:
                target[key] = value
        return target

    # Only fields the caller explicitly set take part in the merge.
    patch = config_update.dict(exclude_unset=True)
    merged = deep_update(current_config, patch)

    save_config_to_db(db, merged)
    reset_memory_client()
    return merged
@router.post("/reset", response_model=ConfigSchema)
async def reset_configuration(db: Session = Depends(get_db)):
    """Restore, persist, and return the default configuration."""
    try:
        defaults = get_default_configuration()
        save_config_to_db(db, defaults)
        reset_memory_client()
        return defaults
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Failed to reset configuration: {str(e)}"
        )
@router.get("/mem0/llm", response_model=LLMProvider)
async def get_llm_configuration(db: Session = Depends(get_db)):
    """Return just the mem0 LLM section of the configuration."""
    return get_config_from_db(db).get("mem0", {}).get("llm", {})
@router.put("/mem0/llm", response_model=LLMProvider)
async def update_llm_configuration(llm_config: LLMProvider, db: Session = Depends(get_db)):
    """Replace the mem0 LLM section, persist, and reload the memory client."""
    current_config = get_config_from_db(db)
    mem0_section = current_config.setdefault("mem0", {})
    mem0_section["llm"] = llm_config.dict(exclude_none=True)
    save_config_to_db(db, current_config)
    reset_memory_client()
    return mem0_section["llm"]
@router.get("/mem0/embedder", response_model=EmbedderProvider)
async def get_embedder_configuration(db: Session = Depends(get_db)):
    """Return just the mem0 embedder section of the configuration."""
    return get_config_from_db(db).get("mem0", {}).get("embedder", {})
@router.put("/mem0/embedder", response_model=EmbedderProvider)
async def update_embedder_configuration(embedder_config: EmbedderProvider, db: Session = Depends(get_db)):
    """Replace the mem0 embedder section, persist, and reload the memory client."""
    current_config = get_config_from_db(db)
    mem0_section = current_config.setdefault("mem0", {})
    mem0_section["embedder"] = embedder_config.dict(exclude_none=True)
    save_config_to_db(db, current_config)
    reset_memory_client()
    return mem0_section["embedder"]
@router.get("/mem0/vector_store", response_model=Optional[VectorStoreProvider])
async def get_vector_store_configuration(db: Session = Depends(get_db)):
    """Return just the mem0 vector store section (may be None)."""
    return get_config_from_db(db).get("mem0", {}).get("vector_store", None)
@router.put("/mem0/vector_store", response_model=VectorStoreProvider)
async def update_vector_store_configuration(vector_store_config: VectorStoreProvider, db: Session = Depends(get_db)):
    """Replace the mem0 vector store section, persist, and reload the memory client."""
    current_config = get_config_from_db(db)
    mem0_section = current_config.setdefault("mem0", {})
    mem0_section["vector_store"] = vector_store_config.dict(exclude_none=True)
    save_config_to_db(db, current_config)
    reset_memory_client()
    return mem0_section["vector_store"]
@router.get("/openmemory", response_model=OpenMemoryConfig)
async def get_openmemory_configuration(db: Session = Depends(get_db)):
    """Return just the OpenMemory section of the configuration."""
    return get_config_from_db(db).get("openmemory", {})
@router.put("/openmemory", response_model=OpenMemoryConfig)
async def update_openmemory_configuration(openmemory_config: OpenMemoryConfig, db: Session = Depends(get_db)):
    """Merge the provided OpenMemory settings into the stored configuration."""
    current_config = get_config_from_db(db)
    section = current_config.setdefault("openmemory", {})
    section.update(openmemory_config.dict(exclude_none=True))
    save_config_to_db(db, current_config)
    reset_memory_client()
    return section
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/routers/__init__.py | openmemory/api/app/routers/__init__.py | from .apps import router as apps_router
from .backup import router as backup_router
from .config import router as config_router
from .memories import router as memories_router
from .stats import router as stats_router
__all__ = ["memories_router", "apps_router", "stats_router", "config_router", "backup_router"]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/routers/memories.py | openmemory/api/app/routers/memories.py | import logging
from datetime import UTC, datetime
from typing import List, Optional, Set
from uuid import UUID
from app.database import get_db
from app.models import (
AccessControl,
App,
Category,
Memory,
MemoryAccessLog,
MemoryState,
MemoryStatusHistory,
User,
)
from app.schemas import MemoryResponse
from app.utils.memory import get_memory_client
from app.utils.permissions import check_memory_access_permissions
from fastapi import APIRouter, Depends, HTTPException, Query
from fastapi_pagination import Page, Params
from fastapi_pagination.ext.sqlalchemy import paginate as sqlalchemy_paginate
from pydantic import BaseModel
from sqlalchemy import func
from sqlalchemy.orm import Session, joinedload
router = APIRouter(prefix="/api/v1/memories", tags=["memories"])
def get_memory_or_404(db: Session, memory_id: UUID) -> Memory:
    """Fetch a memory by id, raising HTTP 404 when it does not exist."""
    memory = db.query(Memory).filter(Memory.id == memory_id).first()
    if memory is None:
        raise HTTPException(status_code=404, detail="Memory not found")
    return memory
def update_memory_state(db: Session, memory_id: UUID, new_state: MemoryState, user_id: UUID):
    """Transition a memory to *new_state* and record the change.

    Stamps archived_at/deleted_at for the matching states, appends a
    MemoryStatusHistory row attributed to *user_id*, commits, and returns
    the updated memory. Raises HTTP 404 when the memory does not exist.
    """
    memory = get_memory_or_404(db, memory_id)
    previous_state = memory.state

    memory.state = new_state
    # Stamp the lifecycle timestamp matching the new state.
    if new_state == MemoryState.archived:
        memory.archived_at = datetime.now(UTC)
    elif new_state == MemoryState.deleted:
        memory.deleted_at = datetime.now(UTC)

    # Keep an audit trail of who changed what.
    db.add(MemoryStatusHistory(
        memory_id=memory_id,
        changed_by=user_id,
        old_state=previous_state,
        new_state=new_state,
    ))
    db.commit()
    return memory
def get_accessible_memory_ids(db: Session, app_id: UUID) -> Set[UUID]:
    """
    Resolve app-level ACL rules into the set of memory IDs the app may read.

    Returns ``None`` when every memory is accessible (no rules at all, or a
    blanket allow rule), an empty set when everything is denied, otherwise
    the explicitly allowed IDs minus any explicitly denied ones.
    """
    rules = db.query(AccessControl).filter(
        AccessControl.subject_type == "app",
        AccessControl.subject_id == app_id,
        AccessControl.object_type == "memory"
    ).all()

    # No app-level rules at all -> unrestricted access.
    if not rules:
        return None

    allowed: Set[UUID] = set()
    denied: Set[UUID] = set()
    for rule in rules:
        if rule.effect == "allow":
            if not rule.object_id:
                return None  # blanket allow: every memory accessible
            allowed.add(rule.object_id)
        elif rule.effect == "deny":
            if not rule.object_id:
                return set()  # blanket deny: nothing accessible
            denied.add(rule.object_id)

    # Explicit denials override explicit allows.
    return allowed - denied
# List all memories with filtering
@router.get("/", response_model=Page[MemoryResponse])
async def list_memories(
    user_id: str,
    app_id: Optional[UUID] = None,
    from_date: Optional[int] = Query(
        None,
        description="Filter memories created after this date (timestamp)",
        examples=[1718505600]
    ),
    to_date: Optional[int] = Query(
        None,
        description="Filter memories created before this date (timestamp)",
        examples=[1718505600]
    ),
    categories: Optional[str] = None,
    params: Params = Depends(),
    search_query: Optional[str] = None,
    sort_column: Optional[str] = Query(None, description="Column to sort by (memory, categories, app_name, created_at)"),
    sort_direction: Optional[str] = Query(None, description="Sort direction (asc or desc)"),
    db: Session = Depends(get_db)
):
    """List a user's active (non-deleted, non-archived) memories with optional
    substring search, date range, app and category filters, sorting, and
    pagination.  Results are additionally filtered by per-app access rules."""
    user = db.query(User).filter(User.user_id == user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")

    # Base query: only the user's live memories.
    query = db.query(Memory).filter(
        Memory.user_id == user.id,
        Memory.state != MemoryState.deleted,
        Memory.state != MemoryState.archived,
    )
    # Apply the substring search only when one was supplied.  (The old code
    # passed a literal Python ``True`` into filter() when search_query was
    # empty, which SQLAlchemy rejects or ignores depending on version.)
    if search_query:
        query = query.filter(Memory.content.ilike(f"%{search_query}%"))

    # Apply filters
    if app_id:
        query = query.filter(Memory.app_id == app_id)

    if from_date:
        from_datetime = datetime.fromtimestamp(from_date, tz=UTC)
        query = query.filter(Memory.created_at >= from_datetime)

    if to_date:
        to_datetime = datetime.fromtimestamp(to_date, tz=UTC)
        query = query.filter(Memory.created_at <= to_datetime)

    # Add joins for app and categories after filtering
    query = query.outerjoin(App, Memory.app_id == App.id)
    query = query.outerjoin(Memory.categories)

    # Apply category filter if provided (comma-separated category names)
    if categories:
        category_list = [c.strip() for c in categories.split(",")]
        query = query.filter(Category.name.in_(category_list))

    # Map the documented sort keys onto real columns.  The previous
    # ``getattr(Memory, sort_column)`` silently ignored 'memory' and
    # 'app_name' because those are not attributes of the Memory model.
    sort_mapping = {
        'memory': Memory.content,
        'app_name': App.name,
        'created_at': Memory.created_at,
    }
    if sort_column:
        sort_field = sort_mapping.get(sort_column, getattr(Memory, sort_column, None))
        if sort_field is not None:
            query = query.order_by(sort_field.desc() if sort_direction == "desc" else sort_field.asc())

    # Add eager loading for app and categories
    query = query.options(
        joinedload(Memory.app),
        joinedload(Memory.categories)
    ).distinct(Memory.id)

    # Paginate, converting rows to the response schema and dropping any
    # memory the requesting app is not permitted to see.
    return sqlalchemy_paginate(
        query,
        params,
        transformer=lambda items: [
            MemoryResponse(
                id=memory.id,
                content=memory.content,
                created_at=memory.created_at,
                state=memory.state.value,
                app_id=memory.app_id,
                app_name=memory.app.name if memory.app else None,
                categories=[category.name for category in memory.categories],
                metadata_=memory.metadata_
            )
            for memory in items
            if check_memory_access_permissions(db, memory, app_id)
        ]
    )
# Get all categories
@router.get("/categories")
async def get_categories(
    user_id: str,
    db: Session = Depends(get_db)
):
    """Return the distinct categories attached to a user's live memories."""
    user = db.query(User).filter(User.user_id == user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")

    # Collect every category across the user's non-deleted, non-archived
    # memories, then de-duplicate via a set.
    memories = db.query(Memory).filter(
        Memory.user_id == user.id,
        Memory.state != MemoryState.deleted,
        Memory.state != MemoryState.archived,
    ).all()
    unique_categories = {
        category for memory in memories for category in memory.categories
    }

    return {
        "categories": list(unique_categories),
        "total": len(unique_categories)
    }
class CreateMemoryRequest(BaseModel):
    # External user identifier (User.user_id), not the internal UUID.
    user_id: str
    # Raw text to store as the memory.
    text: str
    # Arbitrary metadata persisted on the created Memory row.
    metadata: dict = {}
    # When True, the memory client infers/deduplicates memories from the text.
    infer: bool = True
    # App name to attribute the memory to; created on demand if missing.
    app: str = "openmemory"
# Create new memory
@router.post("/")
async def create_memory(
    request: CreateMemoryRequest,
    db: Session = Depends(get_db)
):
    """Create one or more memories from free text via the memory client.

    The vector store (memory client) is the source of truth for memory IDs:
    rows are mirrored into the relational DB under the exact IDs the client
    returns.  Returns the first created memory for API compatibility, or a
    JSON error payload when the memory client is unavailable or the add
    operation fails.  Raises 404 for unknown users and 403 for paused apps.
    """
    user = db.query(User).filter(User.user_id == request.user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")

    # Get or create app
    app_obj = db.query(App).filter(App.name == request.app,
                                   App.owner_id == user.id).first()
    if not app_obj:
        app_obj = App(name=request.app, owner_id=user.id)
        db.add(app_obj)
        db.commit()
        db.refresh(app_obj)

    # Check if app is active
    if not app_obj.is_active:
        raise HTTPException(status_code=403, detail=f"App {request.app} is currently paused on OpenMemory. Cannot create new memories.")

    # Log what we're about to do
    logging.info(f"Creating memory for user_id: {request.user_id} with app: {request.app}")

    # Try to get memory client safely
    try:
        memory_client = get_memory_client()
        if not memory_client:
            raise Exception("Memory client is not available")
    except Exception as client_error:
        logging.warning(f"Memory client unavailable: {client_error}. Creating memory in database only.")
        # Return a json response with the error
        return {
            "error": str(client_error)
        }

    # Try to save to Qdrant via memory_client
    try:
        qdrant_response = memory_client.add(
            request.text,
            user_id=request.user_id,  # Use string user_id to match search
            metadata={
                "source_app": "openmemory",
                "mcp_client": request.app,
            },
            infer=request.infer
        )

        # Log the response for debugging
        logging.info(f"Qdrant response: {qdrant_response}")

        # Mirror every ADD result into the relational database.
        if isinstance(qdrant_response, dict) and 'results' in qdrant_response:
            created_memories = []

            for result in qdrant_response['results']:
                if result['event'] == 'ADD':
                    # Use the exact ID the vector store generated.
                    memory_id = UUID(result['id'])

                    # Check if memory already exists
                    existing_memory = db.query(Memory).filter(Memory.id == memory_id).first()

                    if existing_memory:
                        # Record the true prior state BEFORE mutating the row.
                        # (The old code's ternary evaluated to
                        # MemoryState.deleted in both branches, so history
                        # always logged old_state=deleted.)
                        prior_state = existing_memory.state
                        existing_memory.state = MemoryState.active
                        existing_memory.content = result['memory']
                        memory = existing_memory
                    else:
                        # Brand-new row: there was no previous live state.
                        prior_state = MemoryState.deleted
                        # Create memory with the EXACT SAME ID from Qdrant
                        memory = Memory(
                            id=memory_id,  # Use the same ID that Qdrant generated
                            user_id=user.id,
                            app_id=app_obj.id,
                            content=result['memory'],
                            metadata_=request.metadata,
                            state=MemoryState.active
                        )
                        db.add(memory)

                    # Create history entry
                    history = MemoryStatusHistory(
                        memory_id=memory_id,
                        changed_by=user.id,
                        old_state=prior_state,
                        new_state=MemoryState.active
                    )
                    db.add(history)

                    created_memories.append(memory)

            # Commit all changes at once
            if created_memories:
                db.commit()
                for memory in created_memories:
                    db.refresh(memory)

                # Return the first memory (for API compatibility)
                # but all memories are now saved to the database
                return created_memories[0]

    except Exception as qdrant_error:
        logging.warning(f"Qdrant operation failed: {qdrant_error}.")
        # Return a json response with the error
        return {
            "error": str(qdrant_error)
        }
# Get memory by ID
@router.get("/{memory_id}")
async def get_memory(
    memory_id: UUID,
    db: Session = Depends(get_db)
):
    """Return a single memory as a plain dict; 404 when it does not exist."""
    mem = get_memory_or_404(db, memory_id)
    payload = {
        "id": mem.id,
        "text": mem.content,
        "created_at": int(mem.created_at.timestamp()),
        "state": mem.state.value,
        "app_id": mem.app_id,
        "app_name": mem.app.name if mem.app else None,
        "categories": [category.name for category in mem.categories],
        "metadata_": mem.metadata_,
    }
    return payload
class DeleteMemoriesRequest(BaseModel):
    # IDs to delete (hard-delete in the vector store, soft-delete in the DB).
    memory_ids: List[UUID]
    # External user identifier performing the deletion.
    user_id: str
# Delete multiple memories
@router.delete("/")
async def delete_memories(
    request: DeleteMemoriesRequest,
    db: Session = Depends(get_db)
):
    """Delete the listed memories: best-effort removal from the vector store,
    then soft-delete (state change) in the relational database."""
    user = db.query(User).filter(User.user_id == request.user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")

    # The vector-store client must be reachable before we touch anything.
    try:
        memory_client = get_memory_client()
        if not memory_client:
            raise HTTPException(
                status_code=503,
                detail="Memory client is not available"
            )
    except HTTPException:
        raise
    except Exception as client_error:
        logging.error(f"Memory client initialization failed: {client_error}")
        raise HTTPException(
            status_code=503,
            detail=f"Memory service unavailable: {str(client_error)}"
        )

    # Vector-store deletion failures are logged but do not block the DB
    # soft-delete.
    for mem_id in request.memory_ids:
        try:
            memory_client.delete(str(mem_id))
        except Exception as delete_error:
            logging.warning(f"Failed to delete memory {mem_id} from vector store: {delete_error}")
        update_memory_state(db, mem_id, MemoryState.deleted, user.id)

    return {"message": f"Successfully deleted {len(request.memory_ids)} memories"}
# Archive memories
@router.post("/actions/archive")
async def archive_memories(
    memory_ids: List[UUID],
    user_id: UUID,
    db: Session = Depends(get_db)
):
    """Move each listed memory into the archived state (with history entries)."""
    for target_id in memory_ids:
        update_memory_state(db, target_id, MemoryState.archived, user_id)
    return {"message": f"Successfully archived {len(memory_ids)} memories"}
class PauseMemoriesRequest(BaseModel):
    # Explicit memory IDs to pause (used alone or together with all_for_app).
    memory_ids: Optional[List[UUID]] = None
    # Pause every memory tagged with any of these categories.
    category_ids: Optional[List[UUID]] = None
    # Pause every memory belonging to this app.
    app_id: Optional[UUID] = None
    # When True together with memory_ids, pause via a single bulk query.
    all_for_app: bool = False
    # When True, pause all memories regardless of the other selectors.
    global_pause: bool = False
    # Target state; the handler defaults to MemoryState.paused when omitted.
    state: Optional[MemoryState] = None
    # External user identifier making the request.
    user_id: str
# Pause access to memories
@router.post("/actions/pause")
async def pause_memories(
    request: PauseMemoriesRequest,
    db: Session = Depends(get_db)
):
    """Set a pause state (default MemoryState.paused) on a scoped set of memories.

    Scopes are checked in priority order: global_pause, app_id,
    (all_for_app + memory_ids), memory_ids, category_ids.  Raises 404 for an
    unknown user and 400 when no scope matched.  All branches are restricted
    to the requesting user's memories.
    """
    global_pause = request.global_pause
    all_for_app = request.all_for_app
    app_id = request.app_id
    memory_ids = request.memory_ids
    category_ids = request.category_ids
    state = request.state or MemoryState.paused

    user = db.query(User).filter(User.user_id == request.user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")

    user_id = user.id

    if global_pause:
        # Pause all of THIS USER'S live memories.  The query was previously
        # unscoped, which paused every user's memories.
        memories = db.query(Memory).filter(
            Memory.user_id == user.id,
            Memory.state != MemoryState.deleted,
            Memory.state != MemoryState.archived
        ).all()
        for memory in memories:
            update_memory_state(db, memory.id, state, user_id)
        return {"message": "Successfully paused all memories"}

    if app_id:
        # Pause all memories for an app
        memories = db.query(Memory).filter(
            Memory.app_id == app_id,
            Memory.user_id == user.id,
            Memory.state != MemoryState.deleted,
            Memory.state != MemoryState.archived
        ).all()
        for memory in memories:
            update_memory_state(db, memory.id, state, user_id)
        return {"message": f"Successfully paused all memories for app {app_id}"}

    if all_for_app and memory_ids:
        # Bulk-pause the given memories, scoped to the user
        memories = db.query(Memory).filter(
            Memory.user_id == user.id,
            Memory.state != MemoryState.deleted,
            Memory.id.in_(memory_ids)
        ).all()
        for memory in memories:
            update_memory_state(db, memory.id, state, user_id)
        return {"message": "Successfully paused all memories"}

    if memory_ids:
        # Pause specific memories
        for memory_id in memory_ids:
            update_memory_state(db, memory_id, state, user_id)
        return {"message": f"Successfully paused {len(memory_ids)} memories"}

    if category_ids:
        # Pause memories by category, restricted to this user's memories.
        # (The query was previously unscoped across users.)
        memories = db.query(Memory).join(Memory.categories).filter(
            Memory.user_id == user.id,
            Category.id.in_(category_ids),
            Memory.state != MemoryState.deleted,
            Memory.state != MemoryState.archived
        ).all()
        for memory in memories:
            update_memory_state(db, memory.id, state, user_id)
        return {"message": f"Successfully paused memories in {len(category_ids)} categories"}

    raise HTTPException(status_code=400, detail="Invalid pause request parameters")
# Get memory access logs
@router.get("/{memory_id}/access-log")
async def get_memory_access_log(
    memory_id: UUID,
    page: int = Query(1, ge=1),
    page_size: int = Query(10, ge=1, le=100),
    db: Session = Depends(get_db)
):
    """Return one page of access-log entries for a memory, newest first,
    with the accessing app's name attached to each entry."""
    base = db.query(MemoryAccessLog).filter(MemoryAccessLog.memory_id == memory_id)
    total = base.count()
    offset = (page - 1) * page_size
    logs = (
        base.order_by(MemoryAccessLog.accessed_at.desc())
        .offset(offset)
        .limit(page_size)
        .all()
    )

    # Resolve each entry's app name (None when the app row is gone).
    for entry in logs:
        app = db.query(App).filter(App.id == entry.app_id).first()
        entry.app_name = app.name if app else None

    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "logs": logs
    }
class UpdateMemoryRequest(BaseModel):
    # Replacement text for the memory.
    memory_content: str
    # External user identifier making the update.
    user_id: str
# Update a memory
@router.put("/{memory_id}")
async def update_memory(
    memory_id: UUID,
    request: UpdateMemoryRequest,
    db: Session = Depends(get_db)
):
    """Replace a memory's text content and return the refreshed row.

    NOTE(review): only the relational row is updated here — presumably the
    vector-store copy should be re-embedded too; confirm with the memory
    client owners.
    """
    requester = db.query(User).filter(User.user_id == request.user_id).first()
    if requester is None:
        raise HTTPException(status_code=404, detail="User not found")

    memory = get_memory_or_404(db, memory_id)
    memory.content = request.memory_content
    db.commit()
    db.refresh(memory)
    return memory
class FilterMemoriesRequest(BaseModel):
    # External user identifier whose memories to filter.
    user_id: str
    # 1-based page number.
    page: int = 1
    # Page size.
    size: int = 10
    # Case-insensitive substring match on memory content.
    search_query: Optional[str] = None
    # Restrict to memories created by these apps.
    app_ids: Optional[List[UUID]] = None
    # Restrict to memories tagged with these categories.
    category_ids: Optional[List[UUID]] = None
    # One of: 'memory', 'app_name', 'created_at'.
    sort_column: Optional[str] = None
    # 'asc' or 'desc'; must be supplied together with sort_column.
    sort_direction: Optional[str] = None
    # Inclusive unix-timestamp lower bound on created_at.
    from_date: Optional[int] = None
    # Inclusive unix-timestamp upper bound on created_at.
    to_date: Optional[int] = None
    # Include archived memories when True.
    show_archived: Optional[bool] = False
@router.post("/filter", response_model=Page[MemoryResponse])
async def filter_memories(
request: FilterMemoriesRequest,
db: Session = Depends(get_db)
):
user = db.query(User).filter(User.user_id == request.user_id).first()
if not user:
raise HTTPException(status_code=404, detail="User not found")
# Build base query
query = db.query(Memory).filter(
Memory.user_id == user.id,
Memory.state != MemoryState.deleted,
)
# Filter archived memories based on show_archived parameter
if not request.show_archived:
query = query.filter(Memory.state != MemoryState.archived)
# Apply search filter
if request.search_query:
query = query.filter(Memory.content.ilike(f"%{request.search_query}%"))
# Apply app filter
if request.app_ids:
query = query.filter(Memory.app_id.in_(request.app_ids))
# Add joins for app and categories
query = query.outerjoin(App, Memory.app_id == App.id)
# Apply category filter
if request.category_ids:
query = query.join(Memory.categories).filter(Category.id.in_(request.category_ids))
else:
query = query.outerjoin(Memory.categories)
# Apply date filters
if request.from_date:
from_datetime = datetime.fromtimestamp(request.from_date, tz=UTC)
query = query.filter(Memory.created_at >= from_datetime)
if request.to_date:
to_datetime = datetime.fromtimestamp(request.to_date, tz=UTC)
query = query.filter(Memory.created_at <= to_datetime)
# Apply sorting
if request.sort_column and request.sort_direction:
sort_direction = request.sort_direction.lower()
if sort_direction not in ['asc', 'desc']:
raise HTTPException(status_code=400, detail="Invalid sort direction")
sort_mapping = {
'memory': Memory.content,
'app_name': App.name,
'created_at': Memory.created_at
}
if request.sort_column not in sort_mapping:
raise HTTPException(status_code=400, detail="Invalid sort column")
sort_field = sort_mapping[request.sort_column]
if sort_direction == 'desc':
query = query.order_by(sort_field.desc())
else:
query = query.order_by(sort_field.asc())
else:
# Default sorting
query = query.order_by(Memory.created_at.desc())
# Add eager loading for categories and make the query distinct
query = query.options(
joinedload(Memory.categories)
).distinct(Memory.id)
# Use fastapi-pagination's paginate function
return sqlalchemy_paginate(
query,
Params(page=request.page, size=request.size),
transformer=lambda items: [
MemoryResponse(
id=memory.id,
content=memory.content,
created_at=memory.created_at,
state=memory.state.value,
app_id=memory.app_id,
app_name=memory.app.name if memory.app else None,
categories=[category.name for category in memory.categories],
metadata_=memory.metadata_
)
for memory in items
]
)
@router.get("/{memory_id}/related", response_model=Page[MemoryResponse])
async def get_related_memories(
memory_id: UUID,
user_id: str,
params: Params = Depends(),
db: Session = Depends(get_db)
):
# Validate user
user = db.query(User).filter(User.user_id == user_id).first()
if not user:
raise HTTPException(status_code=404, detail="User not found")
# Get the source memory
memory = get_memory_or_404(db, memory_id)
# Extract category IDs from the source memory
category_ids = [category.id for category in memory.categories]
if not category_ids:
return Page.create([], total=0, params=params)
# Build query for related memories
query = db.query(Memory).distinct(Memory.id).filter(
Memory.user_id == user.id,
Memory.id != memory_id,
Memory.state != MemoryState.deleted
).join(Memory.categories).filter(
Category.id.in_(category_ids)
).options(
joinedload(Memory.categories),
joinedload(Memory.app)
).order_by(
func.count(Category.id).desc(),
Memory.created_at.desc()
).group_by(Memory.id)
# ⚡ Force page size to be 5
params = Params(page=params.page, size=5)
return sqlalchemy_paginate(
query,
params,
transformer=lambda items: [
MemoryResponse(
id=memory.id,
content=memory.content,
created_at=memory.created_at,
state=memory.state.value,
app_id=memory.app_id,
app_name=memory.app.name if memory.app else None,
categories=[category.name for category in memory.categories],
metadata_=memory.metadata_
)
for memory in items
]
) | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/routers/apps.py | openmemory/api/app/routers/apps.py | from typing import Optional
from uuid import UUID
from app.database import get_db
from app.models import App, Memory, MemoryAccessLog, MemoryState
from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy import desc, func
from sqlalchemy.orm import Session, joinedload
router = APIRouter(prefix="/api/v1/apps", tags=["apps"])
# Helper functions
def get_app_or_404(db: Session, app_id: UUID) -> App:
    """Fetch an App by primary key or raise a 404 HTTPException."""
    found = db.query(App).filter(App.id == app_id).first()
    if found is None:
        raise HTTPException(status_code=404, detail="App not found")
    return found
# List all apps with filtering
@router.get("/")
async def list_apps(
    name: Optional[str] = None,
    is_active: Optional[bool] = None,
    sort_by: str = 'name',
    sort_direction: str = 'asc',
    page: int = Query(1, ge=1),
    page_size: int = Query(10, ge=1, le=100),
    db: Session = Depends(get_db)
):
    """List apps with memories-created / memories-accessed counts, optional
    name and active-state filters, sorting, and page/page_size pagination."""
    # Create a subquery for memory counts (non-deleted states only)
    memory_counts = db.query(
        Memory.app_id,
        func.count(Memory.id).label('memory_count')
    ).filter(
        Memory.state.in_([MemoryState.active, MemoryState.paused, MemoryState.archived])
    ).group_by(Memory.app_id).subquery()
    # Create a subquery counting DISTINCT memories each app has accessed
    access_counts = db.query(
        MemoryAccessLog.app_id,
        func.count(func.distinct(MemoryAccessLog.memory_id)).label('access_count')
    ).group_by(MemoryAccessLog.app_id).subquery()
    # Base query: each app with both counts, defaulting to 0 when absent
    query = db.query(
        App,
        func.coalesce(memory_counts.c.memory_count, 0).label('total_memories_created'),
        func.coalesce(access_counts.c.access_count, 0).label('total_memories_accessed')
    )
    # Join with subqueries; outer joins keep apps with no memories/accesses
    query = query.outerjoin(
        memory_counts,
        App.id == memory_counts.c.app_id
    ).outerjoin(
        access_counts,
        App.id == access_counts.c.app_id
    )
    if name:
        query = query.filter(App.name.ilike(f"%{name}%"))
    if is_active is not None:
        query = query.filter(App.is_active == is_active)
    # Apply sorting; unknown sort_by values fall back to name
    if sort_by == 'name':
        sort_field = App.name
    elif sort_by == 'memories':
        sort_field = func.coalesce(memory_counts.c.memory_count, 0)
    elif sort_by == 'memories_accessed':
        sort_field = func.coalesce(access_counts.c.access_count, 0)
    else:
        sort_field = App.name  # default sort
    if sort_direction == 'desc':
        query = query.order_by(desc(sort_field))
    else:
        query = query.order_by(sort_field)
    total = query.count()
    apps = query.offset((page - 1) * page_size).limit(page_size).all()
    # Each row is a (App, created_count, accessed_count) tuple
    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "apps": [
            {
                "id": app[0].id,
                "name": app[0].name,
                "is_active": app[0].is_active,
                "total_memories_created": app[1],
                "total_memories_accessed": app[2]
            }
            for app in apps
        ]
    }
# Get app details
@router.get("/{app_id}")
async def get_app_details(
    app_id: UUID,
    db: Session = Depends(get_db)
):
    """Return activity stats for one app: active flag, memory counts, and
    first/last access timestamps; 404 when the app does not exist."""
    app = get_app_or_404(db, app_id)

    # Aggregate all access-log statistics in a single query.
    access_stats = db.query(
        func.count(MemoryAccessLog.id).label("total_memories_accessed"),
        func.min(MemoryAccessLog.accessed_at).label("first_accessed"),
        func.max(MemoryAccessLog.accessed_at).label("last_accessed")
    ).filter(MemoryAccessLog.app_id == app_id).first()

    created_count = db.query(Memory).filter(Memory.app_id == app_id).count()

    return {
        "is_active": app.is_active,
        "total_memories_created": created_count,
        "total_memories_accessed": access_stats.total_memories_accessed or 0,
        "first_accessed": access_stats.first_accessed,
        "last_accessed": access_stats.last_accessed
    }
# List memories created by app
@router.get("/{app_id}/memories")
async def list_app_memories(
    app_id: UUID,
    page: int = Query(1, ge=1),
    page_size: int = Query(10, ge=1, le=100),
    db: Session = Depends(get_db)
):
    """Page through the non-deleted memories created by one app, newest first."""
    get_app_or_404(db, app_id)

    visible_states = [MemoryState.active, MemoryState.paused, MemoryState.archived]
    base = db.query(Memory).filter(
        Memory.app_id == app_id,
        Memory.state.in_(visible_states)
    ).options(joinedload(Memory.categories))  # eager-load categories

    total = base.count()
    rows = (
        base.order_by(Memory.created_at.desc())
        .offset((page - 1) * page_size)
        .limit(page_size)
        .all()
    )

    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "memories": [
            {
                "id": row.id,
                "content": row.content,
                "created_at": row.created_at,
                "state": row.state.value,
                "app_id": row.app_id,
                "categories": [category.name for category in row.categories],
                "metadata_": row.metadata_
            }
            for row in rows
        ]
    }
# List memories accessed by app
@router.get("/{app_id}/accessed")
async def list_app_accessed_memories(
    app_id: UUID,
    page: int = Query(1, ge=1),
    page_size: int = Query(10, ge=1, le=100),
    db: Session = Depends(get_db)
):
    """List the memories this app has accessed, most-frequently-accessed first."""
    # Get memories with access counts (one row per memory via group_by)
    query = db.query(
        Memory,
        func.count(MemoryAccessLog.id).label("access_count")
    ).join(
        MemoryAccessLog,
        Memory.id == MemoryAccessLog.memory_id
    ).filter(
        MemoryAccessLog.app_id == app_id
    ).group_by(
        Memory.id
    ).order_by(
        desc("access_count")
    )
    # Add eager loading for categories
    query = query.options(joinedload(Memory.categories))
    # NOTE(review): count() on a grouped query — verify it yields the number
    # of groups (memories), not raw joined rows, on this SQLAlchemy version.
    total = query.count()
    results = query.offset((page - 1) * page_size).limit(page_size).all()
    return {
        "total": total,
        "page": page,
        "page_size": page_size,
        "memories": [
            {
                "memory": {
                    "id": memory.id,
                    "content": memory.content,
                    "created_at": memory.created_at,
                    "state": memory.state.value,
                    "app_id": memory.app_id,
                    "app_name": memory.app.name if memory.app else None,
                    "categories": [category.name for category in memory.categories],
                    "metadata_": memory.metadata_
                },
                "access_count": count
            }
            for memory, count in results
        ]
    }
@router.put("/{app_id}")
async def update_app_details(
app_id: UUID,
is_active: bool,
db: Session = Depends(get_db)
):
app = get_app_or_404(db, app_id)
app.is_active = is_active
db.commit()
return {"status": "success", "message": "Updated app details successfully"}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/routers/backup.py | openmemory/api/app/routers/backup.py | from datetime import UTC, datetime
import io
import json
import gzip
import zipfile
from typing import Optional, List, Dict, Any
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Query, Form
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from sqlalchemy.orm import Session, joinedload
from sqlalchemy import and_
from app.database import get_db
from app.models import (
User, App, Memory, MemoryState, Category, memory_categories,
MemoryStatusHistory, AccessControl
)
from app.utils.memory import get_memory_client
from uuid import uuid4
router = APIRouter(prefix="/api/v1/backup", tags=["backup"])
class ExportRequest(BaseModel):
    # External user identifier whose data to export.
    user_id: str
    # Optional: restrict the export to memories created by this app.
    app_id: Optional[UUID] = None
    # Optional inclusive unix-timestamp lower bound on Memory.created_at.
    from_date: Optional[int] = None
    # Optional inclusive unix-timestamp upper bound on Memory.created_at.
    to_date: Optional[int] = None
    # NOTE(review): not consumed by the visible export paths — confirm intent.
    include_vectors: bool = True
def _iso(dt: Optional[datetime]) -> Optional[str]:
if isinstance(dt, datetime):
try:
return dt.astimezone(UTC).isoformat()
except:
return dt.replace(tzinfo=UTC).isoformat()
return None
def _parse_iso(dt: Optional[str]) -> Optional[datetime]:
if not dt:
return None
try:
return datetime.fromisoformat(dt)
except Exception:
try:
return datetime.fromisoformat(dt.replace("Z", "+00:00"))
except Exception:
return None
def _export_sqlite(db: Session, req: ExportRequest) -> Dict[str, Any]:
    """Build a JSON-serializable snapshot of the user's relational data.

    Covers the user row, the apps and categories referenced by the exported
    memories, the memories themselves, the memory<->category link rows,
    status history, app-level access controls, and export metadata.
    Raises 404 when the user does not exist.
    """
    user = db.query(User).filter(User.user_id == req.user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    # Optional inclusive created_at bounds (unix timestamps, UTC).
    time_filters = []
    if req.from_date:
        time_filters.append(Memory.created_at >= datetime.fromtimestamp(req.from_date, tz=UTC))
    if req.to_date:
        time_filters.append(Memory.created_at <= datetime.fromtimestamp(req.to_date, tz=UTC))
    mem_q = (
        db.query(Memory)
        .options(joinedload(Memory.categories), joinedload(Memory.app))
        .filter(
            Memory.user_id == user.id,
            *(time_filters or []),
            * ( [Memory.app_id == req.app_id] if req.app_id else [] ),
        )
    )
    memories = mem_q.all()
    memory_ids = [m.id for m in memories]
    # Only export the apps/categories actually referenced by these memories.
    app_ids = sorted({m.app_id for m in memories if m.app_id})
    apps = db.query(App).filter(App.id.in_(app_ids)).all() if app_ids else []
    cats = sorted({c for m in memories for c in m.categories}, key = lambda c: str(c.id))
    # Raw association rows linking memories to categories.
    mc_rows = db.execute(
        memory_categories.select().where(memory_categories.c.memory_id.in_(memory_ids))
    ).fetchall() if memory_ids else []
    history = db.query(MemoryStatusHistory).filter(MemoryStatusHistory.memory_id.in_(memory_ids)).all() if memory_ids else []
    # App-level ACL rows for the exported apps only.
    acls = db.query(AccessControl).filter(
        AccessControl.subject_type == "app",
        AccessControl.subject_id.in_(app_ids) if app_ids else False
    ).all() if app_ids else []
    # All UUIDs are stringified and datetimes ISO-formatted for JSON.
    return {
        "user": {
            "id": str(user.id),
            "user_id": user.user_id,
            "name": user.name,
            "email": user.email,
            "metadata": user.metadata_,
            "created_at": _iso(user.created_at),
            "updated_at": _iso(user.updated_at)
        },
        "apps": [
            {
                "id": str(a.id),
                "owner_id": str(a.owner_id),
                "name": a.name,
                "description": a.description,
                "metadata": a.metadata_,
                "is_active": a.is_active,
                "created_at": _iso(a.created_at),
                "updated_at": _iso(a.updated_at),
            }
            for a in apps
        ],
        "categories": [
            {
                "id": str(c.id),
                "name": c.name,
                "description": c.description,
                "created_at": _iso(c.created_at),
                "updated_at": _iso(c.updated_at),
            }
            for c in cats
        ],
        "memories": [
            {
                "id": str(m.id),
                "user_id": str(m.user_id),
                "app_id": str(m.app_id) if m.app_id else None,
                "content": m.content,
                "metadata": m.metadata_,
                "state": m.state.value,
                "created_at": _iso(m.created_at),
                "updated_at": _iso(m.updated_at),
                "archived_at": _iso(m.archived_at),
                "deleted_at": _iso(m.deleted_at),
                "category_ids": [str(c.id) for c in m.categories], #TODO: figure out a way to add category names simply to this
            }
            for m in memories
        ],
        "memory_categories": [
            {"memory_id": str(r.memory_id), "category_id": str(r.category_id)}
            for r in mc_rows
        ],
        "status_history": [
            {
                "id": str(h.id),
                "memory_id": str(h.memory_id),
                "changed_by": str(h.changed_by),
                "old_state": h.old_state.value,
                "new_state": h.new_state.value,
                "changed_at": _iso(h.changed_at),
            }
            for h in history
        ],
        "access_controls": [
            {
                "id": str(ac.id),
                "subject_type": ac.subject_type,
                "subject_id": str(ac.subject_id) if ac.subject_id else None,
                "object_type": ac.object_type,
                "object_id": str(ac.object_id) if ac.object_id else None,
                "effect": ac.effect,
                "created_at": _iso(ac.created_at),
            }
            for ac in acls
        ],
        "export_meta": {
            "app_id_filter": str(req.app_id) if req.app_id else None,
            "from_date": req.from_date,
            "to_date": req.to_date,
            "version": "1",
            "generated_at": datetime.now(UTC).isoformat(),
        },
    }
def _export_logical_memories_gz(
    db: Session,
    *,
    user_id: str,
    app_id: Optional[UUID] = None,
    from_date: Optional[int] = None,
    to_date: Optional[int] = None
) -> bytes:
    """
    Export a provider-agnostic backup of memories so they can be restored to any vector DB
    by re-embedding content. One JSON object per line, gzip-compressed.

    Raises 404 when the user does not exist.

    Schema (per line):
    {
      "id": "<uuid>",
      "content": "<text>",
      "metadata": {...},
      "created_at": "<iso8601 or null>",
      "updated_at": "<iso8601 or null>",
      "state": "active|paused|archived|deleted",
      "app": "<app name or null>",
      "categories": ["catA", "catB", ...]
    }
    """
    user = db.query(User).filter(User.user_id == user_id).first()
    if not user:
        raise HTTPException(status_code=404, detail="User not found")
    # Optional inclusive created_at bounds (unix timestamps, UTC).
    time_filters = []
    if from_date:
        time_filters.append(Memory.created_at >= datetime.fromtimestamp(from_date, tz=UTC))
    if to_date:
        time_filters.append(Memory.created_at <= datetime.fromtimestamp(to_date, tz=UTC))
    q = (
        db.query(Memory)
        .options(joinedload(Memory.categories), joinedload(Memory.app))
        .filter(
            Memory.user_id == user.id,
            *(time_filters or []),
        )
    )
    if app_id:
        q = q.filter(Memory.app_id == app_id)
    # Stream one JSON line per memory straight into an in-memory gzip buffer.
    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode="wb") as gz:
        for m in q.all():
            record = {
                "id": str(m.id),
                "content": m.content,
                "metadata": m.metadata_ or {},
                "created_at": _iso(m.created_at),
                "updated_at": _iso(m.updated_at),
                "state": m.state.value,
                "app": m.app.name if m.app else None,
                "categories": [c.name for c in m.categories],
            }
            gz.write((json.dumps(record) + "\n").encode("utf-8"))
    return buf.getvalue()
@router.post("/export")
async def export_backup(req: ExportRequest, db: Session = Depends(get_db)):
sqlite_payload = _export_sqlite(db=db, req=req)
memories_blob = _export_logical_memories_gz(
db=db,
user_id=req.user_id,
app_id=req.app_id,
from_date=req.from_date,
to_date=req.to_date,
)
#TODO: add vector store specific exports in future for speed
zip_buf = io.BytesIO()
with zipfile.ZipFile(zip_buf, "w", compression=zipfile.ZIP_DEFLATED) as zf:
zf.writestr("memories.json", json.dumps(sqlite_payload, indent=2))
zf.writestr("memories.jsonl.gz", memories_blob)
zip_buf.seek(0)
return StreamingResponse(
zip_buf,
media_type="application/zip",
headers={"Content-Disposition": f'attachment; filename="memories_export_{req.user_id}.zip"'},
)
@router.post("/import")
async def import_backup(
file: UploadFile = File(..., description="Zip with memories.json and memories.jsonl.gz"),
user_id: str = Form(..., description="Import memories into this user_id"),
mode: str = Query("overwrite"),
db: Session = Depends(get_db)
):
if not file.filename.endswith(".zip"):
raise HTTPException(status_code=400, detail="Expected a zip file.")
if mode not in {"skip", "overwrite"}:
raise HTTPException(status_code=400, detail="Invalid mode. Must be 'skip' or 'overwrite'.")
user = db.query(User).filter(User.user_id == user_id).first()
if not user:
raise HTTPException(status_code=404, detail="User not found")
content = await file.read()
try:
with zipfile.ZipFile(io.BytesIO(content), "r") as zf:
names = zf.namelist()
def find_member(filename: str) -> Optional[str]:
for name in names:
# Skip directory entries
if name.endswith('/'):
continue
if name.rsplit('/', 1)[-1] == filename:
return name
return None
sqlite_member = find_member("memories.json")
if not sqlite_member:
raise HTTPException(status_code=400, detail="memories.json missing in zip")
memories_member = find_member("memories.jsonl.gz")
sqlite_data = json.loads(zf.read(sqlite_member))
memories_blob = zf.read(memories_member) if memories_member else None
except Exception:
raise HTTPException(status_code=400, detail="Invalid zip file")
default_app = db.query(App).filter(App.owner_id == user.id, App.name == "openmemory").first()
if not default_app:
default_app = App(owner_id=user.id, name="openmemory", is_active=True, metadata_={})
db.add(default_app)
db.commit()
db.refresh(default_app)
cat_id_map: Dict[str, UUID] = {}
for c in sqlite_data.get("categories", []):
cat = db.query(Category).filter(Category.name == c["name"]).first()
if not cat:
cat = Category(name=c["name"], description=c.get("description"))
db.add(cat)
db.commit()
db.refresh(cat)
cat_id_map[c["id"]] = cat.id
old_to_new_id: Dict[str, UUID] = {}
for m in sqlite_data.get("memories", []):
incoming_id = UUID(m["id"])
existing = db.query(Memory).filter(Memory.id == incoming_id).first()
# Cross-user collision: always mint a new UUID and import as a new memory
if existing and existing.user_id != user.id:
target_id = uuid4()
else:
target_id = incoming_id
old_to_new_id[m["id"]] = target_id
# Same-user collision + skip mode: leave existing row untouched
if existing and (existing.user_id == user.id) and mode == "skip":
continue
# Same-user collision + overwrite mode: treat import as ground truth
if existing and (existing.user_id == user.id) and mode == "overwrite":
incoming_state = m.get("state", "active")
existing.user_id = user.id
existing.app_id = default_app.id
existing.content = m.get("content") or ""
existing.metadata_ = m.get("metadata") or {}
try:
existing.state = MemoryState(incoming_state)
except Exception:
existing.state = MemoryState.active
# Update state-related timestamps from import (ground truth)
existing.archived_at = _parse_iso(m.get("archived_at"))
existing.deleted_at = _parse_iso(m.get("deleted_at"))
existing.created_at = _parse_iso(m.get("created_at")) or existing.created_at
existing.updated_at = _parse_iso(m.get("updated_at")) or existing.updated_at
db.add(existing)
db.commit()
continue
new_mem = Memory(
id=target_id,
user_id=user.id,
app_id=default_app.id,
content=m.get("content") or "",
metadata_=m.get("metadata") or {},
state=MemoryState(m.get("state", "active")) if m.get("state") else MemoryState.active,
created_at=_parse_iso(m.get("created_at")) or datetime.now(UTC),
updated_at=_parse_iso(m.get("updated_at")) or datetime.now(UTC),
archived_at=_parse_iso(m.get("archived_at")),
deleted_at=_parse_iso(m.get("deleted_at")),
)
db.add(new_mem)
db.commit()
for link in sqlite_data.get("memory_categories", []):
mid = old_to_new_id.get(link["memory_id"])
cid = cat_id_map.get(link["category_id"])
if not (mid and cid):
continue
exists = db.execute(
memory_categories.select().where(
(memory_categories.c.memory_id == mid) & (memory_categories.c.category_id == cid)
)
).first()
if not exists:
db.execute(memory_categories.insert().values(memory_id=mid, category_id=cid))
db.commit()
for h in sqlite_data.get("status_history", []):
hid = UUID(h["id"])
mem_id = old_to_new_id.get(h["memory_id"], UUID(h["memory_id"]))
exists = db.query(MemoryStatusHistory).filter(MemoryStatusHistory.id == hid).first()
if exists and mode == "skip":
continue
rec = exists if exists else MemoryStatusHistory(id=hid)
rec.memory_id = mem_id
rec.changed_by = user.id
try:
rec.old_state = MemoryState(h.get("old_state", "active"))
rec.new_state = MemoryState(h.get("new_state", "active"))
except Exception:
rec.old_state = MemoryState.active
rec.new_state = MemoryState.active
rec.changed_at = _parse_iso(h.get("changed_at")) or datetime.now(UTC)
db.add(rec)
db.commit()
memory_client = get_memory_client()
vector_store = getattr(memory_client, "vector_store", None) if memory_client else None
if vector_store and memory_client and hasattr(memory_client, "embedding_model"):
def iter_logical_records():
if memories_blob:
gz_buf = io.BytesIO(memories_blob)
with gzip.GzipFile(fileobj=gz_buf, mode="rb") as gz:
for raw in gz:
yield json.loads(raw.decode("utf-8"))
else:
for m in sqlite_data.get("memories", []):
yield {
"id": m["id"],
"content": m.get("content"),
"metadata": m.get("metadata") or {},
"created_at": m.get("created_at"),
"updated_at": m.get("updated_at"),
}
for rec in iter_logical_records():
old_id = rec["id"]
new_id = old_to_new_id.get(old_id, UUID(old_id))
content = rec.get("content") or ""
metadata = rec.get("metadata") or {}
created_at = rec.get("created_at")
updated_at = rec.get("updated_at")
if mode == "skip":
try:
get_fn = getattr(vector_store, "get", None)
if callable(get_fn) and vector_store.get(str(new_id)):
continue
except Exception:
pass
payload = dict(metadata)
payload["data"] = content
if created_at:
payload["created_at"] = created_at
if updated_at:
payload["updated_at"] = updated_at
payload["user_id"] = user_id
payload.setdefault("source_app", "openmemory")
try:
vec = memory_client.embedding_model.embed(content, "add")
vector_store.insert(vectors=[vec], payloads=[payload], ids=[str(new_id)])
except Exception as e:
print(f"Vector upsert failed for memory {new_id}: {e}")
continue
return {"message": f'Import completed into user "{user_id}"'}
return {"message": f'Import completed into user "{user_id}"'}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/app/routers/stats.py | openmemory/api/app/routers/stats.py | from app.database import get_db
from app.models import App, Memory, MemoryState, User
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
router = APIRouter(prefix="/api/v1/stats", tags=["stats"])
@router.get("/")
async def get_profile(
user_id: str,
db: Session = Depends(get_db)
):
user = db.query(User).filter(User.user_id == user_id).first()
if not user:
raise HTTPException(status_code=404, detail="User not found")
# Get total number of memories
total_memories = db.query(Memory).filter(Memory.user_id == user.id, Memory.state != MemoryState.deleted).count()
# Get total number of apps
apps = db.query(App).filter(App.owner == user)
total_apps = apps.count()
return {
"total_memories": total_memories,
"total_apps": total_apps,
"apps": apps.all()
}
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/alembic/env.py | openmemory/api/alembic/env.py | import os
import sys
from logging.config import fileConfig
from alembic import context
from dotenv import load_dotenv
from sqlalchemy import engine_from_config, pool
# Add the parent directory to the Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Load environment variables
load_dotenv()
# Import your models here - moved after path setup
from app.database import Base # noqa: E402
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
configuration = config.get_section(config.config_ini_section)
configuration["sqlalchemy.url"] = os.getenv("DATABASE_URL", "sqlite:///./openmemory.db")
connectable = engine_from_config(
configuration,
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/alembic/versions/afd00efbd06b_add_unique_user_id_constraints.py | openmemory/api/alembic/versions/afd00efbd06b_add_unique_user_id_constraints.py | """remove_global_unique_constraint_on_app_name_add_composite_unique
Revision ID: afd00efbd06b
Revises: add_config_table
Create Date: 2025-06-04 01:59:41.637440
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = 'afd00efbd06b'
down_revision: Union[str, None] = 'add_config_table'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Upgrade schema."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_apps_name', table_name='apps')
op.create_index(op.f('ix_apps_name'), 'apps', ['name'], unique=False)
op.create_index('idx_app_owner_name', 'apps', ['owner_id', 'name'], unique=True)
# ### end Alembic commands ###
def downgrade() -> None:
"""Downgrade schema."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('idx_app_owner_name', table_name='apps')
op.drop_index(op.f('ix_apps_name'), table_name='apps')
op.create_index('ix_apps_name', 'apps', ['name'], unique=True)
# ### end Alembic commands ### | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/alembic/versions/add_config_table.py | openmemory/api/alembic/versions/add_config_table.py | """add_config_table
Revision ID: add_config_table
Revises: 0b53c747049a
Create Date: 2023-06-01 10:00:00.000000
"""
import uuid
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'add_config_table'
down_revision = '0b53c747049a'
branch_labels = None
depends_on = None
def upgrade():
# Create configs table if it doesn't exist
op.create_table(
'configs',
sa.Column('id', sa.UUID(), nullable=False, default=lambda: uuid.uuid4()),
sa.Column('key', sa.String(), nullable=False),
sa.Column('value', sa.JSON(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('key')
)
# Create index for key lookups
op.create_index('idx_configs_key', 'configs', ['key'])
def downgrade():
# Drop the configs table
op.drop_index('idx_configs_key', 'configs')
op.drop_table('configs') | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/openmemory/api/alembic/versions/0b53c747049a_initial_migration.py | openmemory/api/alembic/versions/0b53c747049a_initial_migration.py | """Initial migration
Revision ID: 0b53c747049a
Revises:
Create Date: 2025-04-19 00:59:56.244203
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '0b53c747049a'
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Upgrade schema."""
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('access_controls',
sa.Column('id', sa.UUID(), nullable=False),
sa.Column('subject_type', sa.String(), nullable=False),
sa.Column('subject_id', sa.UUID(), nullable=True),
sa.Column('object_type', sa.String(), nullable=False),
sa.Column('object_id', sa.UUID(), nullable=True),
sa.Column('effect', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_access_object', 'access_controls', ['object_type', 'object_id'], unique=False)
op.create_index('idx_access_subject', 'access_controls', ['subject_type', 'subject_id'], unique=False)
op.create_index(op.f('ix_access_controls_created_at'), 'access_controls', ['created_at'], unique=False)
op.create_index(op.f('ix_access_controls_effect'), 'access_controls', ['effect'], unique=False)
op.create_index(op.f('ix_access_controls_object_id'), 'access_controls', ['object_id'], unique=False)
op.create_index(op.f('ix_access_controls_object_type'), 'access_controls', ['object_type'], unique=False)
op.create_index(op.f('ix_access_controls_subject_id'), 'access_controls', ['subject_id'], unique=False)
op.create_index(op.f('ix_access_controls_subject_type'), 'access_controls', ['subject_type'], unique=False)
op.create_table('archive_policies',
sa.Column('id', sa.UUID(), nullable=False),
sa.Column('criteria_type', sa.String(), nullable=False),
sa.Column('criteria_id', sa.UUID(), nullable=True),
sa.Column('days_to_archive', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_policy_criteria', 'archive_policies', ['criteria_type', 'criteria_id'], unique=False)
op.create_index(op.f('ix_archive_policies_created_at'), 'archive_policies', ['created_at'], unique=False)
op.create_index(op.f('ix_archive_policies_criteria_id'), 'archive_policies', ['criteria_id'], unique=False)
op.create_index(op.f('ix_archive_policies_criteria_type'), 'archive_policies', ['criteria_type'], unique=False)
op.create_table('categories',
sa.Column('id', sa.UUID(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('description', sa.String(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_categories_created_at'), 'categories', ['created_at'], unique=False)
op.create_index(op.f('ix_categories_name'), 'categories', ['name'], unique=True)
op.create_table('users',
sa.Column('id', sa.UUID(), nullable=False),
sa.Column('user_id', sa.String(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('email', sa.String(), nullable=True),
sa.Column('metadata', sa.JSON(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_created_at'), 'users', ['created_at'], unique=False)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_name'), 'users', ['name'], unique=False)
op.create_index(op.f('ix_users_user_id'), 'users', ['user_id'], unique=True)
op.create_table('apps',
sa.Column('id', sa.UUID(), nullable=False),
sa.Column('owner_id', sa.UUID(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('description', sa.String(), nullable=True),
sa.Column('metadata', sa.JSON(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_apps_created_at'), 'apps', ['created_at'], unique=False)
op.create_index(op.f('ix_apps_is_active'), 'apps', ['is_active'], unique=False)
op.create_index(op.f('ix_apps_name'), 'apps', ['name'], unique=True)
op.create_index(op.f('ix_apps_owner_id'), 'apps', ['owner_id'], unique=False)
op.create_table('memories',
sa.Column('id', sa.UUID(), nullable=False),
sa.Column('user_id', sa.UUID(), nullable=False),
sa.Column('app_id', sa.UUID(), nullable=False),
sa.Column('content', sa.String(), nullable=False),
sa.Column('vector', sa.String(), nullable=True),
sa.Column('metadata', sa.JSON(), nullable=True),
sa.Column('state', sa.Enum('active', 'paused', 'archived', 'deleted', name='memorystate'), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('archived_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['app_id'], ['apps.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_memory_app_state', 'memories', ['app_id', 'state'], unique=False)
op.create_index('idx_memory_user_app', 'memories', ['user_id', 'app_id'], unique=False)
op.create_index('idx_memory_user_state', 'memories', ['user_id', 'state'], unique=False)
op.create_index(op.f('ix_memories_app_id'), 'memories', ['app_id'], unique=False)
op.create_index(op.f('ix_memories_archived_at'), 'memories', ['archived_at'], unique=False)
op.create_index(op.f('ix_memories_created_at'), 'memories', ['created_at'], unique=False)
op.create_index(op.f('ix_memories_deleted_at'), 'memories', ['deleted_at'], unique=False)
op.create_index(op.f('ix_memories_state'), 'memories', ['state'], unique=False)
op.create_index(op.f('ix_memories_user_id'), 'memories', ['user_id'], unique=False)
op.create_table('memory_access_logs',
sa.Column('id', sa.UUID(), nullable=False),
sa.Column('memory_id', sa.UUID(), nullable=False),
sa.Column('app_id', sa.UUID(), nullable=False),
sa.Column('accessed_at', sa.DateTime(), nullable=True),
sa.Column('access_type', sa.String(), nullable=False),
sa.Column('metadata', sa.JSON(), nullable=True),
sa.ForeignKeyConstraint(['app_id'], ['apps.id'], ),
sa.ForeignKeyConstraint(['memory_id'], ['memories.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_access_app_time', 'memory_access_logs', ['app_id', 'accessed_at'], unique=False)
op.create_index('idx_access_memory_time', 'memory_access_logs', ['memory_id', 'accessed_at'], unique=False)
op.create_index(op.f('ix_memory_access_logs_access_type'), 'memory_access_logs', ['access_type'], unique=False)
op.create_index(op.f('ix_memory_access_logs_accessed_at'), 'memory_access_logs', ['accessed_at'], unique=False)
op.create_index(op.f('ix_memory_access_logs_app_id'), 'memory_access_logs', ['app_id'], unique=False)
op.create_index(op.f('ix_memory_access_logs_memory_id'), 'memory_access_logs', ['memory_id'], unique=False)
op.create_table('memory_categories',
sa.Column('memory_id', sa.UUID(), nullable=False),
sa.Column('category_id', sa.UUID(), nullable=False),
sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),
sa.ForeignKeyConstraint(['memory_id'], ['memories.id'], ),
sa.PrimaryKeyConstraint('memory_id', 'category_id')
)
op.create_index('idx_memory_category', 'memory_categories', ['memory_id', 'category_id'], unique=False)
op.create_index(op.f('ix_memory_categories_category_id'), 'memory_categories', ['category_id'], unique=False)
op.create_index(op.f('ix_memory_categories_memory_id'), 'memory_categories', ['memory_id'], unique=False)
op.create_table('memory_status_history',
sa.Column('id', sa.UUID(), nullable=False),
sa.Column('memory_id', sa.UUID(), nullable=False),
sa.Column('changed_by', sa.UUID(), nullable=False),
sa.Column('old_state', sa.Enum('active', 'paused', 'archived', 'deleted', name='memorystate'), nullable=False),
sa.Column('new_state', sa.Enum('active', 'paused', 'archived', 'deleted', name='memorystate'), nullable=False),
sa.Column('changed_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['changed_by'], ['users.id'], ),
sa.ForeignKeyConstraint(['memory_id'], ['memories.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_history_memory_state', 'memory_status_history', ['memory_id', 'new_state'], unique=False)
op.create_index('idx_history_user_time', 'memory_status_history', ['changed_by', 'changed_at'], unique=False)
op.create_index(op.f('ix_memory_status_history_changed_at'), 'memory_status_history', ['changed_at'], unique=False)
op.create_index(op.f('ix_memory_status_history_changed_by'), 'memory_status_history', ['changed_by'], unique=False)
op.create_index(op.f('ix_memory_status_history_memory_id'), 'memory_status_history', ['memory_id'], unique=False)
op.create_index(op.f('ix_memory_status_history_new_state'), 'memory_status_history', ['new_state'], unique=False)
op.create_index(op.f('ix_memory_status_history_old_state'), 'memory_status_history', ['old_state'], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
"""Downgrade schema."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_memory_status_history_old_state'), table_name='memory_status_history')
op.drop_index(op.f('ix_memory_status_history_new_state'), table_name='memory_status_history')
op.drop_index(op.f('ix_memory_status_history_memory_id'), table_name='memory_status_history')
op.drop_index(op.f('ix_memory_status_history_changed_by'), table_name='memory_status_history')
op.drop_index(op.f('ix_memory_status_history_changed_at'), table_name='memory_status_history')
op.drop_index('idx_history_user_time', table_name='memory_status_history')
op.drop_index('idx_history_memory_state', table_name='memory_status_history')
op.drop_table('memory_status_history')
op.drop_index(op.f('ix_memory_categories_memory_id'), table_name='memory_categories')
op.drop_index(op.f('ix_memory_categories_category_id'), table_name='memory_categories')
op.drop_index('idx_memory_category', table_name='memory_categories')
op.drop_table('memory_categories')
op.drop_index(op.f('ix_memory_access_logs_memory_id'), table_name='memory_access_logs')
op.drop_index(op.f('ix_memory_access_logs_app_id'), table_name='memory_access_logs')
op.drop_index(op.f('ix_memory_access_logs_accessed_at'), table_name='memory_access_logs')
op.drop_index(op.f('ix_memory_access_logs_access_type'), table_name='memory_access_logs')
op.drop_index('idx_access_memory_time', table_name='memory_access_logs')
op.drop_index('idx_access_app_time', table_name='memory_access_logs')
op.drop_table('memory_access_logs')
op.drop_index(op.f('ix_memories_user_id'), table_name='memories')
op.drop_index(op.f('ix_memories_state'), table_name='memories')
op.drop_index(op.f('ix_memories_deleted_at'), table_name='memories')
op.drop_index(op.f('ix_memories_created_at'), table_name='memories')
op.drop_index(op.f('ix_memories_archived_at'), table_name='memories')
op.drop_index(op.f('ix_memories_app_id'), table_name='memories')
op.drop_index('idx_memory_user_state', table_name='memories')
op.drop_index('idx_memory_user_app', table_name='memories')
op.drop_index('idx_memory_app_state', table_name='memories')
op.drop_table('memories')
op.drop_index(op.f('ix_apps_owner_id'), table_name='apps')
op.drop_index(op.f('ix_apps_name'), table_name='apps')
op.drop_index(op.f('ix_apps_is_active'), table_name='apps')
op.drop_index(op.f('ix_apps_created_at'), table_name='apps')
op.drop_table('apps')
op.drop_index(op.f('ix_users_user_id'), table_name='users')
op.drop_index(op.f('ix_users_name'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_index(op.f('ix_users_created_at'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_categories_name'), table_name='categories')
op.drop_index(op.f('ix_categories_created_at'), table_name='categories')
op.drop_table('categories')
op.drop_index(op.f('ix_archive_policies_criteria_type'), table_name='archive_policies')
op.drop_index(op.f('ix_archive_policies_criteria_id'), table_name='archive_policies')
op.drop_index(op.f('ix_archive_policies_created_at'), table_name='archive_policies')
op.drop_index('idx_policy_criteria', table_name='archive_policies')
op.drop_table('archive_policies')
op.drop_index(op.f('ix_access_controls_subject_type'), table_name='access_controls')
op.drop_index(op.f('ix_access_controls_subject_id'), table_name='access_controls')
op.drop_index(op.f('ix_access_controls_object_type'), table_name='access_controls')
op.drop_index(op.f('ix_access_controls_object_id'), table_name='access_controls')
op.drop_index(op.f('ix_access_controls_effect'), table_name='access_controls')
op.drop_index(op.f('ix_access_controls_created_at'), table_name='access_controls')
op.drop_index('idx_access_subject', table_name='access_controls')
op.drop_index('idx_access_object', table_name='access_controls')
op.drop_table('access_controls')
# ### end Alembic commands ###
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/personal_assistant_agno.py | examples/misc/personal_assistant_agno.py | """
Create your personal AI Assistant powered by memory that supports both text and images and remembers your preferences
In order to run this file, you need to set up your Mem0 API at Mem0 platform and also need a OpenAI API key.
export OPENAI_API_KEY="your_openai_api_key"
export MEM0_API_KEY="your_mem0_api_key"
"""
import base64
from pathlib import Path
from agno.agent import Agent
from agno.media import Image
from agno.models.openai import OpenAIChat
from mem0 import MemoryClient
# Initialize the Mem0 client
client = MemoryClient()
# Define the agent
agent = Agent(
name="Personal Agent",
model=OpenAIChat(id="gpt-4.1-nano-2025-04-14"),
description="You are a helpful personal agent that helps me with day to day activities."
"You can process both text and images.",
markdown=True,
)
# Function to handle user input with memory integration with support for images
def chat_user(user_input: str = None, user_id: str = "user_123", image_path: str = None):
if image_path:
with open(image_path, "rb") as image_file:
base64_image = base64.b64encode(image_file.read()).decode("utf-8")
# First: the text message
text_msg = {"role": "user", "content": user_input}
# Second: the image message
image_msg = {
"role": "user",
"content": {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}},
}
# Send both as separate message objects
client.add([text_msg, image_msg], user_id=user_id)
print("✅ Image uploaded and stored in memory.")
if user_input:
memories = client.search(user_input, user_id=user_id)
memory_context = "\n".join(f"- {m['memory']}" for m in memories)
prompt = f"""
You are a helpful personal assistant who helps user with his day-to-day activities and keep track of everything.
Your task is to:
1. Analyze the given image (if present) and extract meaningful details to answer the user's question.
2. Use your past memory of the user to personalize your answer.
3. Combine the image content and memory to generate a helpful, context-aware response.
Here is what remember about the user:
{memory_context}
User question:
{user_input}
"""
if image_path:
response = agent.run(prompt, images=[Image(filepath=Path(image_path))])
else:
response = agent.run(prompt)
client.add(f"User: {user_input}\nAssistant: {response.content}", user_id=user_id)
return response.content
return "No user input or image provided."
# Example Usage
user_id = "user_123"
print(chat_user("What did I ask you to remind me about?", user_id))
# # OUTPUT: You asked me to remind you to call your mom tomorrow. 📞
#
print(chat_user("When is my test?", user_id=user_id))
# OUTPUT: Your pilot's test is on your birthday, which is in five days. You're turning 25!
# Good luck with your preparations, and remember to take some time to relax amidst the studying.
print(
chat_user(
"This is the picture of what I brought with me in the trip to Bahamas",
image_path="travel_items.jpeg", # this will be added to Mem0 memory
user_id=user_id,
)
)
print(chat_user("hey can you quickly tell me if brought my sunglasses to my trip, not able to find", user_id=user_id))
# OUTPUT: Yes, you did bring your sunglasses on your trip to the Bahamas along with your laptop, face masks and other items..
# Since you can't find them now, perhaps check the pockets of jackets you wore or in your luggage compartments.
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/voice_assistant_elevenlabs.py | examples/misc/voice_assistant_elevenlabs.py | """
Personal Voice Assistant with Memory (Whisper + CrewAI + Mem0 + ElevenLabs)
This script creates a personalized AI assistant that can:
- Understand voice commands using Whisper (OpenAI STT)
- Respond intelligently using CrewAI Agent and LLMs
- Remember user preferences and facts using Mem0 memory
- Speak responses back using ElevenLabs text-to-speech
Initial user memory is bootstrapped from predefined preferences, and the assistant can remember new context dynamically over time.
To run this file, you need to set the following environment variables:
export OPENAI_API_KEY="your_openai_api_key"
export MEM0_API_KEY="your_mem0_api_key"
export ELEVENLABS_API_KEY="your_elevenlabs_api_key"
You must also have:
- A working microphone setup (pyaudio)
- A valid ElevenLabs voice ID
- Python packages: openai, elevenlabs, crewai, mem0ai, pyaudio
"""
import tempfile
import wave
import pyaudio
from crewai import Agent, Crew, Process, Task
from elevenlabs import play
from elevenlabs.client import ElevenLabs
from openai import OpenAI
from mem0 import MemoryClient
# ------------------ SETUP ------------------
USER_ID = "Alex"
openai_client = OpenAI()
tts_client = ElevenLabs()
memory_client = MemoryClient()
# Function to store user preferences in memory
def store_user_preferences(user_id: str, conversation: list):
"""Store user preferences from conversation history"""
memory_client.add(conversation, user_id=user_id)
# Initialize memory with some basic preferences
def initialize_memory() -> None:
    """Seed Mem0 with a one-time scripted conversation capturing Alex's baseline profile.

    Covers identity, response-style preference, music taste, schedule, allergies,
    and family context so the voice agent has context from the first real query.
    """
    # Example conversation storage with voice assistant relevant preferences
    messages = [
        {
            "role": "user",
            "content": "Hi, my name is Alex Thompson. I'm 32 years old and work as a software engineer at TechCorp.",
        },
        {
            "role": "assistant",
            "content": "Hello Alex Thompson! Nice to meet you. I've noted that you're 32 and work as a software engineer at TechCorp. How can I help you today?",
        },
        {
            "role": "user",
            "content": "I prefer brief and concise responses without unnecessary explanations. I get frustrated when assistants are too wordy or repeat information I already know.",
        },
        {
            "role": "assistant",
            "content": "Got it. I'll keep my responses short, direct, and without redundancy.",
        },
        {
            "role": "user",
            "content": "I like to listen to jazz music when I'm working, especially artists like Miles Davis and John Coltrane. I find it helps me focus and be more productive.",
        },
        {
            "role": "assistant",
            "content": "I'll remember your preference for jazz while working, particularly Miles Davis and John Coltrane. It's great for focus.",
        },
        {
            "role": "user",
            "content": "I usually wake up at 7 AM and prefer reminders for meetings 30 minutes in advance. My most productive hours are between 9 AM and noon, so I try to schedule important tasks during that time.",
        },
        {
            "role": "assistant",
            "content": "Noted. You wake up at 7 AM, need meeting reminders 30 minutes ahead, and are most productive between 9 AM and noon for important tasks.",
        },
        {
            "role": "user",
            "content": "My favorite color is navy blue, and I prefer dark mode in all my apps. I'm allergic to peanuts, so please remind me to check ingredients when I ask about recipes or restaurants.",
        },
        {
            "role": "assistant",
            "content": "I've noted that you prefer navy blue and dark mode interfaces. I'll also help you remember to check for peanuts in food recommendations due to your allergy.",
        },
        {
            "role": "user",
            "content": "My partner's name is Jamie, and we have a golden retriever named Max who is 3 years old. My parents live in Chicago, and I try to visit them once every two months.",
        },
        {
            "role": "assistant",
            "content": "I'll remember that your partner is Jamie, your dog Max is a 3-year-old golden retriever, and your parents live in Chicago whom you visit bimonthly.",
        },
    ]
    # Store the initial preferences
    store_user_preferences(USER_ID, messages)
    print("✅ Memory initialized with user preferences")
# CrewAI agent wired to Mem0 so it can recall the preferences seeded by initialize_memory().
voice_agent = Agent(
    role="Memory-based Voice Assistant",
    goal="Help the user with day-to-day tasks and remember their preferences over time.",
    backstory="You are a voice assistant who understands the user well and converse with them.",
    verbose=True,
    memory=True,
    # Memory is scoped per user; USER_ID ties retrievals to the seeded profile.
    memory_config={
        "provider": "mem0",
        "config": {"user_id": USER_ID},
    },
)
# ------------------ AUDIO RECORDING ------------------
def record_audio(filename="input.wav", record_seconds=5):
    """Record mono 16-bit audio from the default microphone into a WAV file.

    Args:
        filename: Path of the WAV file to write.
        record_seconds: Recording duration in seconds.
    """
    print("🎙️ Recording (speak now)...")
    chunk = 1024
    fmt = pyaudio.paInt16
    channels = 1
    rate = 44100
    p = pyaudio.PyAudio()
    # Fix: query the sample width before terminate() releases PortAudio state.
    sample_width = p.get_sample_size(fmt)
    stream = p.open(format=fmt, channels=channels, rate=rate, input=True, frames_per_buffer=chunk)
    frames = []
    try:
        for _ in range(0, int(rate / chunk * record_seconds)):
            frames.append(stream.read(chunk))
    finally:
        # Fix: always release the audio device, even if stream.read() raises
        # mid-recording (previously the stream and PyAudio instance leaked).
        stream.stop_stream()
        stream.close()
        p.terminate()
    with wave.open(filename, "wb") as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(sample_width)
        wf.setframerate(rate)
        wf.writeframes(b"".join(frames))
# ------------------ STT USING WHISPER ------------------
def transcribe_whisper(audio_path):
    """Transcribe the given audio file with OpenAI Whisper; return "" on any failure."""
    print("🔎 Transcribing with Whisper...")
    try:
        with open(audio_path, "rb") as fh:
            response = openai_client.audio.transcriptions.create(model="whisper-1", file=fh)
        spoken = response.text
        print(f"🗣️ You said: {spoken}")
        return spoken
    except Exception as err:
        print(f"Error during transcription: {err}")
        return ""
# ------------------ AGENT RESPONSE ------------------
def get_agent_response(user_input):
    """Run the CrewAI voice agent on the transcribed text and return a plain-text reply.

    Returns a canned apology string on empty input or on any agent failure.
    """
    if not user_input:
        return "I didn't catch that. Could you please repeat?"
    try:
        reply_task = Task(
            description=f"Respond to: {user_input}",
            expected_output="A short and relevant reply.",
            agent=voice_agent,
        )
        crew = Crew(
            agents=[voice_agent],
            tasks=[reply_task],
            process=Process.sequential,
            verbose=True,
            memory=True,
            memory_config={"provider": "mem0", "config": {"user_id": USER_ID}},
        )
        outcome = crew.kickoff()
        # Pull the text out of whichever result shape CrewAI returned.
        if hasattr(outcome, "raw"):
            return outcome.raw
        if isinstance(outcome, dict):
            if "raw" in outcome:
                return outcome["raw"]
            if "tasks_output" in outcome:
                task_outputs = outcome["tasks_output"]
                if task_outputs and isinstance(task_outputs, list) and len(task_outputs) > 0:
                    return task_outputs[0].get("raw", str(outcome))
        # Fallback to string representation if we can't extract the raw response.
        return str(outcome)
    except Exception as err:
        print(f"Error getting agent response: {err}")
        return "I'm having trouble processing that request. Can we try again?"
# ------------------ SPEAK WITH ELEVENLABS ------------------
def speak_response(text):
    """Echo `text` to the console and speak it aloud via ElevenLabs TTS."""
    print(f"🤖 Agent: {text}")
    # NOTE(review): voice_id is a specific prebuilt ElevenLabs voice — swap in your own if needed.
    audio = tts_client.text_to_speech.convert(
        text=text, voice_id="JBFqnCBsd6RMkjVDRZzb", model_id="eleven_multilingual_v2", output_format="mp3_44100_128"
    )
    play(audio)
# ------------------ MAIN LOOP ------------------
def run_voice_agent():
    """Main loop: record → transcribe → respond → speak, until the user says exit/quit/stop."""
    import os  # local import so the module import block is untouched

    print("🧠 Voice agent (Whisper + Mem0 + ElevenLabs) is ready! Say something.")
    while True:
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_audio:
            record_audio(tmp_audio.name)
        try:
            user_text = transcribe_whisper(tmp_audio.name)
            if user_text.lower() in ["exit", "quit", "stop"]:
                print("👋 Exiting.")
                break
            response = get_agent_response(user_text)
            speak_response(response)
        except Exception as e:
            print(f"❌ Error: {e}")
        finally:
            # Fix: delete=False leaked one temp WAV per iteration; remove it once used.
            try:
                os.unlink(tmp_audio.name)
            except OSError:
                pass
if __name__ == "__main__":
    try:
        # Initialize memory with user preferences before starting the voice agent
        # (one-time setup; re-running re-ingests the same seed conversation).
        initialize_memory()
        # Run the voice assistant loop until exit/quit/stop or Ctrl-C.
        run_voice_agent()
    except KeyboardInterrupt:
        print("\n👋 Program interrupted. Exiting.")
    except Exception as e:
        print(f"❌ Fatal error: {e}")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/vllm_example.py | examples/misc/vllm_example.py | """
Example of using vLLM with mem0 for high-performance memory operations.
SETUP INSTRUCTIONS:
1. Install vLLM:
pip install vllm
2. Start vLLM server (in a separate terminal):
vllm serve microsoft/DialoGPT-small --port 8000
Wait for the message: "Uvicorn running on http://0.0.0.0:8000"
(Small model: ~500MB download, much faster!)
3. Verify server is running:
curl http://localhost:8000/health
4. Run this example:
python examples/misc/vllm_example.py
Optional environment variables:
export VLLM_BASE_URL="http://localhost:8000/v1"
export VLLM_API_KEY="vllm-api-key"
"""
from mem0 import Memory
# Configuration for vLLM integration
config = {
    "llm": {
        "provider": "vllm",
        "config": {
            # NOTE(review): the setup docstring starts the server with
            # microsoft/DialoGPT-small — this model name must match whatever
            # `vllm serve` is actually running; confirm before use.
            "model": "Qwen/Qwen2.5-32B-Instruct",
            "vllm_base_url": "http://localhost:8000/v1",
            "api_key": "vllm-api-key",
            "temperature": 0.7,
            "max_tokens": 100,
        },
    },
    # Embeddings still go through OpenAI; requires OPENAI_API_KEY.
    "embedder": {"provider": "openai", "config": {"model": "text-embedding-3-small"}},
    # Local Qdrant instance holds the memory vectors.
    "vector_store": {
        "provider": "qdrant",
        "config": {"collection_name": "vllm_memories", "host": "localhost", "port": 6333},
    },
}
def main():
    """
    Demonstrate vLLM integration with mem0.

    Steps: build a Memory from `config`, add three seed conversations,
    run semantic searches against them, then dump all stored memories.
    """
    print("--> Initializing mem0 with vLLM...")
    # Initialize memory with vLLM
    memory = Memory.from_config(config)
    print("--> Memory initialized successfully!")
    # Example conversations to store
    conversations = [
        {
            "messages": [
                {"role": "user", "content": "I love playing chess on weekends"},
                {
                    "role": "assistant",
                    "content": "That's great! Chess is an excellent strategic game that helps improve critical thinking.",
                },
            ],
            "user_id": "user_123",
        },
        {
            "messages": [
                {"role": "user", "content": "I'm learning Python programming"},
                {
                    "role": "assistant",
                    "content": "Python is a fantastic language for beginners! What specific areas are you focusing on?",
                },
            ],
            "user_id": "user_123",
        },
        {
            "messages": [
                {"role": "user", "content": "I prefer working late at night, I'm more productive then"},
                {
                    "role": "assistant",
                    "content": "Many people find they're more creative and focused during nighttime hours. It's important to maintain a consistent schedule that works for you.",
                },
            ],
            "user_id": "user_123",
        },
    ]
    print("\n--> Adding memories using vLLM...")
    # Add memories - now powered by vLLM's high-performance inference
    for i, conversation in enumerate(conversations, 1):
        result = memory.add(messages=conversation["messages"], user_id=conversation["user_id"])
        print(f"Memory {i} added: {result}")
    print("\n🔍 Searching memories...")
    # Search memories - vLLM will process the search and memory operations
    search_queries = [
        "What does the user like to do on weekends?",
        "What is the user learning?",
        "When is the user most productive?",
    ]
    for query in search_queries:
        print(f"\nQuery: {query}")
        # NOTE(review): assumes search()/get_all() return iterables of
        # {"memory": ...} dicts — newer mem0 versions may wrap results in
        # {"results": [...]}; confirm against the installed version.
        memories = memory.search(query=query, user_id="user_123")
        for memory_item in memories:
            print(f" - {memory_item['memory']}")
    print("\n--> Getting all memories for user...")
    all_memories = memory.get_all(user_id="user_123")
    print(f"Total memories stored: {len(all_memories)}")
    for memory_item in all_memories:
        print(f" - {memory_item['memory']}")
    print("\n--> vLLM integration demo completed successfully!")
    print("\nBenefits of using vLLM:")
    print(" -> 2.7x higher throughput compared to standard implementations")
    print(" -> 5x faster time-per-output-token")
    print(" -> Efficient memory usage with PagedAttention")
    print(" -> Simple configuration, same as other providers")
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Demo-level catch-all: surface the error plus setup hints instead of a traceback.
        print(f"=> Error: {e}")
        print("\nTroubleshooting:")
        print("1. Make sure vLLM server is running: vllm serve microsoft/DialoGPT-small --port 8000")
        print("2. Check if the model is downloaded and accessible")
        print("3. Verify the base URL and port configuration")
        print("4. Ensure you have the required dependencies installed")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/personalized_search.py | examples/misc/personalized_search.py | """
Personalized Search Agent with Mem0 + Tavily
Uses LangChain agent pattern with Tavily tools for personalized search based on user memories stored in Mem0.
"""
from dotenv import load_dotenv
from mem0 import MemoryClient
from langchain.agents import create_openai_tools_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
from langchain_tavily import TavilySearch
from langchain.schema import HumanMessage
from datetime import datetime
import logging
# Load environment variables
load_dotenv()
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Initialize clients
mem0_client = MemoryClient()
# Set custom instructions to infer facts and memory to understand user preferences.
# This is a project-level setting: it applies to every subsequent add() for this project.
mem0_client.project.update(
    custom_instructions='''
INFER THE MEMORIES FROM USER QUERIES EVEN IF IT'S A QUESTION.
We are building the personalized search for which we need to understand about user's preferences and life
and extract facts and memories out of it accordingly.
BE IT TIME, LOCATION, USER'S PERSONAL LIFE, CHOICES, USER'S PREFERENCES, we need to store those for better personalized search.
'''
)
# Low temperature keeps query-enhancement behavior close to deterministic.
llm = ChatOpenAI(model="gpt-4.1-nano-2025-04-14", temperature=0.2)
def setup_user_history(user_id):
    """Simulate realistic user conversation history.

    Seeds Mem0 with five short exchanges (location, diet, allergy, work style,
    hobbies) so later searches have personal context to draw on.
    """
    conversations = [
        [
            {"role": "user", "content": "What will be the weather today at Los Angeles? I need to go to pick up my daughter from office."},
            {"role": "assistant", "content": "I'll check the weather in LA for you, so that you can plan you daughter's pickup accordingly."}
        ],
        [
            {"role": "user", "content": "I'm looking for vegan restaurants in Santa Monica"},
            {"role": "assistant", "content": "I'll find great vegan options in Santa Monica."}
        ],
        [
            {"role": "user", "content": "My 7-year-old daughter is allergic to peanuts"},
            {"role": "assistant",
             "content": "I'll remember to check for peanut-free options in future recommendations."}
        ],
        [
            {"role": "user", "content": "I work remotely and need coffee shops with good wifi"},
            {"role": "assistant", "content": "I'll find remote-work-friendly coffee shops."}
        ],
        [
            {"role": "user", "content": "We love hiking and outdoor activities on weekends"},
            {"role": "assistant", "content": "Great! I'll keep your outdoor activity preferences in mind."}
        ]
    ]
    logger.info(f"Setting up user history for {user_id}")
    # Each conversation is ingested separately; Mem0 extracts facts per exchange.
    for conversation in conversations:
        mem0_client.add(conversation, user_id=user_id)
def get_user_context(user_id, query):
    """Fetch memories relevant to `query` for `user_id` from Mem0.

    Returns the hits formatted as a "- ..." bullet list, or a fallback
    sentence when there are no hits or the lookup fails.
    """
    try:
        relevant = mem0_client.search(
            query=query,
            version="v2",
            filters={"AND": [{"user_id": user_id}]},
        )
        if not relevant:
            logger.info(f"No relevant memories found for user {user_id}")
            return "No previous user context available."
        bullet_list = "\n".join(f"- {item['memory']}" for item in relevant)
        logger.info(f"Found {len(relevant)} relevant memories for user {user_id}")
        return bullet_list
    except Exception as e:
        logger.error(f"Error retrieving user context: {e}")
        return "Error retrieving user context."
def create_personalized_search_agent(user_context):
    """Create a LangChain agent for personalized search using Tavily.

    `user_context` is interpolated into the system prompt at construction time,
    so a fresh agent must be built for each query's context.
    """
    # Create Tavily search tool
    tavily_search = TavilySearch(
        max_results=10,
        search_depth="advanced",
        include_answer=True,
        topic="general"
    )
    tools = [tavily_search]
    # Create personalized search prompt
    prompt = ChatPromptTemplate.from_messages([
        ("system", f"""You are a personalized search assistant. You help users find information that's relevant to their specific context and preferences.
USER CONTEXT AND PREFERENCES:
{user_context}
YOUR ROLE:
1. Analyze the user's query and their personal context/preferences above
2. Look for patterns in the context to understand their preferences, location, lifestyle, family situation, etc.
3. Create enhanced search queries that incorporate relevant personal context you discover
4. Use the tavily_search tool everytime with enhanced queries to find personalized results
INSTRUCTIONS:
- Study the user memories carefully to understand their situation
- If any questions ask something related to nearby, close to, etc. refer to previous user context for identifying locations and enhance search query based on that.
- If memories mention specific locations, consider them for local searches
- If memories reveal dietary preferences or restrictions, factor those in for food-related queries
- If memories show family context, consider family-friendly options
- If memories indicate work style or interests, incorporate those when relevant
- Use tavily_search tool everytime with enhanced queries (based on above context)
- Always explain which specific memories led you to personalize the search in certain ways
Do NOT assume anything not present in the user memories."""),
        MessagesPlaceholder(variable_name="messages"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])
    # Create agent
    agent = create_openai_tools_agent(llm=llm, tools=tools, prompt=prompt)
    # return_intermediate_steps exposes tool calls to the caller for inspection.
    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        verbose=True,
        return_intermediate_steps=True
    )
    return agent_executor
def conduct_personalized_search(user_id, query):
    """
    Personalized search workflow using LangChain agent + Tavily + Mem0.

    Retrieves user context from Mem0, runs the search agent with that context,
    stores the interaction back into Mem0, and returns
    {"agent_response": <text>} — or {"error": <message>} on failure.
    """
    logger.info(f"Starting personalized search for user {user_id}: {query}")
    start_time = datetime.now()
    try:
        # Get user context from Mem0
        user_context = get_user_context(user_id, query)
        # Create personalized search agent
        agent_executor = create_personalized_search_agent(user_context)
        # Run the agent
        response = agent_executor.invoke({
            "messages": [HumanMessage(content=query)]
        })
        # Fix: removed the dead intermediate-steps extraction loop — it built
        # `search_queries_used` and `total_results` that were never read.
        # Store this search interaction in Mem0 for user preferences
        store_search_interaction(user_id, query, response['output'])
        # Compile results
        duration = (datetime.now() - start_time).total_seconds()
        results = {"agent_response": response['output']}
        logger.info(f"Personalized search completed in {duration:.2f}s")
        return results
    except Exception as e:
        logger.error(f"Error in personalized search workflow: {e}")
        return {"error": str(e)}
def store_search_interaction(user_id, original_query, agent_response):
    """Record a completed search exchange in Mem0 so it informs future personalization."""
    try:
        exchange = [
            {"role": "user", "content": f"Searched for: {original_query}"},
            {"role": "assistant", "content": f"Provided personalized results based on user preferences: {agent_response}"},
        ]
        mem0_client.add(messages=exchange, user_id=user_id)
        logger.info(f"Stored search interaction for user {user_id}")
    except Exception as e:
        logger.error(f"Error storing search interaction: {e}")
def personalized_search_agent():
    """Example of the personalized search agent: seed history, then run two demo queries."""
    user_id = "john"
    # Setup user history
    print("\nSetting up user history from past conversations...")
    setup_user_history(user_id)  # This is one-time setup
    # Test personalized searches
    test_queries = [
        "good coffee shops nearby for working",
        "what can we gift our daughter for birthday? what's trending?"
    ]
    for i, query in enumerate(test_queries, 1):
        print(f"\n ----- {i}️⃣ PERSONALIZED SEARCH -----")
        print(f"Query: '{query}'")
        # Run personalized search
        results = conduct_personalized_search(user_id, query)
        if results.get("error"):
            print(f"Error: {results['error']}")
        else:
            print(f"Agent response: {results['agent_response']}")


if __name__ == "__main__":
    personalized_search_agent()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/strands_agent_aws_elasticache_neptune.py | examples/misc/strands_agent_aws_elasticache_neptune.py |
"""
GitHub Repository Research Agent with Persistent Memory
This example demonstrates how to build an AI agent with persistent memory using:
- Mem0 for memory orchestration and lifecycle management
- Amazon ElastiCache for Valkey for high-performance vector similarity search
- Amazon Neptune Analytics for graph-based relationship storage and traversal
- Strands Agents framework for agent orchestration and tool management
The agent can research GitHub repositories, store information in both vector and graph memory,
and retrieve relevant information for future queries with significant performance improvements.
For detailed explanation and architecture, see the blog posts:
- AWS Blog: https://aws.amazon.com/blogs/database/build-persistent-memory-for-agentic-ai-applications-with-mem0-open-source-amazon-elasticache-for-valkey-and-amazon-neptune-analytics/
- Mem0 Blog: https://mem0.ai/blog/build-persistent-memory-for-agentic-ai-applications-with-mem0-open-source-amazon-elasticache-for-valkey-and-amazon-neptune-analytics
Prerequisites:
1. ElastiCache cluster running Valkey 8.2+ with vector search support
2. Neptune Analytics graph with vector indexes and public access
3. AWS credentials with access to Bedrock, ElastiCache, and Neptune
Environment Variables:
- AWS_REGION=us-east-1
- AWS_ACCESS_KEY_ID=your_aws_access_key
- AWS_SECRET_ACCESS_KEY=your_aws_secret_key
- NEPTUNE_ENDPOINT=neptune-graph://your-graph-id (optional, defaults to g-6n3v83av7a)
- VALKEY_URL=valkey://your-cluster-endpoint:6379 (optional, defaults to localhost:6379)
Installation:
pip install strands-agents strands-agents-tools mem0ai streamlit
Usage:
streamlit run agent1.py
Example queries:
1. "What is the URL for the project mem0 and its most important metrics?"
2. "Find the top contributors for Mem0 and store this information in a graph"
3. "Who works in the core packages and the SDK updates?"
"""
import os
import streamlit as st
from strands import Agent, tool
from strands_tools import http_request
from mem0.memory.main import Memory
# Mem0 configuration: Bedrock models, ElastiCache/Valkey vector store,
# Neptune Analytics graph store (see module docstring for prerequisites).
config = {
    "embedder": {
        "provider": "aws_bedrock",
        "config": {
            "model": "amazon.titan-embed-text-v2:0"
        }
    },
    "llm": {
        "provider": "aws_bedrock",
        "config": {
            "model": "us.anthropic.claude-sonnet-4-20250514-v1:0",
            "max_tokens": 512,
            "temperature": 0.5
        }
    },
    "vector_store": {
        "provider": "valkey",
        "config": {
            "collection_name": "blogpost1",
            # Must match the embedder's output dimensionality (Titan v2 → 1024).
            "embedding_model_dims": 1024,
            "valkey_url": os.getenv("VALKEY_URL", "valkey://localhost:6379"),
            "index_type": "hnsw",
            "hnsw_m": 32,
            "hnsw_ef_construction": 400,
            "hnsw_ef_runtime": 40
        }
    },
    "graph_store": {
        "provider": "neptune",
        "config": {
            "endpoint": os.getenv("NEPTUNE_ENDPOINT", "neptune-graph://g-6n3v83av7a"),
        },
    }
}
# Shared Memory instance used by all tool functions below.
m = Memory.from_config(config)
def get_assistant_response(messages):
    """
    Send the entire conversation thread to the agent in the proper Strands message format.

    Args:
        messages: List of message dictionaries with 'role' and 'content' keys

    Returns:
        Agent response result
    """
    # Strands expects each message's content as a list of typed parts, not a bare string.
    formatted_messages = [
        {"role": entry["role"], "content": [{"text": entry["content"]}]}
        for entry in messages
    ]
    # Send the properly formatted message list to the agent
    return agent(formatted_messages)
@tool
def store_memory_tool(information: str, user_id: str = "user", category: str = "conversation") -> str:
    """
    Store standalone facts, preferences, descriptions, or unstructured information in vector-based memory.
    Use this tool for:
    - User preferences ("User prefers dark mode", "Alice likes coffee")
    - Standalone facts ("The meeting was productive", "Project deadline is next Friday")
    - Descriptions ("Alice is a software engineer", "The office is located downtown")
    - General context that doesn't involve relationships between entities
    Do NOT use for relationship information - use store_graph_memory_tool instead.
    Args:
        information: The standalone information to store in vector memory
        user_id: User identifier for memory storage (default: "user")
        category: Category for organizing memories (e.g., "preferences", "projects", "facts")
    Returns:
        Confirmation message about memory storage
    """
    try:
        # Wrap the fact in mem0's message format and tag it as vector storage.
        m.add(
            [{"role": "user", "content": information}],
            user_id=user_id,
            metadata={"category": category, "storage_type": "vector"},
        )
    except Exception as e:
        print(f"Error storing vector memory: {e}")
        return f"❌ Failed to store vector memory: {str(e)}"
    return f"✅ Successfully stored information in vector memory: '{information[:100]}...'"
@tool
def store_graph_memory_tool(information: str, user_id: str = "user", category: str = "relationships") -> str:
    """
    Store relationship-based information, connections, or structured data in graph-based memory.
    In memory we will keep the information about projects and repositories we've learned about, including its URL and key metrics
    Use this tool for:
    - Relationships between people ("John manages Sarah", "Alice works with Bob")
    - Entity connections ("Project A depends on Project B", "Alice is part of Team X")
    - Hierarchical information ("Sarah reports to John", "Department A contains Team B")
    - Network connections ("Alice knows Bob through work", "Company X partners with Company Y")
    - Temporal sequences ("Event A led to Event B", "Meeting A was scheduled after Meeting B")
    - Any information where entities are connected to each other
    Use this instead of store_memory_tool when the information describes relationships or connections.
    Args:
        information: The relationship or connection information to store in graph memory
        user_id: User identifier for memory storage (default: "user")
        category: Category for organizing memories (default: "relationships")
    Returns:
        Confirmation message about graph memory storage
    """
    try:
        # The "RELATIONSHIP:" prefix lets search tools recognize graph-origin entries.
        memory_message = [{"role": "user", "content": f"RELATIONSHIP: {information}"}]
        m.add(memory_message, user_id=user_id, metadata={"category": category, "storage_type": "graph"})
        return f"✅ Successfully stored relationship in graph memory: '{information[:100]}...'"
    except Exception as e:
        # Fix: log the exception before returning, consistent with the sibling
        # tools (store_memory_tool, search_*_tool) — previously it was silent.
        print(f"Error storing graph memory: {e}")
        return f"❌ Failed to store graph memory: {str(e)}"
@tool
def search_memory_tool(query: str, user_id: str = "user") -> str:
    """
    Search through vector-based memories using semantic similarity to find relevant standalone information.
    In memory we will keep the information about projects and repositories we've learned about, including its URL and key metrics
    Use this tool for:
    - Finding similar concepts or topics ("What do we know about AI?")
    - Semantic searches ("Find information about preferences")
    - Content-based searches ("What was said about the project deadline?")
    - General information retrieval that doesn't involve relationships
    For relationship-based queries, use search_graph_memory_tool instead.
    Args:
        query: Search query to find semantically similar memories
        user_id: User identifier to search memories for (default: "user")
    Returns:
        Relevant vector memories found or message if none found
    """
    try:
        results = m.search(query, user_id=user_id)
        # Expect {"results": [...]} — anything else is reported as "no hits".
        if isinstance(results, dict) and 'results' in results:
            memory_list = results['results']
            if memory_list:
                memory_texts = []
                for i, result in enumerate(memory_list, 1):
                    memory_text = result.get('memory', 'No memory text available')
                    metadata = result.get('metadata', {})
                    # metadata may be missing or non-dict, so guard each lookup
                    category = metadata.get('category', 'unknown') if isinstance(metadata, dict) else 'unknown'
                    storage_type = metadata.get('storage_type', 'unknown') if isinstance(metadata, dict) else 'unknown'
                    score = result.get('score', 0)
                    memory_texts.append(f"{i}. [{category}|{storage_type}] {memory_text} (score: {score:.3f})")
                return f"🔍 Found {len(memory_list)} relevant vector memories:\n" + "\n".join(memory_texts)
            else:
                return f"🔍 No vector memories found for query: '{query}'"
        else:
            return f"🔍 No vector memories found for query: '{query}'"
    except Exception as e:
        print(f"Error searching vector memories: {e}")
        return f"❌ Failed to search vector memories: {str(e)}"
@tool
def search_graph_memory_tool(query: str, user_id: str = "user") -> str:
    """
    Search through graph-based memories to find relationship and connection information.
    Use this tool for:
    - Finding connections between entities ("How is Alice related to the project?")
    - Discovering relationships ("Who works with whom?")
    - Path-based queries ("What connects concept A to concept B?")
    - Hierarchical questions ("Who reports to whom?")
    - Network analysis ("What are all the connections to this person/entity?")
    - Relationship-based searches ("Find all partnerships", "Show team structures")
    This searches specifically for relationship and connection information stored in the graph.
    Args:
        query: Search query focused on relationships and connections
        user_id: User identifier to search memories for (default: "user")
    Returns:
        Relevant graph memories and relationships found or message if none found
    """
    try:
        # Bias the semantic search toward relationship-flavored memories.
        graph_query = f"relationships connections {query}"
        results = m.search(graph_query, user_id=user_id)
        if isinstance(results, dict) and 'results' in results:
            memory_list = results['results']
            if memory_list:
                memory_texts = []
                relationship_count = 0
                for i, result in enumerate(memory_list, 1):
                    memory_text = result.get('memory', 'No memory text available')
                    metadata = result.get('metadata', {})
                    category = metadata.get('category', 'unknown') if isinstance(metadata, dict) else 'unknown'
                    storage_type = metadata.get('storage_type', 'unknown') if isinstance(metadata, dict) else 'unknown'
                    score = result.get('score', 0)
                    # Prioritize graph/relationship memories (marked with a 🔗 prefix)
                    if 'RELATIONSHIP:' in memory_text or storage_type == 'graph' or category == 'relationships':
                        relationship_count += 1
                        memory_texts.append(f"{i}. 🔗 [{category}|{storage_type}] {memory_text} (score: {score:.3f})")
                    else:
                        memory_texts.append(f"{i}. [{category}|{storage_type}] {memory_text} (score: {score:.3f})")
                result_summary = f"🔗 Found {len(memory_list)} relevant memories ({relationship_count} relationship-focused):\n"
                return result_summary + "\n".join(memory_texts)
            else:
                return f"🔗 No graph memories found for query: '{query}'"
        else:
            return f"🔗 No graph memories found for query: '{query}'"
    except Exception as e:
        print(f"Error searching graph memories: {e}")
        return f"Failed to search graph memories: {str(e)}"
@tool
def get_all_memories_tool(user_id: str = "user") -> str:
    """
    Retrieve all stored memories for a user to get comprehensive context.
    Use this tool when you need to understand the full history of what has been remembered
    about a user or when you need comprehensive context for decision making.
    Args:
        user_id: User identifier to get all memories for (default: "user")
    Returns:
        All memories for the user or message if none found
    """
    try:
        all_memories = m.get_all(user_id=user_id)
        memory_list = all_memories.get('results') if isinstance(all_memories, dict) else None
        if not memory_list:
            return f"📚 No memories found for user: '{user_id}'"
        lines = []
        for idx, entry in enumerate(memory_list, 1):
            text = entry.get('memory', 'No memory text available')
            meta = entry.get('metadata', {})
            label = meta.get('category', 'unknown') if isinstance(meta, dict) else 'unknown'
            when = entry.get('created_at', 'unknown time')
            lines.append(f"{idx}. [{label}] {text} (stored: {when})")
        return f"📚 Found {len(memory_list)} total memories:\n" + "\n".join(lines)
    except Exception as e:
        print(f"Error retrieving all memories: {e}")
        return f"❌ Failed to retrieve memories: {str(e)}"
# Initialize agent with tools (must be after tool definitions).
# http_request comes from strands_tools; the rest are the memory tools defined above.
agent = Agent(tools=[http_request, store_memory_tool, store_graph_memory_tool, search_memory_tool, search_graph_memory_tool, get_all_memories_tool])
def store_memory(messages, user_id="alice", category="conversation"):
    """
    Store the conversation thread in mem0 memory.

    Args:
        messages: List of message dictionaries with 'role' and 'content' keys
        user_id: User identifier for memory storage
        category: Category for organizing memories

    Returns:
        Memory storage result, or None if storage failed
    """
    try:
        return m.add(messages, user_id=user_id, metadata={"category": category})
    except Exception as e:
        # Fix: the exception was silently discarded (and the commented-out debug
        # print referenced an unbound `e`); log it so failures are visible.
        print(f"Error storing memory: {e}")
        return None
def get_agent_metrics(result):
    """Format, print, and return a one-line summary of the agent run's metrics."""
    metrics = result.metrics
    agent_metrics = (
        f"I've used {metrics.cycle_count} cycle counts,"
        f" {metrics.accumulated_usage['totalTokens']} tokens"
        f", and {sum(metrics.cycle_durations):.2f} seconds finding that answer"
    )
    print(agent_metrics)
    return agent_metrics
st.title("Repo Research Agent")
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Create a container with the chat frame styling
# NOTE(review): the "chat-frame" CSS class is not defined in this file — presumably
# injected elsewhere or a leftover; confirm it has any effect.
with st.container():
    st.markdown('<div class="chat-frame">', unsafe_allow_html=True)
    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    st.markdown('</div>', unsafe_allow_html=True)
# React to user input
if prompt := st.chat_input("Send a message"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Let the agent decide autonomously when to store memories.
    # Pass the entire conversation thread to the agent
    response = get_assistant_response(st.session_state.messages)
    # Extract the text content from the AgentResult
    response_text = str(response)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response_text)
    # Add assistant response to chat history (store as string, not AgentResult)
    st.session_state.messages.append({"role": "assistant", "content": response_text})
    tokenusage = get_agent_metrics(response)
    # Add assistant token usage to chat history
    with st.chat_message("assistant"):
        st.markdown(tokenusage)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/fitness_checker.py | examples/misc/fitness_checker.py | """
Simple Fitness Memory Tracker that tracks your fitness progress and knows your health priorities.
Uses Mem0 for memory and gpt-4.1-nano for image understanding.
In order to run this file, you need to set up your Mem0 API at Mem0 platform and also need an OpenAI API key.
export OPENAI_API_KEY="your_openai_api_key"
export MEM0_API_KEY="your_mem0_api_key"
"""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from mem0 import MemoryClient
# Initialize memory
memory_client = MemoryClient(api_key="your-mem0-api-key")
USER_ID = "Anish"
agent = Agent(
name="Fitness Agent",
model=OpenAIChat(id="gpt-4.1-nano-2025-04-14"),
description="You are a helpful fitness assistant who remembers past logs and gives personalized suggestions for Anish's training and diet.",
markdown=True,
)
# Store user preferences as memory
def store_user_preferences(conversation: list, user_id: str = USER_ID):
    """Store user preferences from conversation history.

    Args:
        conversation: List of {"role", "content"} message dicts; Mem0 extracts
            and persists the salient facts from them.
        user_id: Mem0 user id the memories are stored under.
    """
    memory_client.add(conversation, user_id=user_id)
# Memory-aware assistant function
def fitness_coach(user_input: str, user_id: str = USER_ID):
    """Answer a fitness question using stored memories, then log the exchange.

    Args:
        user_input: The user's question for the coach.
        user_id: Mem0 user id whose memories are searched and updated.

    Returns:
        The agent's text reply.
    """
    # Retrieve memories relevant to this specific query.
    relevant = memory_client.search(user_input, user_id=user_id)
    bullets = [f"- {entry['memory']}" for entry in relevant]
    memory_context = "\n".join(bullets)

    prompt = f"""You are a fitness assistant who helps Anish with his training, recovery, and diet. You have long-term memory of his health, routines, preferences, and past conversations.
Use your memory to personalize suggestions — consider his constraints, goals, patterns, and lifestyle when responding.
Here is what you remember about {user_id}:
{memory_context}
User query:
{user_input}"""

    reply = agent.run(prompt)
    # Record this turn so future sessions remember it.
    memory_client.add(f"User: {user_input}\nAssistant: {reply.content}", user_id=user_id)
    return reply.content
# --------------------------------------------------
# Store user preferences and memories
messages = [
{
"role": "user",
"content": "Hi, I’m Anish. I'm 26 years old, 5'10\", and weigh 72kg. I started working out 6 months ago with the goal of building lean muscle.",
},
{
"role": "assistant",
"content": "Got it — you're 26, 5'10\", 72kg, and on a lean muscle journey. Started gym 6 months ago.",
},
{
"role": "user",
"content": "I follow a push-pull-legs routine and train 5 times a week. My rest days are Wednesday and Sunday.",
},
{
"role": "assistant",
"content": "Understood — push-pull-legs split, training 5x/week with rest on Wednesdays and Sundays.",
},
{"role": "user", "content": "After push days, I usually eat high-protein and moderate-carb meals to recover."},
{"role": "assistant", "content": "Noted — high-protein, moderate-carb meals after push workouts."},
{"role": "user", "content": "For pull days, I take whey protein and eat a banana after training."},
{"role": "assistant", "content": "Logged — whey protein and banana post pull workouts."},
{"role": "user", "content": "On leg days, I make sure to have complex carbs like rice or oats."},
{"role": "assistant", "content": "Noted — complex carbs like rice and oats are part of your leg day meals."},
{
"role": "user",
"content": "I often feel sore after leg days, so I use turmeric milk and magnesium to help with recovery.",
},
{"role": "assistant", "content": "I'll remember turmeric milk and magnesium as part of your leg day recovery."},
{
"role": "user",
"content": "Last push day, I did 3x8 bench press at 60kg, 4x12 overhead press, and dips. Felt fatigued after.",
},
{
"role": "assistant",
"content": "Push day logged — 60kg bench, overhead press, dips. You felt fatigued afterward.",
},
{"role": "user", "content": "I prefer light dinners post-workout like tofu, soup, and vegetables."},
{"role": "assistant", "content": "Got it — light dinners post-workout: tofu, soup, and veggies."},
{
"role": "user",
"content": "I have mild lactose intolerance, so I avoid dairy. I use almond milk or lactose-free whey.",
},
{"role": "assistant", "content": "Understood — avoiding regular dairy, using almond milk and lactose-free whey."},
{
"role": "user",
"content": "I get occasional knee pain, so I avoid deep squats and do more hamstring curls and glute bridges on leg days.",
},
{
"role": "assistant",
"content": "Noted — due to knee discomfort, you substitute deep squats with curls and glute bridges.",
},
{"role": "user", "content": "I track sleep and notice poor performance when I sleep less than 6 hours."},
{"role": "assistant", "content": "Logged — performance drops when you get under 6 hours of sleep."},
{"role": "user", "content": "I take magnesium supplements to help with muscle recovery and sleep quality."},
{"role": "assistant", "content": "Remembered — magnesium helps you with recovery and sleep."},
{"role": "user", "content": "I avoid caffeine after 4 PM because it affects my sleep."},
{"role": "assistant", "content": "Got it — you avoid caffeine post-4 PM to protect your sleep."},
]
store_user_preferences(messages)
# Example usage with fitness coach
fitness_coach("How much was I lifting for bench press a month ago?")
# OUTPUT: A month ago, you were lifting 55kg for your bench press as part of your push day routine. It looks like you've increased your bench press weight by 5kg since then! Keep up the good work on your journey to gain lean muscle.
fitness_coach("Suggest a post-workout meal, but I’ve had poor sleep last night.")
# OUTPUT: Anish, since you had poor sleep, focus on a recovery-friendly, lactose-free meal: tofu or chicken for protein, paired with quinoa or brown rice for lasting energy. Turmeric almond milk will help with inflammation. Based on your past leg day recovery, continue magnesium, stay well-hydrated, and avoid caffeine after 4PM. Aim for 7–8 hours of sleep, and consider light stretching or a warm bath to ease soreness.
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/study_buddy.py | examples/misc/study_buddy.py | """
Create your personal AI Study Buddy that remembers what you’ve studied (and where you struggled),
helps with spaced repetition and topic review, personalizes responses using your past interactions.
Supports both text and PDF/image inputs.
In order to run this file, you need to set up your Mem0 API at Mem0 platform and also need a OpenAI API key.
export OPENAI_API_KEY="your_openai_api_key"
export MEM0_API_KEY="your_mem0_api_key"
"""
import asyncio
from agents import Agent, Runner
from mem0 import MemoryClient
client = MemoryClient()
# Define your study buddy agent
study_agent = Agent(
name="StudyBuddy",
instructions="""You are a helpful study coach. You:
- Track what the user has studied before
- Identify topics the user has struggled with (e.g., "I'm confused", "this is hard")
- Help with spaced repetition by suggesting topics to revisit based on last review time
- Personalize answers using stored memories
- Summarize PDFs or notes the user uploads""",
)
# Upload and store PDF to Mem0
def upload_pdf(pdf_url: str, user_id: str):
    """Ingest a PDF (referenced by URL) into the user's Mem0 memory."""
    client.add(
        [{"role": "user", "content": {"type": "pdf_url", "pdf_url": {"url": pdf_url}}}],
        user_id=user_id,
    )
    print("✅ PDF uploaded and processed into memory.")
# Main interaction loop with your personal study buddy
async def study_buddy(user_id: str, topic: str, user_input: str):
    """Answer a study question with memory context and record the exchange.

    Searches the student's stored memories for the topic, builds a prompt for
    the study agent, runs it, and saves the new Q&A back to memory tagged
    with the topic.

    Args:
        user_id: Identifier of the student whose memories are searched/updated.
        topic: Study topic used both for the memory search and for tagging.
        user_input: The student's new question or comment.

    Returns:
        The agent's final text output.
    """
    memories = client.search(f"{topic}", user_id=user_id)
    # BUG FIX: memories were joined with the literal letter "n" instead of a
    # newline, producing one garbled run-on line in the prompt.
    memory_context = "\n".join(f"- {m['memory']}" for m in memories)
    prompt = f"""
You are helping the user study the topic: {topic}.
Here are past memories from previous sessions:
{memory_context}
Now respond to the user's new question or comment:
{user_input}
"""
    result = await Runner.run(study_agent, prompt)
    response = result.final_output
    # Persist the exchange (BUG FIX: "\n" separators instead of literal "n"s).
    client.add(
        [{"role": "user", "content": f"Topic: {topic}\nUser: {user_input}\n\nStudy Assistant: {response}"}],
        user_id=user_id,
        metadata={"topic": topic},
    )
    return response
# Example usage
async def main():
user_id = "Ajay"
pdf_url = "https://pages.physics.ua.edu/staff/fabi/ph101/classnotes/8RotD101.pdf"
upload_pdf(pdf_url, user_id) # Upload a relevant lecture PDF to memory
topic = "Lagrangian Mechanics"
# Demonstrate tracking previously learned topics
print(await study_buddy(user_id, topic, "Can you remind me of what we discussed about generalized coordinates?"))
# Demonstrate weakness detection
print(await study_buddy(user_id, topic, "I still don’t get what frequency domain really means."))
# Demonstrate spaced repetition prompting
topic = "Momentum Conservation"
print(
await study_buddy(
user_id, topic, "I think we covered this last week. Is it time to review momentum conservation again?"
)
)
if __name__ == "__main__":
asyncio.run(main())
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/movie_recommendation_grok3.py | examples/misc/movie_recommendation_grok3.py | """
Memory-Powered Movie Recommendation Assistant (Grok 3 + Mem0)
This script builds a personalized movie recommender that remembers your preferences
(e.g. dislikes horror, loves romcoms) using Mem0 as a memory layer and Grok 3 for responses.
In order to run this file, you need to set up your Mem0 API at Mem0 platform and also need an XAI API key.
export XAI_API_KEY="your_xai_api_key"
export MEM0_API_KEY="your_mem0_api_key"
"""
import os
from openai import OpenAI
from mem0 import Memory
# Configure Mem0 with Grok 3 and Qdrant
config = {
"vector_store": {"provider": "qdrant", "config": {"embedding_model_dims": 384}},
"llm": {
"provider": "xai",
"config": {
"model": "grok-3-beta",
"temperature": 0.1,
"max_tokens": 2000,
},
},
"embedder": {
"provider": "huggingface",
"config": {
"model": "all-MiniLM-L6-v2" # open embedding model
},
},
}
# Instantiate memory layer
memory = Memory.from_config(config)
# Initialize Grok 3 client
grok_client = OpenAI(
api_key=os.getenv("XAI_API_KEY"),
base_url="https://api.x.ai/v1",
)
def recommend_movie_with_memory(user_id: str, user_query: str):
    """Recommend a movie via Grok 3, informed by remembered preferences.

    Args:
        user_id: Mem0 user id whose movie preferences are consulted.
        user_query: The user's request.

    Returns:
        The model's recommendation text.
    """
    # Pull prior movie-related memories and fold them into the prompt.
    past_memories = memory.search("movie preferences", user_id=user_id)
    prompt = user_query
    if past_memories:
        prompt = prompt + f"\nPreviously, the user mentioned: {past_memories}"

    # Ask Grok 3 for a recommendation.
    completion_result = grok_client.chat.completions.create(
        model="grok-3-beta", messages=[{"role": "user", "content": prompt}]
    )
    recommendation = completion_result.choices[0].message.content

    # Remember this exchange for future recommendations.
    memory.add(
        [{"role": "user", "content": user_query}, {"role": "assistant", "content": recommendation}],
        user_id=user_id,
        metadata={"category": "movie"},
    )
    return recommendation
# Example Usage
if __name__ == "__main__":
user_id = "arshi"
recommend_movie_with_memory(user_id, "I'm looking for a movie to watch tonight. Any suggestions?")
# OUTPUT: You have watched Intersteller last weekend and you don't like horror movies, maybe you can watch "Purple Hearts" today.
recommend_movie_with_memory(
user_id, "Can we skip the tearjerkers? I really enjoyed Notting Hill and Crazy Rich Asians."
)
# OUTPUT: Got it — no sad endings! You might enjoy "The Proposal" or "Love, Rosie". They’re both light-hearted romcoms with happy vibes.
recommend_movie_with_memory(user_id, "Any light-hearted movie I can watch after work today?")
# OUTPUT: Since you liked Crazy Rich Asians and The Proposal, how about "The Intern" or "Isn’t It Romantic"? Both are upbeat, funny, and perfect for relaxing.
recommend_movie_with_memory(user_id, "I’ve already watched The Intern. Something new maybe?")
# OUTPUT: No problem! Try "Your Place or Mine" - romcoms that match your taste and are tear-free!
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/healthcare_assistant_google_adk.py | examples/misc/healthcare_assistant_google_adk.py | import asyncio
import warnings
from google.adk.agents import Agent
from google.adk.runners import Runner
from google.adk.sessions import InMemorySessionService
from google.genai import types
from mem0 import MemoryClient
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Initialize Mem0 client
mem0_client = MemoryClient()
# Define Memory Tools
def save_patient_info(information: str) -> dict:
    """Saves important patient information to memory."""
    print(f"Storing patient information: {information[:30]}...")

    # The caller stashes the active user id on the function object;
    # fall back to a default when it was never set.
    patient_id = getattr(save_patient_info, "user_id", "default_user")

    message = {"role": "user", "content": information}
    mem0_client.add(
        [message],
        user_id=patient_id,
        run_id="healthcare_session",
        metadata={"type": "patient_information"},
    )
    return {"status": "success", "message": "Information saved"}
def retrieve_patient_info(query: str) -> str:
    """Retrieves relevant patient information from memory."""
    print(f"Searching for patient information: {query}")

    # User id is attached to the function object by the caller.
    patient_id = getattr(retrieve_patient_info, "user_id", "default_user")

    hits = mem0_client.search(
        query,
        user_id=patient_id,
        run_id="healthcare_session",
        limit=5,
        threshold=0.7,  # Higher threshold for more relevant results
    )
    if not hits:
        return "I don't have any relevant memories about this topic."

    bullet_list = "\n".join(f"• {hit['memory']}" for hit in hits)
    return "Here's what I remember that might be relevant:\n" + bullet_list
# Define Healthcare Tools
def schedule_appointment(date: str, time: str, reason: str) -> dict:
    """Schedules a doctor's appointment."""
    # In a real app, this would connect to a scheduling system; here the id is
    # derived deterministically (within one process) from date+time.
    slot_key = date + time
    booking = {
        "status": "success",
        "appointment_id": f"APT-{hash(slot_key) % 10000}",
        "confirmation": f"Appointment scheduled for {date} at {time} for {reason}",
        "message": "Please arrive 15 minutes early to complete paperwork.",
    }
    return booking
# Create the Healthcare Assistant Agent
healthcare_agent = Agent(
name="healthcare_assistant",
model="gemini-1.5-flash", # Using Gemini for healthcare assistant
description="Healthcare assistant that helps patients with health information and appointment scheduling.",
instruction="""You are a helpful Healthcare Assistant with memory capabilities.
Your primary responsibilities are to:
1. Remember patient information using the 'save_patient_info' tool when they share symptoms, conditions, or preferences.
2. Retrieve past patient information using the 'retrieve_patient_info' tool when relevant to the current conversation.
3. Help schedule appointments using the 'schedule_appointment' tool.
IMPORTANT GUIDELINES:
- Always be empathetic, professional, and helpful.
- Save important patient information like symptoms, conditions, allergies, and preferences.
- Check if you have relevant patient information before asking for details they may have shared previously.
- Make it clear you are not a doctor and cannot provide medical diagnosis or treatment.
- For serious symptoms, always recommend consulting a healthcare professional.
- Keep all patient information confidential.
""",
tools=[save_patient_info, retrieve_patient_info, schedule_appointment],
)
# Set Up Session and Runner
session_service = InMemorySessionService()
# Define constants for the conversation
APP_NAME = "healthcare_assistant_app"
USER_ID = "Alex"
SESSION_ID = "session_001"
# Create a session
session = session_service.create_session(app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID)
# Create the runner
runner = Runner(agent=healthcare_agent, app_name=APP_NAME, session_service=session_service)
# Interact with the Healthcare Assistant
async def call_agent_async(query, runner, user_id, session_id):
    """Sends a query to the agent and returns the final response.

    Streams events from the ADK runner and returns the text of the first
    final response; falls back to a fixed string if none arrives.
    """
    print(f"\n>>> Patient: {query}")
    # Format the user's message in the ADK content structure
    content = types.Content(role="user", parts=[types.Part(text=query)])
    # The memory tools read their user id from a function attribute
    # (set here rather than passed as a tool argument).
    save_patient_info.user_id = user_id
    retrieve_patient_info.user_id = user_id
    # Run the agent and scan the event stream for the final response
    async for event in runner.run_async(user_id=user_id, session_id=session_id, new_message=content):
        if event.is_final_response():
            if event.content and event.content.parts:
                response = event.content.parts[0].text
                print(f"<<< Assistant: {response}")
                return response
    return "No response received."
# Example conversation flow
async def run_conversation():
    """Scripted four-turn demo conversation exercising the assistant's memory."""
    # First interaction - patient introduces themselves with key information
    await call_agent_async(
        "Hi, I'm Alex. I've been having headaches for the past week, and I have a penicillin allergy.",
        runner=runner,
        user_id=USER_ID,
        session_id=SESSION_ID,
    )
    # Request for health information
    await call_agent_async(
        "Can you tell me more about what might be causing my headaches?",
        runner=runner,
        user_id=USER_ID,
        session_id=SESSION_ID,
    )
    # Schedule an appointment
    await call_agent_async(
        "I think I should see a doctor. Can you help me schedule an appointment for next Monday at 2pm?",
        runner=runner,
        user_id=USER_ID,
        session_id=SESSION_ID,
    )
    # Test memory - should remember patient name, symptoms, and allergy
    await call_agent_async(
        "What medications should I avoid for my headaches?", runner=runner, user_id=USER_ID, session_id=SESSION_ID
    )
# Interactive mode
async def interactive_mode():
    """Run an interactive chat session with the healthcare assistant.

    Prompts for a patient id, registers a fresh session for it, then loops
    on stdin until the user types exit/quit/bye. Reuses the module-level
    runner and session service.
    """
    print("=== Healthcare Assistant Interactive Mode ===")
    print("Enter 'exit' to quit at any time.")
    # Get user information (empty input falls back to the default patient)
    patient_id = input("Enter patient ID (or press Enter for default): ").strip() or USER_ID
    # Derive a stable 3-digit session id from the patient id
    session_id = f"session_{hash(patient_id) % 1000:03d}"
    # Create session for this user
    session_service.create_session(app_name=APP_NAME, user_id=patient_id, session_id=session_id)
    print(f"\nStarting conversation with patient ID: {patient_id}")
    print("Type your message and press Enter.")
    while True:
        user_input = input("\n>>> Patient: ").strip()
        if user_input.lower() in ["exit", "quit", "bye"]:
            print("Ending conversation. Thank you!")
            break
        await call_agent_async(user_input, runner=runner, user_id=patient_id, session_id=session_id)
# Main execution
# Main execution: choose demo or interactive mode from the command line.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Healthcare Assistant with Memory")
    parser.add_argument("--demo", action="store_true", help="Run the demo conversation")
    parser.add_argument("--interactive", action="store_true", help="Run in interactive mode")
    # NOTE(review): --patient-id is parsed but never used below — verify intent.
    parser.add_argument("--patient-id", type=str, default=USER_ID, help="Patient ID for the conversation")
    args = parser.parse_args()
    if args.demo:
        asyncio.run(run_conversation())
    elif args.interactive:
        asyncio.run(interactive_mode())
    else:
        # Default to demo mode if no arguments provided
        asyncio.run(run_conversation())
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/multillm_memory.py | examples/misc/multillm_memory.py | """
Multi-LLM Research Team with Shared Knowledge Base
Use Case: AI Research Team where each model has different strengths:
- GPT-4: Technical analysis and code review
- Claude: Writing and documentation
All models share a common knowledge base, building on each other's work.
Example: GPT-4 analyzes a tech stack → Claude writes documentation →
Data analyst analyzes user data → All models can reference previous research.
"""
import logging
from dotenv import load_dotenv
from litellm import completion
from mem0 import MemoryClient
load_dotenv()
# Configure logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[logging.StreamHandler(), logging.FileHandler("research_team.log")],
)
logger = logging.getLogger(__name__)
# Initialize memory client (platform version)
memory = MemoryClient()
# Research team models with specialized roles
RESEARCH_TEAM = {
"tech_analyst": {
"model": "gpt-4.1-nano-2025-04-14",
"role": "Technical Analyst - Code review, architecture, and technical decisions",
},
"writer": {
"model": "claude-3-5-sonnet-20241022",
"role": "Documentation Writer - Clear explanations and user guides",
},
"data_analyst": {
"model": "gpt-4.1-nano-2025-04-14",
"role": "Data Analyst - Insights, trends, and data-driven recommendations",
},
}
def get_team_knowledge(topic: str, project_id: str) -> str:
    """Return relevant research from the team's shared knowledge base.

    Each entry is prefixed with the contributing team member (from metadata).
    """
    matches = memory.search(query=topic, user_id=project_id, limit=5)
    if not matches:
        return "Team Knowledge Base: Empty - starting fresh research"

    parts = ["Team Knowledge Base:\n"]
    for entry in matches:
        if "memory" in entry:
            # Show which team member contributed this item.
            who = entry.get("metadata", {}).get("contributor", "Unknown")
            parts.append(f"• [{who}] {entry['memory']}\n")
    return "".join(parts)
def research_with_specialist(task: str, specialist: str, project_id: str) -> str:
    """Run a research task through the named specialist, sharing team memory.

    Args:
        task: The research question/assignment.
        specialist: Key into RESEARCH_TEAM selecting model and role.
        project_id: Project-level Mem0 user id for the shared knowledge base.

    Returns:
        The specialist model's answer (or an error string for unknown keys).
    """
    if specialist not in RESEARCH_TEAM:
        return f"Unknown specialist. Available: {list(RESEARCH_TEAM.keys())}"

    profile = RESEARCH_TEAM[specialist]
    # Fold the team's accumulated knowledge into the system prompt.
    team_knowledge = get_team_knowledge(task, project_id)
    system_prompt = f"""You are the {profile['role']}.
{team_knowledge}
Build upon the team's existing research. Reference previous findings when relevant.
Provide actionable insights in your area of expertise."""

    reply = completion(
        model=profile["model"],
        messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": task}],
    )
    answer = reply.choices[0].message.content

    # Record the exchange in the shared knowledge base, scoped to both the
    # project (user_id) and the contributing agent (agent_id).
    memory.add(
        [{"role": "user", "content": f"Task: {task}"}, {"role": "assistant", "content": answer}],
        user_id=project_id,
        agent_id=specialist,
        metadata={"contributor": specialist, "task_type": "research", "model_used": profile["model"]},
    )
    return answer
def show_team_knowledge(project_id: str):
    """Log a summary of the project's research, grouped by contributor."""
    stored = memory.get_all(user_id=project_id)
    if not stored:
        logger.info("No research found for this project")
        return

    logger.info(f"Team Research Summary (Project: {project_id}):")

    # Group memory texts by their contributing agent.
    grouped = {}
    for entry in stored:
        if "metadata" in entry and entry["metadata"]:
            who = entry["metadata"].get("contributor", "Unknown")
            grouped.setdefault(who, []).append(entry.get("memory", ""))

    for who, items in grouped.items():
        logger.info(f"{who.upper()}:")
        for i, item in enumerate(items[:3], 1):  # Show latest 3
            logger.info(f" {i}. {item[:100]}...")
def demo_research_team():
    """Demo: Building a SaaS product with the research team.

    Runs a four-stage pipeline where each stage is handled by a specialist
    and later stages can build on earlier findings via shared memory.
    """
    project = "saas_product_research"
    # Define research pipeline: stage label, specialist key, and task prompt
    research_pipeline = [
        {
            "stage": "Technical Architecture",
            "specialist": "tech_analyst",
            "task": "Analyze the best tech stack for a multi-tenant SaaS platform handling 10k+ users. Consider scalability, cost, and development speed.",
        },
        {
            "stage": "Product Documentation",
            "specialist": "writer",
            "task": "Based on the technical analysis, write a clear product overview and user onboarding guide for our SaaS platform.",
        },
        {
            "stage": "Market Analysis",
            "specialist": "data_analyst",
            "task": "Analyze market trends and pricing strategies for our SaaS platform. What metrics should we track?",
        },
        {
            "stage": "Strategic Decision",
            "specialist": "tech_analyst",
            "task": "Given our technical architecture, documentation, and market analysis - what should be our MVP feature priority?",
        },
    ]
    logger.info("AI Research Team: Building a SaaS Product")
    # Execute research pipeline sequentially so each stage can see prior work
    for i, step in enumerate(research_pipeline, 1):
        logger.info(f"\nStage {i}: {step['stage']}")
        logger.info(f"Specialist: {step['specialist']}")
        result = research_with_specialist(step["task"], step["specialist"], project)
        logger.info(f"Task: {step['task']}")
        logger.info(f"Result: {result[:200]}...\n")
    # Print the accumulated knowledge base at the end
    show_team_knowledge(project)
if __name__ == "__main__":
logger.info("Multi-LLM Research Team")
demo_research_team()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/diet_assistant_voice_cartesia.py | examples/misc/diet_assistant_voice_cartesia.py | """Simple Voice Agent with Memory: Personal Food Assistant.
A food assistant that remembers your dietary preferences and speaks recommendations
Powered by Agno + Cartesia + Mem0
export MEM0_API_KEY=your_mem0_api_key
export OPENAI_API_KEY=your_openai_api_key
export CARTESIA_API_KEY=your_cartesia_api_key
"""
from textwrap import dedent
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.cartesia import CartesiaTools
from agno.utils.audio import write_audio_to_file
from mem0 import MemoryClient
memory_client = MemoryClient()
USER_ID = "food_user_01"
# Agent instructions
agent_instructions = dedent(
"""Follow these steps SEQUENTIALLY to provide personalized food recommendations with voice:
1. Analyze the user's food request and identify what type of recommendation they need.
2. Consider their dietary preferences, restrictions, and cooking habits from memory context.
3. Generate a personalized food recommendation based on their stored preferences.
4. Analyze the appropriate tone for the response (helpful, enthusiastic, cautious for allergies).
5. Call `list_voices` to retrieve available voices.
6. Select a voice that matches the helpful, friendly tone.
7. Call `text_to_speech` to generate the final audio recommendation.
"""
)
# Simple agent that remembers food preferences
food_agent = Agent(
name="Personal Food Assistant",
description="Provides personalized food recommendations with memory and generates voice responses using Cartesia TTS tools.",
instructions=agent_instructions,
model=OpenAIChat(id="gpt-4.1-nano-2025-04-14"),
tools=[CartesiaTools(voice_localize_enabled=True)],
show_tool_calls=True,
)
def get_food_recommendation(user_query: str, user_id):
    """Generate a memory-aware food recommendation plus a voice note.

    Searches Mem0 for the user's stored food preferences, injects them into
    the request, runs the voice-enabled agent, and saves any generated audio
    to a timestamped MP3 file.

    Args:
        user_query: The user's food question/request.
        user_id: Mem0 user id whose preferences are searched.

    Returns:
        The agent's text response content.
    """
    # Search memory for relevant food preferences (top 5 hits).
    memories_result = memory_client.search(query=user_query, user_id=user_id, limit=5)

    # Format retrieved memories as bullet points for the prompt.
    memories = [f"- {result['memory']}" for result in memories_result]
    memory_context = "Memories about user that might be relevant:\n" + "\n".join(memories)

    # Combine memory context with user request.
    full_request = f"""
    {memory_context}
    User: {user_query}
    Answer the user query based on provided context and create a voice note.
    """

    # Generate response with voice (agent streams output; result is stored on
    # the agent as run_response).
    food_agent.print_response(full_request)
    response = food_agent.run_response

    # Save the audio file, if the agent produced one.
    if response.audio:
        import time

        timestamp = int(time.time())
        filename = f"food_recommendation_{timestamp}.mp3"
        write_audio_to_file(
            response.audio[0].base64_audio,
            filename=filename,
        )
        # BUG FIX: previously printed the literal text "(unknown)" instead of
        # interpolating the actual file name.
        print(f"Audio saved as {filename}")

    return response.content
def initialize_food_memory(user_id):
    """Initialize memory with food preferences.

    Seeds Mem0 with a scripted conversation covering dietary restrictions,
    meal habits, and likes/dislikes so later queries are personalized.
    """
    messages = [
        {
            "role": "user",
            "content": "Hi, I'm Sarah. I'm vegetarian and lactose intolerant. I love spicy food, especially Thai and Indian cuisine.",
        },
        {
            "role": "assistant",
            "content": "Hello Sarah! I've noted that you're vegetarian, lactose intolerant, and love spicy Thai and Indian food.",
        },
        {
            "role": "user",
            "content": "I prefer quick breakfasts since I'm always rushing, but I like cooking elaborate dinners. I also meal prep on Sundays.",
        },
        {
            "role": "assistant",
            "content": "Got it! Quick breakfasts, elaborate dinners, and Sunday meal prep. I'll remember this for future recommendations.",
        },
        {
            "role": "user",
            "content": "I'm trying to eat more protein. I like quinoa, lentils, chickpeas, and tofu. I hate mushrooms though.",
        },
        {
            "role": "assistant",
            "content": "Perfect! I'll focus on protein-rich options like quinoa, lentils, chickpeas, and tofu, and avoid mushrooms.",
        },
    ]
    # Mem0 extracts the durable facts from the transcript.
    memory_client.add(messages, user_id=user_id)
    print("Food preferences stored in memory")
# Initialize the memory for the user once in order for the agent to learn the user preference
initialize_food_memory(user_id=USER_ID)
print(
get_food_recommendation(
"Which type of restaurants should I go tonight for dinner and cuisines preferred?", user_id=USER_ID
)
)
# OUTPUT: 🎵 Audio saved as food_recommendation_1750162610.mp3
# For dinner tonight, considering your love for healthy spic optionsy, you could try a nice Thai, Indian, or Mexican restaurant.
# You might find dishes with quinoa, chickpeas, tofu, and fresh herbs delightful. Enjoy your dinner!
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/misc/test.py | examples/misc/test.py | from agents import Agent, Runner, enable_verbose_stdout_logging, function_tool
from dotenv import load_dotenv
from mem0 import MemoryClient
enable_verbose_stdout_logging()
load_dotenv()
# Initialize Mem0 client
mem0 = MemoryClient()
# Define memory tools for the agent
@function_tool
def search_memory(query: str, user_id: str) -> str:
    """Search through past conversations and memories"""
    hits = mem0.search(query, user_id=user_id, limit=3)
    if not hits:
        return "No relevant memories found."
    return "\n".join(f"- {hit['memory']}" for hit in hits)
@function_tool
def save_memory(content: str, user_id: str) -> str:
    """Save important information to memory"""
    message = {"role": "user", "content": content}
    mem0.add([message], user_id=user_id)
    return "Information saved to memory."
# Specialized agents
# Specialized agents.
# BUG FIX: the instructions previously told the model to use tools named
# `get_user_context` and `store_conversation`, which are not registered;
# the actual tools are `search_memory` and `save_memory`.
travel_agent = Agent(
    name="Travel Planner",
    instructions="""You are a travel planning specialist. Use search_memory to
    understand the user's travel preferences and history before making recommendations.
    After providing your response, use save_memory to save important details.""",
    tools=[search_memory, save_memory],
    model="gpt-4.1-nano-2025-04-14",
)

health_agent = Agent(
    name="Health Advisor",
    instructions="""You are a health and wellness advisor. Use search_memory to
    understand the user's health goals and dietary preferences.
    After providing advice, use save_memory to save relevant information.""",
    tools=[search_memory, save_memory],
    model="gpt-4.1-nano-2025-04-14",
)

# Triage agent with handoffs to the two specialists above
triage_agent = Agent(
    name="Personal Assistant",
    instructions="""You are a helpful personal assistant that routes requests to specialists.
    For travel-related questions (trips, hotels, flights, destinations), hand off to Travel Planner.
    For health-related questions (fitness, diet, wellness, exercise), hand off to Health Advisor.
    For general questions, you can handle them directly using available tools.""",
    handoffs=[travel_agent, health_agent],
    model="gpt-4.1-nano-2025-04-14",
)
def chat_with_handoffs(user_input: str, user_id: str) -> str:
    """
    Handle user input with automatic agent handoffs and memory integration.

    Args:
        user_input: The user's message
        user_id: Unique identifier for the user

    Returns:
        The agent's response
    """
    # Run the triage agent (it will automatically hand off when needed)
    result = Runner.run_sync(triage_agent, user_input)
    # Store the original conversation in memory so future turns have context
    conversation = [{"role": "user", "content": user_input}, {"role": "assistant", "content": result.final_output}]
    mem0.add(conversation, user_id=user_id)
    return result.final_output
# Example usage
# response = chat_with_handoffs("Which places should I vist?", user_id="alex")
# print(response)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/examples/multiagents/llamaindex_learning_system.py | examples/multiagents/llamaindex_learning_system.py | """
Multi-Agent Personal Learning System: Mem0 + LlamaIndex AgentWorkflow Example
INSTALLATIONS:
!pip install llama-index-core llama-index-memory-mem0 openai
You need MEM0_API_KEY and OPENAI_API_KEY to run the example.
"""
import asyncio
import logging
from datetime import datetime
from dotenv import load_dotenv
# LlamaIndex imports
from llama_index.core.agent.workflow import AgentWorkflow, FunctionAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI
# Memory integration
from llama_index.memory.mem0 import Mem0Memory
load_dotenv()  # Pull MEM0_API_KEY / OPENAI_API_KEY from a local .env file.

# Configure logging: mirror everything to stdout and to a session log file.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler(), logging.FileHandler("learning_system.log")],
)
logger = logging.getLogger(__name__)
class MultiAgentLearningSystem:
    """
    Multi-Agent Architecture:
    - TutorAgent: Main teaching and explanations
    - PracticeAgent: Exercises and skill reinforcement
    - Shared Memory: Both agents learn from student interactions
    """

    def __init__(self, student_id: str):
        # One memory namespace per student; both agents share it through the workflow.
        self.student_id = student_id
        self.llm = OpenAI(model="gpt-4.1-nano-2025-04-14", temperature=0.2)
        # Memory context for this student (scopes Mem0 storage/retrieval).
        self.memory_context = {"user_id": student_id, "app": "learning_assistant"}
        self.memory = Mem0Memory.from_client(context=self.memory_context)
        self._setup_agents()

    def _setup_agents(self) -> None:
        """Setup two agents that work together and share memory."""

        # TOOLS — both agents get the same tool set.
        async def assess_understanding(topic: str, student_response: str) -> str:
            """Assess student's understanding of a topic and save insights."""
            # Simulated assessment: simple keyword heuristics on the reply text.
            if "confused" in student_response.lower() or "don't understand" in student_response.lower():
                assessment = f"STRUGGLING with {topic}: {student_response}"
                insight = f"Student needs more help with {topic}. Prefers step-by-step explanations."
            elif "makes sense" in student_response.lower() or "got it" in student_response.lower():
                assessment = f"UNDERSTANDS {topic}: {student_response}"
                insight = f"Student grasped {topic} quickly. Can move to advanced concepts."
            else:
                assessment = f"PARTIAL understanding of {topic}: {student_response}"
                insight = f"Student has basic understanding of {topic}. Needs reinforcement."
            return f"Assessment: {assessment}\nInsight saved: {insight}"

        async def track_progress(topic: str, success_rate: str) -> str:
            """Track learning progress and identify patterns."""
            progress_note = f"Progress on {topic}: {success_rate} - {datetime.now().strftime('%Y-%m-%d')}"
            return f"Progress tracked: {progress_note}"

        # Convert to FunctionTools so the agents can call the async functions.
        tools = [
            FunctionTool.from_defaults(async_fn=assess_understanding),
            FunctionTool.from_defaults(async_fn=track_progress),
        ]

        # === AGENTS ===
        # Tutor Agent - main teaching and explanation; can hand off to practice.
        self.tutor_agent = FunctionAgent(
            name="TutorAgent",
            description="Primary instructor that explains concepts and adapts to student needs",
            system_prompt="""
You are a patient, adaptive programming tutor. Your key strength is REMEMBERING and BUILDING on previous interactions.
Key Behaviors:
1. Always check what the student has learned before (use memory context)
2. Adapt explanations based on their preferred learning style
3. Reference previous struggles or successes
4. Build progressively on past lessons
5. Use assess_understanding to evaluate responses and save insights
MEMORY-DRIVEN TEACHING:
- "Last time you struggled with X, so let's approach Y differently..."
- "Since you prefer visual examples, here's a diagram..."
- "Building on the functions we covered yesterday..."
When student shows understanding, hand off to PracticeAgent for exercises.
""",
            tools=tools,
            llm=self.llm,
            can_handoff_to=["PracticeAgent"],
        )

        # Practice Agent - exercises and reinforcement; can hand back to tutor.
        self.practice_agent = FunctionAgent(
            name="PracticeAgent",
            description="Creates practice exercises and tracks progress based on student's learning history",
            system_prompt="""
You create personalized practice exercises based on the student's learning history and current level.
Key Behaviors:
1. Generate problems that match their skill level (from memory)
2. Focus on areas they've struggled with previously
3. Gradually increase difficulty based on their progress
4. Use track_progress to record their performance
5. Provide encouraging feedback that references their growth
MEMORY-DRIVEN PRACTICE:
- "Let's practice loops again since you wanted more examples..."
- "Here's a harder version of the problem you solved yesterday..."
- "You've improved a lot in functions, ready for the next level?"
After practice, can hand back to TutorAgent for concept review if needed.
""",
            tools=tools,
            llm=self.llm,
            can_handoff_to=["TutorAgent"],
        )

        # Create the multi-agent workflow; the tutor is the entry point.
        self.workflow = AgentWorkflow(
            agents=[self.tutor_agent, self.practice_agent],
            root_agent=self.tutor_agent.name,
            initial_state={
                "current_topic": "",
                "student_level": "beginner",
                "learning_style": "unknown",
                "session_goals": [],
            },
        )

    async def start_learning_session(self, topic: str, student_message: str = "") -> str:
        """
        Start a learning session with multi-agent memory-aware teaching.

        Args:
            topic: Subject the student wants to study.
            student_message: Optional free-form context appended to the request.

        Returns:
            The workflow's final response as a string.
        """
        if student_message:
            request = f"I want to learn about {topic}. {student_message}"
        else:
            request = f"I want to learn about {topic}."
        # Passing `memory=` makes the shared Mem0 store available to every agent in the run.
        response = await self.workflow.run(user_msg=request, memory=self.memory)
        return str(response)

    async def get_learning_history(self) -> str:
        """Show what the system remembers about this student."""
        try:
            # Search memory for learning patterns.
            # NOTE(review): confirm Mem0Memory.search accepts user_id/query kwargs
            # in the installed llama-index-memory-mem0 version.
            memories = self.memory.search(user_id=self.student_id, query="learning machine learning")
            if memories and len(memories):
                history = "\n".join(f"- {m['memory']}" for m in memories)
                return history
            else:
                return "No learning history found yet. Let's start building your profile!"
        except Exception as e:
            # Surface the error as text rather than crashing the demo session.
            return f"Memory retrieval error: {str(e)}"
async def run_learning_agent():
    """Demo driver: two learning sessions followed by a memory recap."""
    system = MultiAgentLearningSystem(student_id="Alexander")

    # First session
    logger.info("Session 1:")
    first_reply = await system.start_learning_session(
        "Vision Language Models",
        "I'm new to machine learning but I have good hold on Python and have 4 years of work experience.",
    )
    logger.info(first_reply)

    # Second session - multi-agent memory will remember the first
    logger.info("\nSession 2:")
    second_reply = await system.start_learning_session("Machine Learning", "what all did I cover so far?")
    logger.info(second_reply)

    # Show what the multi-agent system remembers
    logger.info("\nLearning History:")
    logger.info(await system.get_learning_history())
if __name__ == "__main__":
    # Run the example end to end.
    logger.info("Multi-agent Learning System powered by LlamaIndex and Mem0")
    asyncio.run(run_learning_agent())
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/server/main.py | server/main.py | import logging
import os
from typing import Any, Dict, List, Optional
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, RedirectResponse
from pydantic import BaseModel, Field
from mem0 import Memory
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# Load environment variables
load_dotenv()

# --- Postgres / pgvector connection settings (defaults match the docker-compose setup) ---
POSTGRES_HOST = os.environ.get("POSTGRES_HOST", "postgres")
POSTGRES_PORT = os.environ.get("POSTGRES_PORT", "5432")
POSTGRES_DB = os.environ.get("POSTGRES_DB", "postgres")
POSTGRES_USER = os.environ.get("POSTGRES_USER", "postgres")
POSTGRES_PASSWORD = os.environ.get("POSTGRES_PASSWORD", "postgres")
POSTGRES_COLLECTION_NAME = os.environ.get("POSTGRES_COLLECTION_NAME", "memories")

# --- Neo4j graph store settings (used by DEFAULT_CONFIG below) ---
NEO4J_URI = os.environ.get("NEO4J_URI", "bolt://neo4j:7687")
NEO4J_USERNAME = os.environ.get("NEO4J_USERNAME", "neo4j")
NEO4J_PASSWORD = os.environ.get("NEO4J_PASSWORD", "mem0graph")

# --- Memgraph settings (alternative graph backend; not referenced by DEFAULT_CONFIG) ---
MEMGRAPH_URI = os.environ.get("MEMGRAPH_URI", "bolt://localhost:7687")
MEMGRAPH_USERNAME = os.environ.get("MEMGRAPH_USERNAME", "memgraph")
MEMGRAPH_PASSWORD = os.environ.get("MEMGRAPH_PASSWORD", "mem0graph")

OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")  # required for the LLM and embedder below
HISTORY_DB_PATH = os.environ.get("HISTORY_DB_PATH", "/app/history/history.db")

# Default Memory configuration: pgvector for vectors, Neo4j for the graph,
# OpenAI for both the LLM and embeddings. /configure can replace it at runtime.
DEFAULT_CONFIG = {
    "version": "v1.1",
    "vector_store": {
        "provider": "pgvector",
        "config": {
            "host": POSTGRES_HOST,
            "port": int(POSTGRES_PORT),
            "dbname": POSTGRES_DB,
            "user": POSTGRES_USER,
            "password": POSTGRES_PASSWORD,
            "collection_name": POSTGRES_COLLECTION_NAME,
        },
    },
    "graph_store": {
        "provider": "neo4j",
        "config": {"url": NEO4J_URI, "username": NEO4J_USERNAME, "password": NEO4J_PASSWORD},
    },
    "llm": {"provider": "openai", "config": {"api_key": OPENAI_API_KEY, "temperature": 0.2, "model": "gpt-4.1-nano-2025-04-14"}},
    "embedder": {"provider": "openai", "config": {"api_key": OPENAI_API_KEY, "model": "text-embedding-3-small"}},
    "history_db_path": HISTORY_DB_PATH,
}

# Module-level singleton shared by every endpoint; /configure swaps it out.
MEMORY_INSTANCE = Memory.from_config(DEFAULT_CONFIG)

app = FastAPI(
    title="Mem0 REST APIs",
    description="A REST API for managing and searching memories for your AI Agents and Apps.",
    version="1.0.0",
)
class Message(BaseModel):
    # A single chat turn; the description documents the expected roles.
    role: str = Field(..., description="Role of the message (user or assistant).")
    content: str = Field(..., description="Message content.")
class MemoryCreate(BaseModel):
    # Request body for POST /memories. At least one of user_id/agent_id/run_id
    # must be set (enforced in the endpoint, not here).
    messages: List[Message] = Field(..., description="List of messages to store.")
    user_id: Optional[str] = None
    agent_id: Optional[str] = None
    run_id: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None  # free-form metadata attached to the stored memories
class SearchRequest(BaseModel):
    # Request body for POST /search; optional fields scope the search.
    query: str = Field(..., description="Search query.")
    user_id: Optional[str] = None
    run_id: Optional[str] = None
    agent_id: Optional[str] = None
    filters: Optional[Dict[str, Any]] = None  # backend-specific filter expression
@app.post("/configure", summary="Configure Mem0")
def set_config(config: Dict[str, Any]):
    """Set memory configuration.

    Replaces the module-level Memory singleton with one built from `config`;
    all subsequent requests use the new instance.
    """
    global MEMORY_INSTANCE
    MEMORY_INSTANCE = Memory.from_config(config)
    return {"message": "Configuration set successfully"}
@app.post("/memories", summary="Create memories")
def add_memory(memory_create: MemoryCreate):
    """Store new memories extracted from the supplied messages."""
    identifiers = (memory_create.user_id, memory_create.agent_id, memory_create.run_id)
    if not any(identifiers):
        raise HTTPException(status_code=400, detail="At least one identifier (user_id, agent_id, run_id) is required.")

    # Forward every populated field except the messages themselves as kwargs.
    payload = memory_create.model_dump()
    payload.pop("messages", None)
    params = {key: value for key, value in payload.items() if value is not None}

    messages = [message.model_dump() for message in memory_create.messages]
    try:
        response = MEMORY_INSTANCE.add(messages=messages, **params)
        return JSONResponse(content=response)
    except Exception as e:
        logging.exception("Error in add_memory:")  # This will log the full traceback
        raise HTTPException(status_code=500, detail=str(e))
@app.get("/memories", summary="Get memories")
def get_all_memories(
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    agent_id: Optional[str] = None,
):
    """Retrieve all memories scoped to the provided identifier(s)."""
    identifiers = {"user_id": user_id, "run_id": run_id, "agent_id": agent_id}
    if not any(identifiers.values()):
        raise HTTPException(status_code=400, detail="At least one identifier is required.")
    try:
        # Only pass identifiers that were actually supplied.
        params = {name: value for name, value in identifiers.items() if value is not None}
        return MEMORY_INSTANCE.get_all(**params)
    except Exception as e:
        logging.exception("Error in get_all_memories:")
        raise HTTPException(status_code=500, detail=str(e))
@app.get("/memories/{memory_id}", summary="Get a memory")
def get_memory(memory_id: str):
    """Retrieve a specific memory by ID.

    Any backend failure is reported as a 500 with the error message.
    """
    try:
        return MEMORY_INSTANCE.get(memory_id)
    except Exception as e:
        logging.exception("Error in get_memory:")
        raise HTTPException(status_code=500, detail=str(e))
@app.post("/search", summary="Search memories")
def search_memories(search_req: SearchRequest):
    """Search stored memories for matches against the request's query."""
    try:
        payload = search_req.model_dump()
        query = payload.pop("query")
        # Remaining populated fields (user_id/run_id/agent_id/filters) scope the search.
        params = {key: value for key, value in payload.items() if value is not None}
        return MEMORY_INSTANCE.search(query=query, **params)
    except Exception as e:
        logging.exception("Error in search_memories:")
        raise HTTPException(status_code=500, detail=str(e))
@app.put("/memories/{memory_id}", summary="Update a memory")
def update_memory(memory_id: str, updated_memory: Dict[str, Any]):
    """Update an existing memory with new content.

    Args:
        memory_id (str): ID of the memory to update
        updated_memory (Dict[str, Any]): New content to update the memory with

    Returns:
        Whatever Memory.update returns for the backend in use
        (typically a status/confirmation payload).
    """
    try:
        return MEMORY_INSTANCE.update(memory_id=memory_id, data=updated_memory)
    except Exception as e:
        logging.exception("Error in update_memory:")
        raise HTTPException(status_code=500, detail=str(e))
@app.get("/memories/{memory_id}/history", summary="Get memory history")
def memory_history(memory_id: str):
    """Retrieve the change history of a single memory."""
    try:
        return MEMORY_INSTANCE.history(memory_id=memory_id)
    except Exception as e:
        logging.exception("Error in memory_history:")
        raise HTTPException(status_code=500, detail=str(e))
@app.delete("/memories/{memory_id}", summary="Delete a memory")
def delete_memory(memory_id: str):
    """Delete a specific memory by ID."""
    try:
        MEMORY_INSTANCE.delete(memory_id=memory_id)
    except Exception as e:
        logging.exception("Error in delete_memory:")
        raise HTTPException(status_code=500, detail=str(e))
    return {"message": "Memory deleted successfully"}
@app.delete("/memories", summary="Delete all memories")
def delete_all_memories(
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    agent_id: Optional[str] = None,
):
    """Delete every memory scoped to the provided identifier(s)."""
    identifiers = {"user_id": user_id, "run_id": run_id, "agent_id": agent_id}
    if not any(identifiers.values()):
        raise HTTPException(status_code=400, detail="At least one identifier is required.")
    try:
        # Only forward identifiers that were actually supplied.
        selected = {name: value for name, value in identifiers.items() if value is not None}
        MEMORY_INSTANCE.delete_all(**selected)
    except Exception as e:
        logging.exception("Error in delete_all_memories:")
        raise HTTPException(status_code=500, detail=str(e))
    return {"message": "All relevant memories deleted"}
@app.post("/reset", summary="Reset all memories")
def reset_memory():
    """Completely reset stored memories.

    Destructive: clears the whole store, not just one identifier's data.
    """
    try:
        MEMORY_INSTANCE.reset()
        return {"message": "All memories reset"}
    except Exception as e:
        logging.exception("Error in reset_memory:")
        raise HTTPException(status_code=500, detail=str(e))
@app.get("/", summary="Redirect to the OpenAPI documentation", include_in_schema=False)
def home():
    """Redirect the bare root URL to the interactive OpenAPI docs."""
    return RedirectResponse(url="/docs")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/cookbooks/helper/mem0_teachability.py | cookbooks/helper/mem0_teachability.py | # Copyright (c) 2023 - 2024, Owners of https://github.com/autogen-ai
#
# SPDX-License-Identifier: Apache-2.0
#
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
# SPDX-License-Identifier: MIT
# forked from autogen.agentchat.contrib.capabilities.teachability.Teachability
from typing import Dict, Optional, Union
from autogen.agentchat.assistant_agent import ConversableAgent
from autogen.agentchat.contrib.capabilities.agent_capability import AgentCapability
from autogen.agentchat.contrib.text_analyzer_agent import TextAnalyzerAgent
from termcolor import colored
from mem0 import Memory
class Mem0Teachability(AgentCapability):
    """AgentCapability that gives a ConversableAgent long-term memory via mem0.

    Each incoming message is analyzed by a TextAnalyzerAgent; task/advice and
    question/answer pairs worth keeping are stored in a mem0 ``Memory``, and
    relevant stored memos are appended to new messages before the agent sees
    them.
    """

    def __init__(
        self,
        verbosity: Optional[int] = 0,
        reset_db: Optional[bool] = False,
        recall_threshold: Optional[float] = 1.5,
        max_num_retrievals: Optional[int] = 10,
        llm_config: Optional[Union[Dict, bool]] = None,
        agent_id: Optional[str] = None,
        memory_client: Optional[Memory] = None,
    ):
        """
        Args:
            verbosity: 0 = silent, 1 = print memory operations, 2+ = also show analyzer traffic.
            reset_db: If True, wipe the memory store on startup.
            recall_threshold: Memos whose distance score exceeds this are ignored
                (higher means more permissive recall).
            max_num_retrievals: Upper bound on memos fetched per lookup.
            llm_config: Config for the internal TextAnalyzerAgent; falls back to the
                teachable agent's own config in add_to_agent.
            agent_id: Namespace under which memories are stored and searched.
            memory_client: Existing mem0 Memory instance to reuse; a default
                Memory() is created when omitted.
        """
        self.verbosity = verbosity
        self.recall_threshold = recall_threshold
        self.max_num_retrievals = max_num_retrievals
        self.llm_config = llm_config
        self.analyzer = None
        self.teachable_agent = None
        self.agent_id = agent_id
        self.memory = memory_client if memory_client else Memory()
        if reset_db:
            self.memory.reset()

    def add_to_agent(self, agent: ConversableAgent):
        """Attach this capability to `agent`: register the message hook, build the
        analyzer, and extend the agent's system message."""
        self.teachable_agent = agent
        agent.register_hook(hookable_method="process_last_received_message", hook=self.process_last_received_message)
        if self.llm_config is None:
            self.llm_config = agent.llm_config
        assert self.llm_config, "Teachability requires a valid llm_config."
        self.analyzer = TextAnalyzerAgent(llm_config=self.llm_config)
        agent.update_system_message(
            agent.system_message
            + "\nYou've been given the special ability to remember user teachings from prior conversations."
        )

    def process_last_received_message(self, text: Union[Dict, str]):
        """Hook: expand the incoming message with relevant memos, then consider
        storing new ones. Retrieval only runs once something has been stored."""
        expanded_text = text
        if self.memory.get_all(agent_id=self.agent_id):
            expanded_text = self._consider_memo_retrieval(text)
        self._consider_memo_storage(text)
        return expanded_text

    def _consider_memo_storage(self, comment: Union[Dict, str]):
        """Ask the analyzer whether `comment` contains a task/advice pair or a
        memorable fact, and store whatever it finds."""
        response = self._analyze(
            comment,
            "Does any part of the TEXT ask the agent to perform a task or solve a problem? Answer with just one word, yes or no.",
        )
        if "yes" in response.lower():
            advice = self._analyze(
                comment,
                "Briefly copy any advice from the TEXT that may be useful for a similar but different task in the future. But if no advice is present, just respond with 'none'.",
            )
            if "none" not in advice.lower():
                task = self._analyze(
                    comment,
                    "Briefly copy just the task from the TEXT, then stop. Don't solve it, and don't include any advice.",
                )
                # Generalize the task so the memo matches similar future requests.
                general_task = self._analyze(
                    task,
                    "Summarize very briefly, in general terms, the type of task described in the TEXT. Leave out details that might not appear in a similar problem.",
                )
                if self.verbosity >= 1:
                    print(colored("\nREMEMBER THIS TASK-ADVICE PAIR", "light_yellow"))
                self.memory.add(
                    [{"role": "user", "content": f"Task: {general_task}\nAdvice: {advice}"}], agent_id=self.agent_id
                )
        response = self._analyze(
            comment,
            "Does the TEXT contain information that could be committed to memory? Answer with just one word, yes or no.",
        )
        if "yes" in response.lower():
            # Store facts as question/answer pairs so later retrieval-by-question works.
            question = self._analyze(
                comment,
                "Imagine that the user forgot this information in the TEXT. How would they ask you for this information? Include no other text in your response.",
            )
            answer = self._analyze(
                comment, "Copy the information from the TEXT that should be committed to memory. Add no explanation."
            )
            if self.verbosity >= 1:
                print(colored("\nREMEMBER THIS QUESTION-ANSWER PAIR", "light_yellow"))
            self.memory.add(
                [{"role": "user", "content": f"Question: {question}\nAnswer: {answer}"}], agent_id=self.agent_id
            )

    def _consider_memo_retrieval(self, comment: Union[Dict, str]):
        """Retrieve memos relevant to `comment` (and, for task-like messages, to a
        generalized form of the task) and append them to the message text."""
        if self.verbosity >= 1:
            print(colored("\nLOOK FOR RELEVANT MEMOS, AS QUESTION-ANSWER PAIRS", "light_yellow"))
        memo_list = self._retrieve_relevant_memos(comment)
        response = self._analyze(
            comment,
            "Does any part of the TEXT ask the agent to perform a task or solve a problem? Answer with just one word, yes or no.",
        )
        if "yes" in response.lower():
            if self.verbosity >= 1:
                print(colored("\nLOOK FOR RELEVANT MEMOS, AS TASK-ADVICE PAIRS", "light_yellow"))
            task = self._analyze(
                comment, "Copy just the task from the TEXT, then stop. Don't solve it, and don't include any advice."
            )
            general_task = self._analyze(
                task,
                "Summarize very briefly, in general terms, the type of task described in the TEXT. Leave out details that might not appear in a similar problem.",
            )
            memo_list.extend(self._retrieve_relevant_memos(general_task))
        # De-duplicate (order is not significant for the appended memo section).
        memo_list = list(set(memo_list))
        return comment + self._concatenate_memo_texts(memo_list)

    def _retrieve_relevant_memos(self, input_text: str) -> list:
        """Return memory texts whose distance score passes the recall threshold.

        BUG FIX: the original iterated ``search_results`` as a list of hit dicts
        but then indexed ``search_results["results"]`` in the verbose fallback —
        only one of those can be valid for a given return shape of
        ``Memory.search``. Normalize the result once and use it everywhere.
        """
        search_results = self.memory.search(input_text, agent_id=self.agent_id, limit=self.max_num_retrievals)
        if isinstance(search_results, dict):
            hits = search_results.get("results", [])
        else:
            hits = search_results
        memo_list = [hit["memory"] for hit in hits if hit["score"] <= self.recall_threshold]
        if self.verbosity >= 1 and not memo_list:
            print(colored("\nTHE CLOSEST MEMO IS BEYOND THE THRESHOLD:", "light_yellow"))
            if hits:
                print(hits[0])
            print()
        return memo_list

    def _concatenate_memo_texts(self, memo_list: list) -> str:
        """Render `memo_list` as a bullet section to append to the message;
        returns an empty string when there are no memos."""
        memo_texts = ""
        if memo_list:
            info = "\n# Memories that might help\n"
            for memo in memo_list:
                info += f"- {memo}\n"
            if self.verbosity >= 1:
                print(colored(f"\nMEMOS APPENDED TO LAST MESSAGE...\n{info}\n", "light_yellow"))
            memo_texts += "\n" + info
        return memo_texts

    def _analyze(self, text_to_analyze: Union[Dict, str], analysis_instructions: Union[Dict, str]):
        """Send `text_to_analyze` then `analysis_instructions` to the analyzer agent
        and return its reply text."""
        self.analyzer.reset()
        self.teachable_agent.send(
            recipient=self.analyzer, message=text_to_analyze, request_reply=False, silent=(self.verbosity < 2)
        )
        self.teachable_agent.send(
            recipient=self.analyzer, message=analysis_instructions, request_reply=True, silent=(self.verbosity < 2)
        )
        return self.teachable_agent.last_message(self.analyzer)["content"]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/cookbooks/helper/__init__.py | cookbooks/helper/__init__.py | python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false | |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/prompts.py | evaluation/prompts.py | ANSWER_PROMPT_GRAPH = """
You are an intelligent memory assistant tasked with retrieving accurate information from
conversation memories.
# CONTEXT:
You have access to memories from two speakers in a conversation. These memories contain
timestamped information that may be relevant to answering the question. You also have
access to knowledge graph relations for each user, showing connections between entities,
concepts, and events relevant to that user.
# INSTRUCTIONS:
1. Carefully analyze all provided memories from both speakers
2. Pay special attention to the timestamps to determine the answer
3. If the question asks about a specific event or fact, look for direct evidence in the
memories
4. If the memories contain contradictory information, prioritize the most recent memory
5. If there is a question about time references (like "last year", "two months ago",
etc.), calculate the actual date based on the memory timestamp. For example, if a
memory from 4 May 2022 mentions "went to India last year," then the trip occurred
in 2021.
6. Always convert relative time references to specific dates, months, or years. For
example, convert "last year" to "2022" or "two months ago" to "March 2023" based
on the memory timestamp. Ignore the reference while answering the question.
7. Focus only on the content of the memories from both speakers. Do not confuse
character names mentioned in memories with the actual users who created those
memories.
8. The answer should be less than 5-6 words.
9. Use the knowledge graph relations to understand the user's knowledge network and
identify important relationships between entities in the user's world.
# APPROACH (Think step by step):
1. First, examine all memories that contain information related to the question
2. Examine the timestamps and content of these memories carefully
3. Look for explicit mentions of dates, times, locations, or events that answer the
question
4. If the answer requires calculation (e.g., converting relative time references),
show your work
5. Analyze the knowledge graph relations to understand the user's knowledge context
6. Formulate a precise, concise answer based solely on the evidence in the memories
7. Double-check that your answer directly addresses the question asked
8. Ensure your final answer is specific and avoids vague time references
Memories for user {{speaker_1_user_id}}:
{{speaker_1_memories}}
Relations for user {{speaker_1_user_id}}:
{{speaker_1_graph_memories}}
Memories for user {{speaker_2_user_id}}:
{{speaker_2_memories}}
Relations for user {{speaker_2_user_id}}:
{{speaker_2_graph_memories}}
Question: {{question}}
Answer:
"""
# Prompt template for answering LoCoMo questions from two speakers' mem0
# memories (no graph relations). Placeholders use doubled braces and are
# substituted by the evaluation harness, not str.format.
ANSWER_PROMPT = """
You are an intelligent memory assistant tasked with retrieving accurate information from conversation memories.
# CONTEXT:
You have access to memories from two speakers in a conversation. These memories contain
timestamped information that may be relevant to answering the question.
# INSTRUCTIONS:
1. Carefully analyze all provided memories from both speakers
2. Pay special attention to the timestamps to determine the answer
3. If the question asks about a specific event or fact, look for direct evidence in the memories
4. If the memories contain contradictory information, prioritize the most recent memory
5. If there is a question about time references (like "last year", "two months ago", etc.),
calculate the actual date based on the memory timestamp. For example, if a memory from
4 May 2022 mentions "went to India last year," then the trip occurred in 2021.
6. Always convert relative time references to specific dates, months, or years. For example,
convert "last year" to "2022" or "two months ago" to "March 2023" based on the memory
timestamp. Ignore the reference while answering the question.
7. Focus only on the content of the memories from both speakers. Do not confuse character
names mentioned in memories with the actual users who created those memories.
8. The answer should be less than 5-6 words.
# APPROACH (Think step by step):
1. First, examine all memories that contain information related to the question
2. Examine the timestamps and content of these memories carefully
3. Look for explicit mentions of dates, times, locations, or events that answer the question
4. If the answer requires calculation (e.g., converting relative time references), show your work
5. Formulate a precise, concise answer based solely on the evidence in the memories
6. Double-check that your answer directly addresses the question asked
7. Ensure your final answer is specific and avoids vague time references
Memories for user {{speaker_1_user_id}}:
{{speaker_1_memories}}
Memories for user {{speaker_2_user_id}}:
{{speaker_2_memories}}
Question: {{question}}
Answer:
"""
# Variant of ANSWER_PROMPT for the Zep baseline: a single undifferentiated
# memory blob instead of per-speaker memories.
ANSWER_PROMPT_ZEP = """
You are an intelligent memory assistant tasked with retrieving accurate information from conversation memories.
# CONTEXT:
You have access to memories from a conversation. These memories contain
timestamped information that may be relevant to answering the question.
# INSTRUCTIONS:
1. Carefully analyze all provided memories
2. Pay special attention to the timestamps to determine the answer
3. If the question asks about a specific event or fact, look for direct evidence in the memories
4. If the memories contain contradictory information, prioritize the most recent memory
5. If there is a question about time references (like "last year", "two months ago", etc.),
calculate the actual date based on the memory timestamp. For example, if a memory from
4 May 2022 mentions "went to India last year," then the trip occurred in 2021.
6. Always convert relative time references to specific dates, months, or years. For example,
convert "last year" to "2022" or "two months ago" to "March 2023" based on the memory
timestamp. Ignore the reference while answering the question.
7. Focus only on the content of the memories. Do not confuse character
names mentioned in memories with the actual users who created those memories.
8. The answer should be less than 5-6 words.
# APPROACH (Think step by step):
1. First, examine all memories that contain information related to the question
2. Examine the timestamps and content of these memories carefully
3. Look for explicit mentions of dates, times, locations, or events that answer the question
4. If the answer requires calculation (e.g., converting relative time references), show your work
5. Formulate a precise, concise answer based solely on the evidence in the memories
6. Double-check that your answer directly addresses the question asked
7. Ensure your final answer is specific and avoids vague time references
Memories:
{{memories}}
Question: {{question}}
Answer:
"""
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/generate_scores.py | evaluation/generate_scores.py | import json
import pandas as pd

# Read the evaluation metrics produced by evals.py.
with open("evaluation_metrics.json", "r") as fh:
    metrics_by_key = json.load(fh)

# Flatten the per-key lists of scored questions into one table.
records = []
for items in metrics_by_key.values():
    records.extend(items)

frame = pd.DataFrame(records)
frame["category"] = pd.to_numeric(frame["category"])

score_aggregation = {"bleu_score": "mean", "f1_score": "mean", "llm_score": "mean"}

# Per-category means plus the number of questions in each category.
per_category = frame.groupby("category").agg(score_aggregation).round(4)
per_category["count"] = frame.groupby("category").size()

print("Mean Scores Per Category:")
print(per_category)

# Unweighted means across every question.
overall = frame.agg(score_aggregation).round(4)
print("\nOverall Mean Scores:")
print(overall)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/run_experiments.py | evaluation/run_experiments.py | import argparse
import os
from src.langmem import LangMemManager
from src.memzero.add import MemoryADD
from src.memzero.search import MemorySearch
from src.openai.predict import OpenAIPredict
from src.rag import RAGManager
from src.utils import METHODS, TECHNIQUES
from src.zep.add import ZepAdd
from src.zep.search import ZepSearch
class Experiment:
    """Lightweight record of a single experiment configuration."""

    def __init__(self, technique_type, chunk_size):
        # Store the configuration; `run` only reports it.
        self.technique_type = technique_type
        self.chunk_size = chunk_size

    def run(self):
        """Print the configuration this experiment would run with."""
        summary = f"Running experiment with technique: {self.technique_type}, chunk size: {self.chunk_size}"
        print(summary)
def main():
    """CLI entry point: parse arguments and dispatch to the selected technique/method."""
    parser = argparse.ArgumentParser(description="Run memory experiments")
    parser.add_argument("--technique_type", choices=TECHNIQUES, default="mem0", help="Memory technique to use")
    parser.add_argument("--method", choices=METHODS, default="add", help="Method to use")
    parser.add_argument("--chunk_size", type=int, default=1000, help="Chunk size for processing")
    parser.add_argument("--output_folder", type=str, default="results/", help="Output path for results")
    parser.add_argument("--top_k", type=int, default=30, help="Number of top memories to retrieve")
    parser.add_argument("--filter_memories", action="store_true", default=False, help="Whether to filter memories")
    parser.add_argument("--is_graph", action="store_true", default=False, help="Whether to use graph-based search")
    parser.add_argument("--num_chunks", type=int, default=1, help="Number of chunks to process")
    args = parser.parse_args()
    # Dispatch on technique; mem0 and zep additionally split into add/search phases.
    print(f"Running experiments with technique: {args.technique_type}, chunk size: {args.chunk_size}")
    if args.technique_type == "mem0":
        if args.method == "add":
            # Ingestion phase: load conversations into mem0.
            memory_manager = MemoryADD(data_path="dataset/locomo10.json", is_graph=args.is_graph)
            memory_manager.process_all_conversations()
        elif args.method == "search":
            # Retrieval phase: output filename encodes the search settings.
            output_file_path = os.path.join(
                args.output_folder,
                f"mem0_results_top_{args.top_k}_filter_{args.filter_memories}_graph_{args.is_graph}.json",
            )
            memory_searcher = MemorySearch(output_file_path, args.top_k, args.filter_memories, args.is_graph)
            memory_searcher.process_data_file("dataset/locomo10.json")
    elif args.technique_type == "rag":
        output_file_path = os.path.join(args.output_folder, f"rag_results_{args.chunk_size}_k{args.num_chunks}.json")
        rag_manager = RAGManager(data_path="dataset/locomo10_rag.json", chunk_size=args.chunk_size, k=args.num_chunks)
        rag_manager.process_all_conversations(output_file_path)
    elif args.technique_type == "langmem":
        output_file_path = os.path.join(args.output_folder, "langmem_results.json")
        langmem_manager = LangMemManager(dataset_path="dataset/locomo10_rag.json")
        langmem_manager.process_all_conversations(output_file_path)
    elif args.technique_type == "zep":
        if args.method == "add":
            zep_manager = ZepAdd(data_path="dataset/locomo10.json")
            zep_manager.process_all_conversations("1")
        elif args.method == "search":
            output_file_path = os.path.join(args.output_folder, "zep_search_results.json")
            zep_manager = ZepSearch()
            zep_manager.process_data_file("dataset/locomo10.json", "1", output_file_path)
    elif args.technique_type == "openai":
        output_file_path = os.path.join(args.output_folder, "openai_results.json")
        openai_manager = OpenAIPredict()
        openai_manager.process_data_file("dataset/locomo10.json", output_file_path)
    else:
        # argparse `choices` should prevent this, but fail loudly just in case.
        raise ValueError(f"Invalid technique type: {args.technique_type}")


if __name__ == "__main__":
    main()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/evals.py | evaluation/evals.py | import argparse
import concurrent.futures
import json
import threading
from collections import defaultdict
from metrics.llm_judge import evaluate_llm_judge
from metrics.utils import calculate_bleu_scores, calculate_metrics
from tqdm import tqdm
def process_item(item_data):
k, v = item_data
local_results = defaultdict(list)
for item in v:
gt_answer = str(item["answer"])
pred_answer = str(item["response"])
category = str(item["category"])
question = str(item["question"])
# Skip category 5
if category == "5":
continue
metrics = calculate_metrics(pred_answer, gt_answer)
bleu_scores = calculate_bleu_scores(pred_answer, gt_answer)
llm_score = evaluate_llm_judge(question, gt_answer, pred_answer)
local_results[k].append(
{
"question": question,
"answer": gt_answer,
"response": pred_answer,
"category": category,
"bleu_score": bleu_scores["bleu1"],
"f1_score": metrics["f1"],
"llm_score": llm_score,
}
)
return local_results
def main():
parser = argparse.ArgumentParser(description="Evaluate RAG results")
parser.add_argument(
"--input_file", type=str, default="results/rag_results_500_k1.json", help="Path to the input dataset file"
)
parser.add_argument(
"--output_file", type=str, default="evaluation_metrics.json", help="Path to save the evaluation results"
)
parser.add_argument("--max_workers", type=int, default=10, help="Maximum number of worker threads")
args = parser.parse_args()
with open(args.input_file, "r") as f:
data = json.load(f)
results = defaultdict(list)
results_lock = threading.Lock()
# Use ThreadPoolExecutor with specified workers
with concurrent.futures.ThreadPoolExecutor(max_workers=args.max_workers) as executor:
futures = [executor.submit(process_item, item_data) for item_data in data.items()]
for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
local_results = future.result()
with results_lock:
for k, items in local_results.items():
results[k].extend(items)
# Save results to JSON file
with open(args.output_file, "w") as f:
json.dump(results, f, indent=4)
print(f"Results saved to {args.output_file}")
if __name__ == "__main__":
main()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/src/langmem.py | evaluation/src/langmem.py | import json
import multiprocessing as mp
import os
import time
from collections import defaultdict
from dotenv import load_dotenv
from jinja2 import Template
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from langgraph.store.memory import InMemoryStore
from langgraph.utils.config import get_store
from langmem import create_manage_memory_tool, create_search_memory_tool
from openai import OpenAI
from prompts import ANSWER_PROMPT
from tqdm import tqdm
load_dotenv()
client = OpenAI()
ANSWER_PROMPT_TEMPLATE = Template(ANSWER_PROMPT)
def get_answer(question, speaker_1_user_id, speaker_1_memories, speaker_2_user_id, speaker_2_memories):
prompt = ANSWER_PROMPT_TEMPLATE.render(
question=question,
speaker_1_user_id=speaker_1_user_id,
speaker_1_memories=speaker_1_memories,
speaker_2_user_id=speaker_2_user_id,
speaker_2_memories=speaker_2_memories,
)
t1 = time.time()
response = client.chat.completions.create(
model=os.getenv("MODEL"), messages=[{"role": "system", "content": prompt}], temperature=0.0
)
t2 = time.time()
return response.choices[0].message.content, t2 - t1
def prompt(state):
"""Prepare the messages for the LLM."""
store = get_store()
memories = store.search(
("memories",),
query=state["messages"][-1].content,
)
system_msg = f"""You are a helpful assistant.
## Memories
<memories>
{memories}
</memories>
"""
return [{"role": "system", "content": system_msg}, *state["messages"]]
class LangMem:
def __init__(
self,
):
self.store = InMemoryStore(
index={
"dims": 1536,
"embed": f"openai:{os.getenv('EMBEDDING_MODEL')}",
}
)
self.checkpointer = MemorySaver() # Checkpoint graph state
self.agent = create_react_agent(
f"openai:{os.getenv('MODEL')}",
prompt=prompt,
tools=[
create_manage_memory_tool(namespace=("memories",)),
create_search_memory_tool(namespace=("memories",)),
],
store=self.store,
checkpointer=self.checkpointer,
)
def add_memory(self, message, config):
return self.agent.invoke({"messages": [{"role": "user", "content": message}]}, config=config)
def search_memory(self, query, config):
try:
t1 = time.time()
response = self.agent.invoke({"messages": [{"role": "user", "content": query}]}, config=config)
t2 = time.time()
return response["messages"][-1].content, t2 - t1
except Exception as e:
print(f"Error in search_memory: {e}")
return "", t2 - t1
class LangMemManager:
def __init__(self, dataset_path):
self.dataset_path = dataset_path
with open(self.dataset_path, "r") as f:
self.data = json.load(f)
def process_all_conversations(self, output_file_path):
OUTPUT = defaultdict(list)
# Process conversations in parallel with multiple workers
def process_conversation(key_value_pair):
key, value = key_value_pair
result = defaultdict(list)
chat_history = value["conversation"]
questions = value["question"]
agent1 = LangMem()
agent2 = LangMem()
config = {"configurable": {"thread_id": f"thread-{key}"}}
speakers = set()
# Identify speakers
for conv in chat_history:
speakers.add(conv["speaker"])
if len(speakers) != 2:
raise ValueError(f"Expected 2 speakers, got {len(speakers)}")
speaker1 = list(speakers)[0]
speaker2 = list(speakers)[1]
# Add memories for each message
for conv in tqdm(chat_history, desc=f"Processing messages {key}", leave=False):
message = f"{conv['timestamp']} | {conv['speaker']}: {conv['text']}"
if conv["speaker"] == speaker1:
agent1.add_memory(message, config)
elif conv["speaker"] == speaker2:
agent2.add_memory(message, config)
else:
raise ValueError(f"Expected speaker1 or speaker2, got {conv['speaker']}")
# Process questions
for q in tqdm(questions, desc=f"Processing questions {key}", leave=False):
category = q["category"]
if int(category) == 5:
continue
answer = q["answer"]
question = q["question"]
response1, speaker1_memory_time = agent1.search_memory(question, config)
response2, speaker2_memory_time = agent2.search_memory(question, config)
generated_answer, response_time = get_answer(question, speaker1, response1, speaker2, response2)
result[key].append(
{
"question": question,
"answer": answer,
"response1": response1,
"response2": response2,
"category": category,
"speaker1_memory_time": speaker1_memory_time,
"speaker2_memory_time": speaker2_memory_time,
"response_time": response_time,
"response": generated_answer,
}
)
return result
# Use multiprocessing to process conversations in parallel
with mp.Pool(processes=10) as pool:
results = list(
tqdm(
pool.imap(process_conversation, list(self.data.items())),
total=len(self.data),
desc="Processing conversations",
)
)
# Combine results from all workers
for result in results:
for key, items in result.items():
OUTPUT[key].extend(items)
# Save final results
with open(output_file_path, "w") as f:
json.dump(OUTPUT, f, indent=4)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/src/utils.py | evaluation/src/utils.py | TECHNIQUES = ["mem0", "rag", "langmem", "zep", "openai"]
METHODS = ["add", "search"]
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/src/rag.py | evaluation/src/rag.py | import json
import os
import time
from collections import defaultdict
import numpy as np
import tiktoken
from dotenv import load_dotenv
from jinja2 import Template
from openai import OpenAI
from tqdm import tqdm
load_dotenv()
PROMPT = """
# Question:
{{QUESTION}}
# Context:
{{CONTEXT}}
# Short answer:
"""
class RAGManager:
def __init__(self, data_path="dataset/locomo10_rag.json", chunk_size=500, k=1):
self.model = os.getenv("MODEL")
self.client = OpenAI()
self.data_path = data_path
self.chunk_size = chunk_size
self.k = k
def generate_response(self, question, context):
template = Template(PROMPT)
prompt = template.render(CONTEXT=context, QUESTION=question)
max_retries = 3
retries = 0
while retries <= max_retries:
try:
t1 = time.time()
response = self.client.chat.completions.create(
model=self.model,
messages=[
{
"role": "system",
"content": "You are a helpful assistant that can answer "
"questions based on the provided context."
"If the question involves timing, use the conversation date for reference."
"Provide the shortest possible answer."
"Use words directly from the conversation when possible."
"Avoid using subjects in your answer.",
},
{"role": "user", "content": prompt},
],
temperature=0,
)
t2 = time.time()
return response.choices[0].message.content.strip(), t2 - t1
except Exception as e:
retries += 1
if retries > max_retries:
raise e
time.sleep(1) # Wait before retrying
def clean_chat_history(self, chat_history):
cleaned_chat_history = ""
for c in chat_history:
cleaned_chat_history += f"{c['timestamp']} | {c['speaker']}: {c['text']}\n"
return cleaned_chat_history
def calculate_embedding(self, document):
response = self.client.embeddings.create(model=os.getenv("EMBEDDING_MODEL"), input=document)
return response.data[0].embedding
def calculate_similarity(self, embedding1, embedding2):
return np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
def search(self, query, chunks, embeddings, k=1):
"""
Search for the top-k most similar chunks to the query.
Args:
query: The query string
chunks: List of text chunks
embeddings: List of embeddings for each chunk
k: Number of top chunks to return (default: 1)
Returns:
combined_chunks: The combined text of the top-k chunks
search_time: Time taken for the search
"""
t1 = time.time()
query_embedding = self.calculate_embedding(query)
similarities = [self.calculate_similarity(query_embedding, embedding) for embedding in embeddings]
# Get indices of top-k most similar chunks
if k == 1:
# Original behavior - just get the most similar chunk
top_indices = [np.argmax(similarities)]
else:
# Get indices of top-k chunks
top_indices = np.argsort(similarities)[-k:][::-1]
# Combine the top-k chunks
combined_chunks = "\n<->\n".join([chunks[i] for i in top_indices])
t2 = time.time()
return combined_chunks, t2 - t1
def create_chunks(self, chat_history, chunk_size=500):
"""
Create chunks using tiktoken for more accurate token counting
"""
# Get the encoding for the model
encoding = tiktoken.encoding_for_model(os.getenv("EMBEDDING_MODEL"))
documents = self.clean_chat_history(chat_history)
if chunk_size == -1:
return [documents], []
chunks = []
# Encode the document
tokens = encoding.encode(documents)
# Split into chunks based on token count
for i in range(0, len(tokens), chunk_size):
chunk_tokens = tokens[i : i + chunk_size]
chunk = encoding.decode(chunk_tokens)
chunks.append(chunk)
embeddings = []
for chunk in chunks:
embedding = self.calculate_embedding(chunk)
embeddings.append(embedding)
return chunks, embeddings
def process_all_conversations(self, output_file_path):
with open(self.data_path, "r") as f:
data = json.load(f)
FINAL_RESULTS = defaultdict(list)
for key, value in tqdm(data.items(), desc="Processing conversations"):
chat_history = value["conversation"]
questions = value["question"]
chunks, embeddings = self.create_chunks(chat_history, self.chunk_size)
for item in tqdm(questions, desc="Answering questions", leave=False):
question = item["question"]
answer = item.get("answer", "")
category = item["category"]
if self.chunk_size == -1:
context = chunks[0]
search_time = 0
else:
context, search_time = self.search(question, chunks, embeddings, k=self.k)
response, response_time = self.generate_response(question, context)
FINAL_RESULTS[key].append(
{
"question": question,
"answer": answer,
"category": category,
"context": context,
"response": response,
"search_time": search_time,
"response_time": response_time,
}
)
with open(output_file_path, "w+") as f:
json.dump(FINAL_RESULTS, f, indent=4)
# Save results
with open(output_file_path, "w+") as f:
json.dump(FINAL_RESULTS, f, indent=4)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/src/openai/predict.py | evaluation/src/openai/predict.py | import argparse
import json
import os
import time
from collections import defaultdict
from dotenv import load_dotenv
from jinja2 import Template
from openai import OpenAI
from tqdm import tqdm
load_dotenv()
ANSWER_PROMPT = """
You are an intelligent memory assistant tasked with retrieving accurate information from conversation memories.
# CONTEXT:
You have access to memories from a conversation. These memories contain
timestamped information that may be relevant to answering the question.
# INSTRUCTIONS:
1. Carefully analyze all provided memories
2. Pay special attention to the timestamps to determine the answer
3. If the question asks about a specific event or fact, look for direct evidence in the memories
4. If the memories contain contradictory information, prioritize the most recent memory
5. If there is a question about time references (like "last year", "two months ago", etc.),
calculate the actual date based on the memory timestamp. For example, if a memory from
4 May 2022 mentions "went to India last year," then the trip occurred in 2021.
6. Always convert relative time references to specific dates, months, or years. For example,
convert "last year" to "2022" or "two months ago" to "March 2023" based on the memory
timestamp. Ignore the reference while answering the question.
7. Focus only on the content of the memories. Do not confuse character
names mentioned in memories with the actual users who created those memories.
8. The answer should be less than 5-6 words.
# APPROACH (Think step by step):
1. First, examine all memories that contain information related to the question
2. Examine the timestamps and content of these memories carefully
3. Look for explicit mentions of dates, times, locations, or events that answer the question
4. If the answer requires calculation (e.g., converting relative time references), show your work
5. Formulate a precise, concise answer based solely on the evidence in the memories
6. Double-check that your answer directly addresses the question asked
7. Ensure your final answer is specific and avoids vague time references
Memories:
{{memories}}
Question: {{question}}
Answer:
"""
class OpenAIPredict:
def __init__(self, model="gpt-4o-mini"):
self.model = model
self.openai_client = OpenAI()
self.results = defaultdict(list)
def search_memory(self, idx):
with open(f"memories/{idx}.txt", "r") as file:
memories = file.read()
return memories, 0
def process_question(self, val, idx):
question = val.get("question", "")
answer = val.get("answer", "")
category = val.get("category", -1)
evidence = val.get("evidence", [])
adversarial_answer = val.get("adversarial_answer", "")
response, search_memory_time, response_time, context = self.answer_question(idx, question)
result = {
"question": question,
"answer": answer,
"category": category,
"evidence": evidence,
"response": response,
"adversarial_answer": adversarial_answer,
"search_memory_time": search_memory_time,
"response_time": response_time,
"context": context,
}
return result
def answer_question(self, idx, question):
memories, search_memory_time = self.search_memory(idx)
template = Template(ANSWER_PROMPT)
answer_prompt = template.render(memories=memories, question=question)
t1 = time.time()
response = self.openai_client.chat.completions.create(
model=os.getenv("MODEL"), messages=[{"role": "system", "content": answer_prompt}], temperature=0.0
)
t2 = time.time()
response_time = t2 - t1
return response.choices[0].message.content, search_memory_time, response_time, memories
def process_data_file(self, file_path, output_file_path):
with open(file_path, "r") as f:
data = json.load(f)
for idx, item in tqdm(enumerate(data), total=len(data), desc="Processing conversations"):
qa = item["qa"]
for question_item in tqdm(
qa, total=len(qa), desc=f"Processing questions for conversation {idx}", leave=False
):
result = self.process_question(question_item, idx)
self.results[idx].append(result)
# Save results after each question is processed
with open(output_file_path, "w") as f:
json.dump(self.results, f, indent=4)
# Final save at the end
with open(output_file_path, "w") as f:
json.dump(self.results, f, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_file_path", type=str, required=True)
args = parser.parse_args()
openai_predict = OpenAIPredict()
openai_predict.process_data_file("../../dataset/locomo10.json", args.output_file_path)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/src/zep/search.py | evaluation/src/zep/search.py | import argparse
import json
import os
import time
from collections import defaultdict
from dotenv import load_dotenv
from jinja2 import Template
from openai import OpenAI
from prompts import ANSWER_PROMPT_ZEP
from tqdm import tqdm
from zep_cloud import EntityEdge, EntityNode
from zep_cloud.client import Zep
load_dotenv()
TEMPLATE = """
FACTS and ENTITIES represent relevant context to the current conversation.
# These are the most relevant facts and their valid date ranges
# format: FACT (Date range: from - to)
{facts}
# These are the most relevant entities
# ENTITY_NAME: entity summary
{entities}
"""
class ZepSearch:
def __init__(self):
self.zep_client = Zep(api_key=os.getenv("ZEP_API_KEY"))
self.results = defaultdict(list)
self.openai_client = OpenAI()
def format_edge_date_range(self, edge: EntityEdge) -> str:
# return f"{datetime(edge.valid_at).strftime('%Y-%m-%d %H:%M:%S') if edge.valid_at else 'date unknown'} - {(edge.invalid_at.strftime('%Y-%m-%d %H:%M:%S') if edge.invalid_at else 'present')}"
return f"{edge.valid_at if edge.valid_at else 'date unknown'} - {(edge.invalid_at if edge.invalid_at else 'present')}"
def compose_search_context(self, edges: list[EntityEdge], nodes: list[EntityNode]) -> str:
facts = [f" - {edge.fact} ({self.format_edge_date_range(edge)})" for edge in edges]
entities = [f" - {node.name}: {node.summary}" for node in nodes]
return TEMPLATE.format(facts="\n".join(facts), entities="\n".join(entities))
def search_memory(self, run_id, idx, query, max_retries=3, retry_delay=1):
start_time = time.time()
retries = 0
while retries < max_retries:
try:
user_id = f"run_id_{run_id}_experiment_user_{idx}"
edges_results = (
self.zep_client.graph.search(
user_id=user_id, reranker="cross_encoder", query=query, scope="edges", limit=20
)
).edges
node_results = (
self.zep_client.graph.search(user_id=user_id, reranker="rrf", query=query, scope="nodes", limit=20)
).nodes
context = self.compose_search_context(edges_results, node_results)
break
except Exception as e:
print("Retrying...")
retries += 1
if retries >= max_retries:
raise e
time.sleep(retry_delay)
end_time = time.time()
return context, end_time - start_time
def process_question(self, run_id, val, idx):
question = val.get("question", "")
answer = val.get("answer", "")
category = val.get("category", -1)
evidence = val.get("evidence", [])
adversarial_answer = val.get("adversarial_answer", "")
response, search_memory_time, response_time, context = self.answer_question(run_id, idx, question)
result = {
"question": question,
"answer": answer,
"category": category,
"evidence": evidence,
"response": response,
"adversarial_answer": adversarial_answer,
"search_memory_time": search_memory_time,
"response_time": response_time,
"context": context,
}
return result
def answer_question(self, run_id, idx, question):
context, search_memory_time = self.search_memory(run_id, idx, question)
template = Template(ANSWER_PROMPT_ZEP)
answer_prompt = template.render(memories=context, question=question)
t1 = time.time()
response = self.openai_client.chat.completions.create(
model=os.getenv("MODEL"), messages=[{"role": "system", "content": answer_prompt}], temperature=0.0
)
t2 = time.time()
response_time = t2 - t1
return response.choices[0].message.content, search_memory_time, response_time, context
def process_data_file(self, file_path, run_id, output_file_path):
with open(file_path, "r") as f:
data = json.load(f)
for idx, item in tqdm(enumerate(data), total=len(data), desc="Processing conversations"):
qa = item["qa"]
for question_item in tqdm(
qa, total=len(qa), desc=f"Processing questions for conversation {idx}", leave=False
):
result = self.process_question(run_id, question_item, idx)
self.results[idx].append(result)
# Save results after each question is processed
with open(output_file_path, "w") as f:
json.dump(self.results, f, indent=4)
# Final save at the end
with open(output_file_path, "w") as f:
json.dump(self.results, f, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--run_id", type=str, required=True)
args = parser.parse_args()
zep_search = ZepSearch()
zep_search.process_data_file("../../dataset/locomo10.json", args.run_id, "results/zep_search_results.json")
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/src/zep/add.py | evaluation/src/zep/add.py | import argparse
import json
import os
from dotenv import load_dotenv
from tqdm import tqdm
from zep_cloud import Message
from zep_cloud.client import Zep
load_dotenv()
class ZepAdd:
def __init__(self, data_path=None):
self.zep_client = Zep(api_key=os.getenv("ZEP_API_KEY"))
self.data_path = data_path
self.data = None
if data_path:
self.load_data()
def load_data(self):
with open(self.data_path, "r") as f:
self.data = json.load(f)
return self.data
def process_conversation(self, run_id, item, idx):
conversation = item["conversation"]
user_id = f"run_id_{run_id}_experiment_user_{idx}"
session_id = f"run_id_{run_id}_experiment_session_{idx}"
# # delete all memories for the two users
# self.zep_client.user.delete(user_id=user_id)
# self.zep_client.memory.delete(session_id=session_id)
self.zep_client.user.add(user_id=user_id)
self.zep_client.memory.add_session(
user_id=user_id,
session_id=session_id,
)
print("Starting to add memories... for user", user_id)
for key in tqdm(conversation.keys(), desc=f"Processing user {user_id}"):
if key in ["speaker_a", "speaker_b"] or "date" in key:
continue
date_time_key = key + "_date_time"
timestamp = conversation[date_time_key]
chats = conversation[key]
for chat in tqdm(chats, desc=f"Adding chats for {key}", leave=False):
self.zep_client.memory.add(
session_id=session_id,
messages=[
Message(
role=chat["speaker"],
role_type="user",
content=f"{timestamp}: {chat['text']}",
)
],
)
def process_all_conversations(self, run_id):
if not self.data:
raise ValueError("No data loaded. Please set data_path and call load_data() first.")
for idx, item in tqdm(enumerate(self.data)):
if idx == 0:
self.process_conversation(run_id, item, idx)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--run_id", type=str, required=True)
args = parser.parse_args()
zep_add = ZepAdd(data_path="../../dataset/locomo10.json")
zep_add.process_all_conversations(args.run_id)
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/src/memzero/search.py | evaluation/src/memzero/search.py | import json
import os
import time
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from dotenv import load_dotenv
from jinja2 import Template
from openai import OpenAI
from prompts import ANSWER_PROMPT, ANSWER_PROMPT_GRAPH
from tqdm import tqdm
from mem0 import MemoryClient
load_dotenv()
class MemorySearch:
def __init__(self, output_path="results.json", top_k=10, filter_memories=False, is_graph=False):
self.mem0_client = MemoryClient(
api_key=os.getenv("MEM0_API_KEY"),
org_id=os.getenv("MEM0_ORGANIZATION_ID"),
project_id=os.getenv("MEM0_PROJECT_ID"),
)
self.top_k = top_k
self.openai_client = OpenAI()
self.results = defaultdict(list)
self.output_path = output_path
self.filter_memories = filter_memories
self.is_graph = is_graph
if self.is_graph:
self.ANSWER_PROMPT = ANSWER_PROMPT_GRAPH
else:
self.ANSWER_PROMPT = ANSWER_PROMPT
def search_memory(self, user_id, query, max_retries=3, retry_delay=1):
start_time = time.time()
retries = 0
while retries < max_retries:
try:
if self.is_graph:
print("Searching with graph")
memories = self.mem0_client.search(
query,
user_id=user_id,
top_k=self.top_k,
filter_memories=self.filter_memories,
enable_graph=True,
output_format="v1.1",
)
else:
memories = self.mem0_client.search(
query, user_id=user_id, top_k=self.top_k, filter_memories=self.filter_memories
)
break
except Exception as e:
print("Retrying...")
retries += 1
if retries >= max_retries:
raise e
time.sleep(retry_delay)
end_time = time.time()
if not self.is_graph:
semantic_memories = [
{
"memory": memory["memory"],
"timestamp": memory["metadata"]["timestamp"],
"score": round(memory["score"], 2),
}
for memory in memories
]
graph_memories = None
else:
semantic_memories = [
{
"memory": memory["memory"],
"timestamp": memory["metadata"]["timestamp"],
"score": round(memory["score"], 2),
}
for memory in memories["results"]
]
graph_memories = [
{"source": relation["source"], "relationship": relation["relationship"], "target": relation["target"]}
for relation in memories["relations"]
]
return semantic_memories, graph_memories, end_time - start_time
def answer_question(self, speaker_1_user_id, speaker_2_user_id, question, answer, category):
speaker_1_memories, speaker_1_graph_memories, speaker_1_memory_time = self.search_memory(
speaker_1_user_id, question
)
speaker_2_memories, speaker_2_graph_memories, speaker_2_memory_time = self.search_memory(
speaker_2_user_id, question
)
search_1_memory = [f"{item['timestamp']}: {item['memory']}" for item in speaker_1_memories]
search_2_memory = [f"{item['timestamp']}: {item['memory']}" for item in speaker_2_memories]
template = Template(self.ANSWER_PROMPT)
answer_prompt = template.render(
speaker_1_user_id=speaker_1_user_id.split("_")[0],
speaker_2_user_id=speaker_2_user_id.split("_")[0],
speaker_1_memories=json.dumps(search_1_memory, indent=4),
speaker_2_memories=json.dumps(search_2_memory, indent=4),
speaker_1_graph_memories=json.dumps(speaker_1_graph_memories, indent=4),
speaker_2_graph_memories=json.dumps(speaker_2_graph_memories, indent=4),
question=question,
)
t1 = time.time()
response = self.openai_client.chat.completions.create(
model=os.getenv("MODEL"), messages=[{"role": "system", "content": answer_prompt}], temperature=0.0
)
t2 = time.time()
response_time = t2 - t1
return (
response.choices[0].message.content,
speaker_1_memories,
speaker_2_memories,
speaker_1_memory_time,
speaker_2_memory_time,
speaker_1_graph_memories,
speaker_2_graph_memories,
response_time,
)
def process_question(self, val, speaker_a_user_id, speaker_b_user_id):
question = val.get("question", "")
answer = val.get("answer", "")
category = val.get("category", -1)
evidence = val.get("evidence", [])
adversarial_answer = val.get("adversarial_answer", "")
(
response,
speaker_1_memories,
speaker_2_memories,
speaker_1_memory_time,
speaker_2_memory_time,
speaker_1_graph_memories,
speaker_2_graph_memories,
response_time,
) = self.answer_question(speaker_a_user_id, speaker_b_user_id, question, answer, category)
result = {
"question": question,
"answer": answer,
"category": category,
"evidence": evidence,
"response": response,
"adversarial_answer": adversarial_answer,
"speaker_1_memories": speaker_1_memories,
"speaker_2_memories": speaker_2_memories,
"num_speaker_1_memories": len(speaker_1_memories),
"num_speaker_2_memories": len(speaker_2_memories),
"speaker_1_memory_time": speaker_1_memory_time,
"speaker_2_memory_time": speaker_2_memory_time,
"speaker_1_graph_memories": speaker_1_graph_memories,
"speaker_2_graph_memories": speaker_2_graph_memories,
"response_time": response_time,
}
# Save results after each question is processed
with open(self.output_path, "w") as f:
json.dump(self.results, f, indent=4)
return result
def process_data_file(self, file_path):
with open(file_path, "r") as f:
data = json.load(f)
for idx, item in tqdm(enumerate(data), total=len(data), desc="Processing conversations"):
qa = item["qa"]
conversation = item["conversation"]
speaker_a = conversation["speaker_a"]
speaker_b = conversation["speaker_b"]
speaker_a_user_id = f"{speaker_a}_{idx}"
speaker_b_user_id = f"{speaker_b}_{idx}"
for question_item in tqdm(
qa, total=len(qa), desc=f"Processing questions for conversation {idx}", leave=False
):
result = self.process_question(question_item, speaker_a_user_id, speaker_b_user_id)
self.results[idx].append(result)
# Save results after each question is processed
with open(self.output_path, "w") as f:
json.dump(self.results, f, indent=4)
# Final save at the end
with open(self.output_path, "w") as f:
json.dump(self.results, f, indent=4)
def process_questions_parallel(self, qa_list, speaker_a_user_id, speaker_b_user_id, max_workers=1):
def process_single_question(val):
result = self.process_question(val, speaker_a_user_id, speaker_b_user_id)
# Save results after each question is processed
with open(self.output_path, "w") as f:
json.dump(self.results, f, indent=4)
return result
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = list(
tqdm(executor.map(process_single_question, qa_list), total=len(qa_list), desc="Answering Questions")
)
# Final save at the end
with open(self.output_path, "w") as f:
json.dump(self.results, f, indent=4)
return results
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/src/memzero/add.py | evaluation/src/memzero/add.py | import json
import os
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from dotenv import load_dotenv
from tqdm import tqdm
from mem0 import MemoryClient
load_dotenv()
# Update custom instructions
custom_instructions = """
Generate personal memories that follow these guidelines:
1. Each memory should be self-contained with complete context, including:
- The person's name, do not use "user" while creating memories
- Personal details (career aspirations, hobbies, life circumstances)
- Emotional states and reactions
- Ongoing journeys or future plans
- Specific dates when events occurred
2. Include meaningful personal narratives focusing on:
- Identity and self-acceptance journeys
- Family planning and parenting
- Creative outlets and hobbies
- Mental health and self-care activities
- Career aspirations and education goals
- Important life events and milestones
3. Make each memory rich with specific details rather than general statements
- Include timeframes (exact dates when possible)
- Name specific activities (e.g., "charity race for mental health" rather than just "exercise")
- Include emotional context and personal growth elements
4. Extract memories only from user messages, not incorporating assistant responses
5. Format each memory as a paragraph with a clear narrative structure that captures the person's experience, challenges, and aspirations
"""
class MemoryADD:
    """Ingest two-speaker conversations into Mem0.

    Each session is added twice -- once from each speaker's point of view
    (user/assistant roles swapped) -- so both speakers get their own
    memory store.
    """

    def __init__(self, data_path=None, batch_size=2, is_graph=False):
        # Credentials/project come from the environment; update_project
        # pushes the module-level custom_instructions to the Mem0 project.
        self.mem0_client = MemoryClient(
            api_key=os.getenv("MEM0_API_KEY"),
            org_id=os.getenv("MEM0_ORGANIZATION_ID"),
            project_id=os.getenv("MEM0_PROJECT_ID"),
        )
        self.mem0_client.update_project(custom_instructions=custom_instructions)
        self.batch_size = batch_size  # chat turns sent per add() call
        self.data_path = data_path
        self.data = None
        self.is_graph = is_graph  # forwarded as enable_graph to the API
        if data_path:
            self.load_data()

    def load_data(self):
        """Load the conversation dataset (JSON) from self.data_path."""
        with open(self.data_path, "r") as f:
            self.data = json.load(f)
        return self.data

    def add_memory(self, user_id, message, metadata, retries=3):
        """Add one batch of messages for user_id, retrying transient API errors.

        Re-raises the last exception when all `retries` attempts fail.
        """
        for attempt in range(retries):
            try:
                _ = self.mem0_client.add(
                    message, user_id=user_id, version="v2", metadata=metadata, enable_graph=self.is_graph
                )
                return
            except Exception as e:
                if attempt < retries - 1:
                    time.sleep(1)  # Wait before retrying
                    continue
                else:
                    raise e

    def add_memories_for_speaker(self, speaker, messages, timestamp, desc):
        """Send `messages` for one speaker in batches of self.batch_size."""
        for i in tqdm(range(0, len(messages), self.batch_size), desc=desc):
            batch_messages = messages[i : i + self.batch_size]
            self.add_memory(speaker, batch_messages, metadata={"timestamp": timestamp})

    def process_conversation(self, item, idx):
        """Wipe and re-ingest a single conversation for both speakers.

        idx is appended to the speaker names to build unique user ids, so
        the same dataset can be ingested several times side by side.
        """
        conversation = item["conversation"]
        speaker_a = conversation["speaker_a"]
        speaker_b = conversation["speaker_b"]
        speaker_a_user_id = f"{speaker_a}_{idx}"
        speaker_b_user_id = f"{speaker_b}_{idx}"
        # delete all memories for the two users
        self.mem0_client.delete_all(user_id=speaker_a_user_id)
        self.mem0_client.delete_all(user_id=speaker_b_user_id)
        for key in conversation.keys():
            # Only session keys carry chat turns; skip speaker names and the
            # *_date_time / timestamp companion keys.
            if key in ["speaker_a", "speaker_b"] or "date" in key or "timestamp" in key:
                continue
            date_time_key = key + "_date_time"
            timestamp = conversation[date_time_key]
            chats = conversation[key]
            messages = []
            messages_reverse = []
            for chat in chats:
                # Build both perspectives: speaker A is the "user" in one
                # list and the "assistant" in the mirrored list (and vice
                # versa for speaker B).
                if chat["speaker"] == speaker_a:
                    messages.append({"role": "user", "content": f"{speaker_a}: {chat['text']}"})
                    messages_reverse.append({"role": "assistant", "content": f"{speaker_a}: {chat['text']}"})
                elif chat["speaker"] == speaker_b:
                    messages.append({"role": "assistant", "content": f"{speaker_b}: {chat['text']}"})
                    messages_reverse.append({"role": "user", "content": f"{speaker_b}: {chat['text']}"})
                else:
                    raise ValueError(f"Unknown speaker: {chat['speaker']}")
            # add memories for the two users on different threads
            thread_a = threading.Thread(
                target=self.add_memories_for_speaker,
                args=(speaker_a_user_id, messages, timestamp, "Adding Memories for Speaker A"),
            )
            thread_b = threading.Thread(
                target=self.add_memories_for_speaker,
                args=(speaker_b_user_id, messages_reverse, timestamp, "Adding Memories for Speaker B"),
            )
            thread_a.start()
            thread_b.start()
            thread_a.join()
            thread_b.join()
        print("Messages added successfully")

    def process_all_conversations(self, max_workers=10):
        """Process every loaded conversation concurrently.

        Raises ValueError when no dataset is loaded; worker exceptions are
        re-raised via future.result().
        """
        if not self.data:
            raise ValueError("No data loaded. Please set data_path and call load_data() first.")
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [executor.submit(self.process_conversation, item, idx) for idx, item in enumerate(self.data)]
            for future in futures:
                future.result()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/metrics/llm_judge.py | evaluation/metrics/llm_judge.py | import argparse
import json
from collections import defaultdict
import numpy as np
from openai import OpenAI
from mem0.memory.utils import extract_json
client = OpenAI()
# Prompt template for the LLM judge; filled in by evaluate_llm_judge().
# The model must answer with JSON of the form {"label": "CORRECT"|"WRONG"}.
ACCURACY_PROMPT = """
Your task is to label an answer to a question as ’CORRECT’ or ’WRONG’. You will be given the following data:
(1) a question (posed by one user to another user),
(2) a ’gold’ (ground truth) answer,
(3) a generated answer
which you will score as CORRECT/WRONG.
The point of the question is to ask about something one user should know about the other user based on their prior conversations.
The gold answer will usually be a concise and short answer that includes the referenced topic, for example:
Question: Do you remember what I got the last time I went to Hawaii?
Gold answer: A shell necklace
The generated answer might be much longer, but you should be generous with your grading - as long as it touches on the same topic as the gold answer, it should be counted as CORRECT.
For time related questions, the gold answer will be a specific date, month, year, etc. The generated answer might be much longer or use relative time references (like "last Tuesday" or "next month"), but you should be generous with your grading - as long as it refers to the same date or time period as the gold answer, it should be counted as CORRECT. Even if the format differs (e.g., "May 7th" vs "7 May"), consider it CORRECT if it's the same date.
Now it's time for the real question:
Question: {question}
Gold answer: {gold_answer}
Generated answer: {generated_answer}
First, provide a short (one sentence) explanation of your reasoning, then finish with CORRECT or WRONG.
Do NOT include both CORRECT and WRONG in your response, or it will break the evaluation script.
Just return the label CORRECT or WRONG in a json format with the key as "label".
"""
def evaluate_llm_judge(question, gold_answer, generated_answer):
    """Ask the LLM judge to grade an answer.

    Returns 1 when the judge labels it CORRECT, otherwise 0.
    """
    prompt = ACCURACY_PROMPT.format(
        question=question, gold_answer=gold_answer, generated_answer=generated_answer
    )
    # temperature=0 + JSON response format for a deterministic, parseable verdict.
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
        response_format={"type": "json_object"},
        temperature=0.0,
    )
    verdict = json.loads(extract_json(completion.choices[0].message.content))
    return int(verdict["label"] == "CORRECT")
def main():
    """Main function to evaluate RAG results using LLM judge."""
    parser = argparse.ArgumentParser(description="Evaluate RAG results using LLM judge")
    parser.add_argument(
        "--input_file",
        type=str,
        default="results/default_run_v4_k30_new_graph.json",
        help="Path to the input dataset file",
    )
    args = parser.parse_args()
    dataset_path = args.input_file
    # Output is written next to the inputs, prefixed with llm_judge_.
    output_path = f"results/llm_judge_{dataset_path.split('/')[-1]}"
    with open(dataset_path, "r") as f:
        data = json.load(f)
    LLM_JUDGE = defaultdict(list)  # category -> list of 0/1 labels
    RESULTS = defaultdict(list)    # running index -> judged QA record
    index = 0
    for k, v in data.items():
        for x in v:
            question = x["question"]
            gold_answer = x["answer"]
            generated_answer = x["response"]
            category = x["category"]
            # Skip category 5
            if int(category) == 5:
                continue
            # Evaluate the answer
            label = evaluate_llm_judge(question, gold_answer, generated_answer)
            LLM_JUDGE[category].append(label)
            # Store the results
            RESULTS[index].append(
                {
                    "question": question,
                    "gt_answer": gold_answer,
                    "response": generated_answer,
                    "category": category,
                    "llm_label": label,
                }
            )
            # Save intermediate results so a crash loses at most one item.
            # NOTE(review): rewriting the whole file per item is O(n^2) I/O.
            with open(output_path, "w") as f:
                json.dump(RESULTS, f, indent=4)
            # Print current accuracy for all categories
            print("All categories accuracy:")
            for cat, results in LLM_JUDGE.items():
                if results:  # Only print if there are results for this category
                    print(f" Category {cat}: {np.mean(results):.4f} ({sum(results)}/{len(results)})")
            print("------------------------------------------")
            index += 1
    # Save final results
    with open(output_path, "w") as f:
        json.dump(RESULTS, f, indent=4)
    # Print final summary
    print("PATH: ", dataset_path)
    print("------------------------------------------")
    for k, v in LLM_JUDGE.items():
        print(k, np.mean(v))


if __name__ == "__main__":
    main()
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
mem0ai/mem0 | https://github.com/mem0ai/mem0/blob/69a552d8a85637452ea20382a6ac3991fd6d60b3/evaluation/metrics/utils.py | evaluation/metrics/utils.py | """
Borrowed from https://github.com/WujiangXu/AgenticMemory/blob/main/utils.py
@article{xu2025mem,
title={A-mem: Agentic memory for llm agents},
author={Xu, Wujiang and Liang, Zujie and Mei, Kai and Gao, Hang and Tan, Juntao
and Zhang, Yongfeng},
journal={arXiv preprint arXiv:2502.12110},
year={2025}
}
"""
import statistics
from collections import defaultdict
from typing import Dict, List, Union
import nltk
from bert_score import score as bert_score
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
from nltk.translate.meteor_score import meteor_score
from rouge_score import rouge_scorer
from sentence_transformers import SentenceTransformer
# from load_dataset import load_locomo_dataset, QA, Turn, Session, Conversation
from sentence_transformers.util import pytorch_cos_sim
# Download required NLTK data
try:
    nltk.download("punkt", quiet=True)
    nltk.download("wordnet", quiet=True)
except Exception as e:
    print(f"Error downloading NLTK data: {e}")

# Initialize SentenceTransformer model (this will be reused).
# Kept as a module-level singleton so all similarity calls share one model;
# falls back to None (similarity then returns 0.0) when loading fails.
try:
    sentence_model = SentenceTransformer("all-MiniLM-L6-v2")
except Exception as e:
    print(f"Warning: Could not load SentenceTransformer model: {e}")
    sentence_model = None
def simple_tokenize(text):
    """Lowercase *text*, strip basic punctuation, and split on whitespace."""
    normalized = str(text).lower()
    # Turn sentence punctuation into spaces before splitting.
    for punct in (".", ",", "!", "?"):
        normalized = normalized.replace(punct, " ")
    return normalized.split()
def calculate_rouge_scores(prediction: str, reference: str) -> Dict[str, float]:
    """Compute ROUGE-1/2/L F-measures of *prediction* against *reference*."""
    metric_names = ("rouge1", "rouge2", "rougeL")
    scorer = rouge_scorer.RougeScorer(list(metric_names), use_stemmer=True)
    # rouge_scorer expects (target, prediction) argument order.
    raw = scorer.score(reference, prediction)
    return {f"{name}_f": raw[name].fmeasure for name in metric_names}
def calculate_bleu_scores(prediction: str, reference: str) -> Dict[str, float]:
    """Compute smoothed BLEU-1..4 of *prediction* against *reference*.

    Failures for an individual n-gram order are logged and scored 0.0.
    """
    hypothesis = nltk.word_tokenize(prediction.lower())
    references = [nltk.word_tokenize(reference.lower())]
    smoothing = SmoothingFunction().method1
    # Uniform weights over the first n n-gram orders for BLEU-n.
    ngram_weights = {
        1: (1, 0, 0, 0),
        2: (0.5, 0.5, 0, 0),
        3: (0.33, 0.33, 0.33, 0),
        4: (0.25, 0.25, 0.25, 0.25),
    }
    results = {}
    for n, weights in ngram_weights.items():
        try:
            value = sentence_bleu(references, hypothesis,
                                  weights=weights, smoothing_function=smoothing)
        except Exception as e:
            print(f"Error calculating BLEU score: {e}")
            value = 0.0
        results[f"bleu{n}"] = value
    return results
def calculate_bert_scores(prediction: str, reference: str) -> Dict[str, float]:
    """Compute BERTScore precision/recall/F1; all zeros on failure."""
    try:
        precision, recall, f1 = bert_score([prediction], [reference],
                                           lang="en", verbose=False)
        return {
            "bert_precision": precision.item(),
            "bert_recall": recall.item(),
            "bert_f1": f1.item(),
        }
    except Exception as e:
        print(f"Error calculating BERTScore: {e}")
        return {"bert_precision": 0.0, "bert_recall": 0.0, "bert_f1": 0.0}
def calculate_meteor_score(prediction: str, reference: str) -> float:
    """Compute METEOR on whitespace-tokenized inputs; 0.0 on failure."""
    try:
        # Tokenization stays inside the try so non-string inputs are
        # caught and scored 0.0, matching the error-handling contract.
        tokenized_references = [reference.split()]
        tokenized_prediction = prediction.split()
        return meteor_score(tokenized_references, tokenized_prediction)
    except Exception as e:
        print(f"Error calculating METEOR score: {e}")
        return 0.0
def calculate_sentence_similarity(prediction: str, reference: str) -> float:
    """Cosine similarity of SentenceBERT embeddings; 0.0 if unavailable."""
    if sentence_model is None:
        # The model failed to load at import time.
        return 0.0
    try:
        pred_embedding = sentence_model.encode([prediction], convert_to_tensor=True)
        ref_embedding = sentence_model.encode([reference], convert_to_tensor=True)
        return float(pytorch_cos_sim(pred_embedding, ref_embedding).item())
    except Exception as e:
        print(f"Error calculating sentence similarity: {e}")
        return 0.0
def calculate_metrics(prediction: str, reference: str) -> Dict[str, float]:
    """Compute exact-match, token-set F1 and BLEU-1..4 for one prediction.

    Empty/None inputs short-circuit to an all-zero dict.

    NOTE(review): the zero dict also carries ROUGE/BERT/METEOR/SBERT keys
    that the non-empty path never computes, so the key set differs by
    path -- preserved here for compatibility; confirm whether consumers
    rely on it.
    """
    if not prediction or not reference:
        return {
            "exact_match": 0,
            "f1": 0.0,
            "rouge1_f": 0.0,
            "rouge2_f": 0.0,
            "rougeL_f": 0.0,
            "bleu1": 0.0,
            "bleu2": 0.0,
            "bleu3": 0.0,
            "bleu4": 0.0,
            "bert_f1": 0.0,
            "meteor": 0.0,
            "sbert_similarity": 0.0,
        }
    pred_text = str(prediction).strip()
    ref_text = str(reference).strip()
    # Token-level F1 over unique tokens.
    pred_set = set(simple_tokenize(pred_text))
    ref_set = set(simple_tokenize(ref_text))
    overlap = len(pred_set & ref_set)
    if pred_set and ref_set:
        precision = overlap / len(pred_set)
        recall = overlap / len(ref_set)
        token_f1 = (2 * precision * recall / (precision + recall)
                    if (precision + recall) > 0 else 0.0)
    else:
        token_f1 = 0.0
    return {
        "exact_match": int(pred_text.lower() == ref_text.lower()),
        "f1": token_f1,
        **calculate_bleu_scores(pred_text, ref_text),
    }
def aggregate_metrics(
    all_metrics: List[Dict[str, float]], all_categories: List[int]
) -> Dict[str, Dict[str, Union[float, Dict[str, float]]]]:
    """Summarize per-sample metrics overall and per category.

    Returns {"overall": {...}, "category_<c>": {...}} where each metric
    maps to its mean/std/median/min/max/count. Empty input yields {}.
    """
    if not all_metrics:
        return {}

    def _summary(values):
        # A single-sample group gets std 0.0 (stdev needs >= 2 points).
        return {
            "mean": statistics.mean(values),
            "std": statistics.stdev(values) if len(values) > 1 else 0.0,
            "median": statistics.median(values),
            "min": min(values),
            "max": max(values),
            "count": len(values),
        }

    overall = defaultdict(list)
    per_category = defaultdict(lambda: defaultdict(list))
    for sample_metrics, category in zip(all_metrics, all_categories):
        for name, value in sample_metrics.items():
            overall[name].append(value)
            per_category[category][name].append(value)

    results = {"overall": {name: _summary(vals) for name, vals in overall.items()}}
    for category in sorted(per_category.keys()):
        results[f"category_{category}"] = {
            name: _summary(vals)
            for name, vals in per_category[category].items() if vals
        }
    return results
| python | Apache-2.0 | 69a552d8a85637452ea20382a6ac3991fd6d60b3 | 2026-01-04T14:39:42.142319Z | false |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/setup.py | setup.py | from setuptools import setup, find_packages
import d2l
# Pinned runtime dependencies for the d2l helper package.
requirements = [
    'jupyter==1.0.0',
    'numpy==1.21.5',
    'matplotlib==3.5.1',
    'requests==2.25.1',
    'pandas==1.2.4'
]

setup(
    name='d2l',
    # Single source of truth: version comes from d2l.__version__.
    version=d2l.__version__,
    python_requires='>=3.5',
    author='D2L Developers',
    author_email='d2l.devs@gmail.com',
    url='https://d2l.ai',
    description='Dive into Deep Learning',
    license='MIT-0',
    packages=find_packages(),
    zip_safe=True,
    install_requires=requirements,
)
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | false |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/static/post_latex/main.py | static/post_latex/main.py | import os
import re
import regex
import sys
def _unnumber_chaps_and_secs(lines):
def _startswith_unnumbered(l):
UNNUMBERED = {'\\section{小结',
'\\section{练习',
'\\subsection{小结',
'\\subsection{练习'}
for unnum in UNNUMBERED:
if l.startswith(unnum):
return True
return False
# Preface, Installation, and Notation are unnumbered chapters
NUM_UNNUMBERED_CHAPS = 3
# Prelimilaries
TOC2_START_CHAP_NO = 5
preface_reached = False
ch2_reached = False
num_chaps = 0
for i, l in enumerate(lines):
if l.startswith('\\chapter{'):
num_chaps += 1
# Unnumber unnumbered chapters
if num_chaps <= NUM_UNNUMBERED_CHAPS:
chap_name = re.split('{|}', l)[1]
lines[i] = ('\\chapter*{' + chap_name
+ '}\\addcontentsline{toc}{chapter}{'
+ chap_name + '}\n')
# Set tocdepth to 2 after Chap 1
elif num_chaps == TOC2_START_CHAP_NO:
lines[i] = ('\\addtocontents{toc}{\\protect\\setcounter{tocdepth}{2}}\n'
+ lines[i])
# Unnumber all sections in unnumbered chapters
elif 1 <= num_chaps <= NUM_UNNUMBERED_CHAPS:
if (l.startswith('\\section') or l.startswith('\\subsection')
or l.startswith('\\subsubsection')):
lines[i] = l.replace('section{', 'section*{')
# Unnumber summary, references, exercises, qr code in numbered chapters
elif _startswith_unnumbered(l):
lines[i] = l.replace('section{', 'section*{')
# Since we inserted '\n' in some lines[i], re-build the list
lines = '\n'.join(lines).split('\n')
# If label is of chap*/index.md title, its numref is Chapter X instead of Section X
def _sec_to_chap(lines):
    """Rewrite 'Section \\ref{...index:...}' references as 'Chapter \\ref{...}'.

    Labels pointing at a chapter index page (detokenized path contains
    'index:') must read as chapters, not sections. Mutates *lines* in place.
    """
    for i, l in enumerate(lines):
        # e.g., {Section \ref{\detokenize{chapter_dlc/index:chap-dlc}}} matches
        # {Section \ref{\detokenize{chapter_prelim/nd:sec-nd}}} does not match
        # Note that there can be multiple {Section } in one line
        # (?R) is a recursive pattern from the third-party `regex` module
        # (not stdlib re); it extracts maximal balanced-brace groups.
        longest_balanced_braces = regex.findall('\{(?>[^{}]|(?R))*\}', l)
        for src in longest_balanced_braces:
            if src.startswith('{Section \\ref') and 'index:' in src:
                tgt = src.replace('Section \\ref', 'Chapter \\ref')
                lines[i] = lines[i].replace(src, tgt)
# Remove date
def _edit_titlepage(pdf_dir):
smanual = os.path.join(pdf_dir, 'sphinxmanual.cls')
with open(smanual, 'r') as f:
lines = f.read().split('\n')
for i, l in enumerate(lines):
lines[i] = lines[i].replace('\\@date', '')
with open(smanual, 'w') as f:
f.write('\n'.join(lines))
def delete_lines(lines, deletes):
    """Return *lines* with the 0-based indices in *deletes* removed.

    *deletes* may be any iterable of indices; it is materialized into a set
    so each membership test is O(1) instead of O(len(deletes)) per line.
    """
    to_drop = set(deletes)
    return [line for i, line in enumerate(lines) if i not in to_drop]
def _delete_discussions_title(lines):
    """Return *lines* without the 'Discussion' heading block.

    Everything from a Discussion section heading up to (but not including)
    the next \\sphinxincludegraphics line is dropped.
    """
    drop = set()
    in_discussion = False
    for idx, line in enumerate(lines):
        if 'section*{Discussion' in line or 'section{Discussion' in line:
            in_discussion = True
        elif in_discussion and '\\sphinxincludegraphics' in line:
            in_discussion = False
        if in_discussion:
            drop.add(idx)
    # Inlined the former delete_lines helper call; same filtering result.
    return [line for idx, line in enumerate(lines) if idx not in drop]
def main():
    """Entry point: post-process the .tex file given as argv[1] in place."""
    tex_file = sys.argv[1]
    with open(tex_file, 'r') as f:
        lines = f.read().split('\n')
    _unnumber_chaps_and_secs(lines)
    _sec_to_chap(lines)
    #lines = _delete_discussions_title(lines)
    with open(tex_file, 'w') as f:
        f.write('\n'.join(lines))
    pdf_dir = os.path.dirname(tex_file)
    # Title-page date removal is currently disabled.
    #_edit_titlepage(pdf_dir)


if __name__ == "__main__":
    main()
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | false |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/ci/submit-job.py | ci/submit-job.py | import argparse
import random
import os
import re
import sys
import time
from datetime import datetime
import boto3
from botocore.compat import total_seconds
from botocore.config import Config
# Maps each --job-type choice to the AWS Batch job definition
# ("name:revision") and the job queue it runs on.
job_type_info = {
    'ci-cpu': {
        'job_definition': 'd2l-ci-cpu-builder:2',
        'job_queue': 'D2L-CI-CPU'
    },
    'ci-cpu-push': {
        'job_definition': 'd2l-ci-cpu-builder-push:7',
        'job_queue': 'D2L-CI-CPU'
    },
    'ci-cpu-release': {
        'job_definition': 'd2l-ci-cpu-builder-release:1',
        'job_queue': 'D2L-CI-CPU'
    },
    'ci-gpu-torch': {
        'job_definition': 'd2l-ci-zh-gpu-torch:1',
        'job_queue': 'D2L-CI-GPU'
    },
    'ci-gpu-tf': {
        'job_definition': 'd2l-ci-zh-gpu-tf:1',
        'job_queue': 'D2L-CI-GPU'
    },
    'ci-gpu-mxnet': {
        'job_definition': 'd2l-ci-zh-gpu-mxnet:1',
        'job_queue': 'D2L-CI-GPU'
    },
    'ci-gpu-paddle': {
        'job_definition': 'd2l-ci-zh-gpu-paddle:1',
        'job_queue': 'D2L-CI-GPU'
    }
}

# Create push job types for GPUs with same definitions
# (list() snapshots the keys because the dict grows during the loop).
for job_type in list(job_type_info.keys()):
    if job_type.startswith('ci-gpu'):
        job_type_info[job_type+'-push'] = job_type_info[job_type]
        job_type_info[job_type+'-release'] = job_type_info[job_type]
# ----- Command-line interface -----
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--profile', help='profile name of aws account.', type=str,
                    default=None)
parser.add_argument('--region', help='Default region when creating new connections', type=str,
                    default='us-west-2')
parser.add_argument('--name', help='name of the job', type=str, default='d2l-ci')
parser.add_argument('--job-type', help='type of job to submit.', type=str,
                    choices=job_type_info.keys(), default='ci-cpu')
parser.add_argument('--source-ref',
                    help='ref in d2l-zh main github. e.g. master, refs/pull/500/head',
                    type=str, default='master')
parser.add_argument('--work-dir',
                    help='working directory inside the repo. e.g. scripts/preprocess',
                    type=str, default='.')
parser.add_argument('--saved-output',
                    help='output to be saved, relative to working directory. '
                    'it can be either a single file or a directory',
                    type=str, default='None')
parser.add_argument('--save-path',
                    help='s3 path where files are saved.',
                    type=str, default='batch/temp/{}'.format(datetime.now().isoformat()))
parser.add_argument('--command', help='command to run', type=str,
                    default='git rev-parse HEAD | tee stdout.log')
parser.add_argument('--remote',
                    help='git repo address. https://github.com/d2l-ai/d2l-zh',
                    type=str, default="https://github.com/d2l-ai/d2l-zh")
parser.add_argument('--safe-to-use-script',
                    help='whether the script changes from the actor is safe. We assume it is safe if the actor has write permission to our repo',
                    action='store_true')
parser.add_argument('--original-repo', help='name of the repo', type=str, default='d2l-zh')
parser.add_argument('--wait', help='block wait until the job completes. '
                    'Non-zero exit code if job fails.', action='store_true')
parser.add_argument('--timeout', help='job timeout in seconds', default=7200, type=int)

args = parser.parse_args()

session = boto3.Session(profile_name=args.profile, region_name=args.region)
# Retry aggressively: the Batch/CloudWatch APIs throttle under polling.
config = Config(
    retries = dict(
        max_attempts = 20
    )
)
batch, cloudwatch = [session.client(service_name=sn, config=config) for sn in ['batch', 'logs']]
def printLogs(logGroupName, logStreamName, startTime):
    """Print all CloudWatch log events from the stream since startTime.

    Follows pagination via nextForwardToken and returns the timestamp of
    the last printed event (or startTime - 1 when there were none) so the
    caller can resume from there on the next poll.
    """
    kwargs = {'logGroupName': logGroupName,
              'logStreamName': logStreamName,
              'startTime': startTime,
              'startFromHead': True}

    lastTimestamp = startTime - 1
    while True:
        logEvents = cloudwatch.get_log_events(**kwargs)

        for event in logEvents['events']:
            lastTimestamp = event['timestamp']
            timestamp = datetime.utcfromtimestamp(lastTimestamp / 1000.0).isoformat()
            # Normalize to millisecond precision with a trailing 'Z'.
            print('[{}] {}'.format((timestamp + '.000')[:23] + 'Z', event['message']))

        nextToken = logEvents['nextForwardToken']
        # CloudWatch signals end-of-stream by echoing back the same token.
        if nextToken and kwargs.get('nextToken') != nextToken:
            kwargs['nextToken'] = nextToken
        else:
            break
    return lastTimestamp
def nowInMillis():
    """Return the current UTC time in milliseconds since the Unix epoch.

    The previous implementation truncated to whole seconds before scaling
    by 1000, so the result was only second-accurate despite the name; it
    also relied on botocore's total_seconds shim, which the stdlib
    timedelta.total_seconds() replaces.
    """
    endTime = int((datetime.utcnow() - datetime(1970, 1, 1)).total_seconds() * 1000)
    return endTime
def main():
    """Submit the Batch job and, with --wait, poll it while streaming logs."""
    spin = ['-', '/', '|', '\\', '-', '/', '|', '\\']
    logGroupName = '/aws/batch/job'

    jobName = re.sub('[^A-Za-z0-9_\-]', '', args.name)[:128]  # Enforce AWS Batch jobName rules
    jobType = args.job_type
    jobQueue = job_type_info[jobType]['job_queue']
    jobDefinition = job_type_info[jobType]['job_definition']
    wait = args.wait

    safe_to_use_script = 'False'
    if args.safe_to_use_script:
        safe_to_use_script = 'True'

    # Substituted into the Batch job definition's container command.
    parameters = {
        'SOURCE_REF': args.source_ref,
        'WORK_DIR': args.work_dir,
        'SAVED_OUTPUT': args.saved_output,
        'SAVE_PATH': args.save_path,
        'COMMAND': f"\"{args.command}\"",  # wrap command with double quotation mark, so that batch can treat it as a single command
        'REMOTE': args.remote,
        'SAFE_TO_USE_SCRIPT': safe_to_use_script,
        'ORIGINAL_REPO': args.original_repo
    }
    kwargs = dict(
        jobName=jobName,
        jobQueue=jobQueue,
        jobDefinition=jobDefinition,
        parameters=parameters,
    )
    if args.timeout is not None:
        kwargs['timeout'] = {'attemptDurationSeconds': args.timeout}
    submitJobResponse = batch.submit_job(**kwargs)

    jobId = submitJobResponse['jobId']
    # Export Batch_JobID to Github Actions Environment Variable
    with open(os.environ['GITHUB_ENV'], 'a') as f:
        f.write(f'Batch_JobID={jobId}\n')
    os.environ['batch_jobid'] = jobId
    print('Submitted job [{} - {}] to the job queue [{}]'.format(jobName, jobId, jobQueue))

    spinner = 0
    running = False
    status_set = set()
    startTime = 0
    logStreamName = None
    # Poll until the job reaches a terminal state (only when --wait is set).
    while wait:
        time.sleep(random.randint(5, 10))  # jittered interval to avoid API throttling
        describeJobsResponse = batch.describe_jobs(jobs=[jobId])
        status = describeJobsResponse['jobs'][0]['status']
        if status == 'SUCCEEDED' or status == 'FAILED':
            # Flush any remaining log events before exiting.
            if logStreamName:
                startTime = printLogs(logGroupName, logStreamName, startTime) + 1
            print('=' * 80)
            print('Job [{} - {}] {}'.format(jobName, jobId, status))
            # Exit code 1 when the job failed, 0 on success.
            sys.exit(status == 'FAILED')
        elif status == 'RUNNING':
            logStreamName = describeJobsResponse['jobs'][0]['container']['logStreamName']
            if not running:
                running = True
                print('\rJob [{}, {}] is RUNNING.'.format(jobName, jobId))
                if logStreamName:
                    print('Output [{}]:\n {}'.format(logStreamName, '=' * 80))
            if logStreamName:
                startTime = printLogs(logGroupName, logStreamName, startTime) + 1
        elif status not in status_set:
            # Print each pre-running status (SUBMITTED, PENDING, ...) once.
            status_set.add(status)
            print('\rJob [%s - %s] is %-9s... %s' % (jobName, jobId, status, spin[spinner % len(spin)]),)
            sys.stdout.flush()
        spinner += 1


if __name__ == '__main__':
    main()
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | false |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/ci/docker/print_versions.py | ci/docker/print_versions.py | import os
import sys

# Optional CLI argument selects which framework's versions to print.
if len(sys.argv) > 1:
    framework_name = sys.argv[1]
else:
    # Assume using d2l-builder docker container
    # Here all the frameworks are installed and no CUDA support
    framework_name = None

print("*"*10, "D2L Framework Version Details", "*"*10)

if framework_name:
    # Print CUDA version
    # os.system streams nvcc's own output; its exit status is printed after.
    print("nvcc --version")
    print(os.system("nvcc --version"))

if framework_name=="pytorch":
    # Print PyTorch versions
    print(f"Framework Name: {framework_name}")
    import torch; print(f"torch version: {torch.__version__}")
    import torchvision; print(f"torchvision version: {torchvision.__version__}")
    import gym; print(f"gym version: {gym.__version__}")
    import gpytorch; print(f"gpytorch version: {gpytorch.__version__}")
    import syne_tune; print(f"syne_tune version: {syne_tune.__version__}")

if framework_name=="tensorflow":
    # Print TensorFlow versions
    print(f"Framework Name: {framework_name}")
    import tensorflow; print(f"tensorflow version: {tensorflow.__version__}")
    import tensorflow_probability; print(f"tensorflow_probability version: {tensorflow_probability.__version__}")

if framework_name=="jax":
    # Print JAX versions
    print(f"Framework Name: {framework_name}")
    import jax; print(f"jax version: {jax.__version__}")
    import jaxlib; print(f"jaxlib version: {jaxlib.__version__}")
    import flax; print(f"flax version: {flax.__version__}")
    import tensorflow_datasets; print(f"tensorflow_datasets version: {tensorflow_datasets.__version__}")

if framework_name=="mxnet":
    # Print MXNet versions
    print(f"Framework Name: {framework_name}")
    import mxnet; print(f"MXNet version: {mxnet.__version__}")

# Print d2lbook version (always installed in the CI images).
import d2lbook; print(f"d2lbook version: {d2lbook.__version__}")
print("*"*10, "D2L Framework Version Details", "*"*10)
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | false |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/contrib/to-rm-mx-contrib-text/d2lzh/utils.py | contrib/to-rm-mx-contrib-text/d2lzh/utils.py | import collections
from d2lzh import text
import math
import os
import random
import sys
import tarfile
import time
import zipfile
from IPython import display
from matplotlib import pyplot as plt
import mxnet as mx
from mxnet import autograd, gluon, image, init, nd
from mxnet.gluon import data as gdata, loss as gloss, nn, utils as gutils
import numpy as np
# Pascal VOC2012 semantic-segmentation class names; list index = class id.
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
               'diningtable', 'dog', 'horse', 'motorbike', 'person',
               'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']

# RGB color per class; VOC_COLORMAP[i] renders VOC_CLASSES[i].
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                [0, 64, 128]]
def bbox_to_rect(bbox, color):
    """Convert a (xmin, ymin, xmax, ymax) bounding box to a matplotlib Rectangle."""
    x_min, y_min = bbox[0], bbox[1]
    # Rectangle wants the upper-left corner plus width/height, not corners.
    return plt.Rectangle(xy=(x_min, y_min), width=bbox[2] - x_min,
                         height=bbox[3] - y_min, fill=False, edgecolor=color,
                         linewidth=2)
class Benchmark():
    """Context manager that measures and prints elapsed wall-clock time."""

    def __init__(self, prefix=None):
        # Optional label printed before the timing line; falsy -> no label.
        self.prefix = (prefix + ' ') if prefix else ''

    def __enter__(self):
        self.start = time.time()

    def __exit__(self, *args):
        elapsed = time.time() - self.start
        print('%stime: %.4f sec' % (self.prefix, elapsed))
def corr2d(X, K):
    """Compute the 2D cross-correlation of input X with kernel K."""
    kernel_h, kernel_w = K.shape
    out_h = X.shape[0] - kernel_h + 1
    out_w = X.shape[1] - kernel_w + 1
    Y = nd.zeros((out_h, out_w))
    for row in range(out_h):
        for col in range(out_w):
            # Elementwise product of the sliding window with the kernel.
            window = X[row: row + kernel_h, col: col + kernel_w]
            Y[row, col] = (window * K).sum()
    return Y
def count_tokens(samples):
    """Count token frequencies across all samples.

    Parameters
    ----------
    samples : iterable of iterables of tokens.

    Returns
    -------
    collections.Counter mapping token -> occurrence count.
    """
    token_counter = collections.Counter()
    for sample in samples:
        # Counter.update handles unseen tokens itself; the previous manual
        # "if token not in counter" bookkeeping was redundant.
        token_counter.update(sample)
    return token_counter
def data_iter(batch_size, features, labels):
    """Iterate through a data set.

    Yields (features_batch, labels_batch) minibatches in a random order;
    the final batch may be smaller than batch_size.
    """
    num_examples = len(features)
    indices = list(range(num_examples))
    # Shuffle so examples are visited in a fresh random order each epoch.
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        j = nd.array(indices[i: min(i + batch_size, num_examples)])
        yield features.take(j), labels.take(j)
def data_iter_consecutive(corpus_indices, batch_size, num_steps, ctx=None):
    """Sample mini-batches in a consecutive order from sequential data.

    Yields (X, Y) where Y is X shifted one step ahead; adjacent batches
    are contiguous in the corpus, so RNN hidden state can be carried over.
    """
    corpus_indices = nd.array(corpus_indices, ctx=ctx)
    data_len = len(corpus_indices)
    batch_len = data_len // batch_size
    # Drop the tail that does not fill a full (batch_size, batch_len) grid.
    indices = corpus_indices[0 : batch_size * batch_len].reshape((
        batch_size, batch_len))
    # -1 leaves room for Y's one-step shift.
    epoch_size = (batch_len - 1) // num_steps
    for i in range(epoch_size):
        i = i * num_steps
        X = indices[:, i : i + num_steps]
        Y = indices[:, i + 1 : i + num_steps + 1]
        yield X, Y
def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None):
    """Sample mini-batches in a random order from sequential data.

    Each X is a (batch_size, num_steps) batch of corpus windows and Y is
    the same windows shifted one position ahead; windows are drawn in a
    random order, so hidden state cannot be carried between batches.
    """
    # Number of complete windows; -1 leaves room for Y's shift.
    num_examples = (len(corpus_indices) - 1) // num_steps
    epoch_size = num_examples // batch_size
    example_indices = list(range(num_examples))
    random.shuffle(example_indices)

    def _data(pos):
        # One window of num_steps consecutive tokens starting at pos.
        return corpus_indices[pos : pos + num_steps]

    for i in range(epoch_size):
        i = i * batch_size
        batch_indices = example_indices[i : i + batch_size]
        X = nd.array(
            [_data(j * num_steps) for j in batch_indices], ctx=ctx)
        Y = nd.array(
            [_data(j * num_steps + 1) for j in batch_indices], ctx=ctx)
        yield X, Y
def download_imdb(data_dir='../data'):
    """Download the IMDB data set for sentiment analysis.

    Fetches the aclImdb tarball (sha1-verified by gluon's download helper)
    into *data_dir* and unpacks it there.
    """
    url = ('http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz')
    sha1 = '01ada507287d82875905620988597833ad4e0903'
    fname = gutils.download(url, data_dir, sha1_hash=sha1)
    with tarfile.open(fname, 'r') as f:
        # NOTE(review): extractall trusts archive member paths; the sha1
        # pin mitigates tampering, but consider validating member names.
        f.extractall(data_dir)
def _download_pikachu(data_dir):
    """Download the Pikachu detection dataset (RecordIO files) into data_dir."""
    root_url = ('https://apache-mxnet.s3-accelerate.amazonaws.com/'
                'gluon/dataset/pikachu/')
    # filename -> expected sha1 checksum, verified by gutils.download.
    dataset = {'train.rec': 'e6bcb6ffba1ac04ff8a9b1115e650af56ee969c8',
               'train.idx': 'dcf7318b2602c06428b9988470c731621716c393',
               'val.rec': 'd6c33f799b4d058e82f2cb5bd9a976f69d72d520'}
    for k, v in dataset.items():
        gutils.download(root_url + k, os.path.join(data_dir, k), sha1_hash=v)
def download_voc_pascal(data_dir='../data'):
    """Download the Pascal VOC2012 Dataset.

    Returns the path of the extracted VOCdevkit/VOC2012 directory.
    """
    voc_dir = os.path.join(data_dir, 'VOCdevkit/VOC2012')
    url = ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012'
           '/VOCtrainval_11-May-2012.tar')
    sha1 = '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'
    fname = gutils.download(url, data_dir, sha1_hash=sha1)
    with tarfile.open(fname, 'r') as f:
        # NOTE(review): extractall trusts archive member paths; the sha1
        # pin mitigates tampering.
        f.extractall(data_dir)
    return voc_dir
def evaluate_accuracy(data_iter, net, ctx=[mx.cpu()]):
    """Evaluate accuracy of a model on the given data set.

    NOTE(review): the mutable default ctx=[mx.cpu()] is shared across
    calls; it is only read here, so this is safe, but pass an explicit
    context list to target other devices.
    """
    if isinstance(ctx, mx.Context):
        ctx = [ctx]  # accept a bare context as well as a list
    acc_sum, n = nd.array([0]), 0
    for batch in data_iter:
        features, labels, _ = _get_batch(batch, ctx)
        for X, y in zip(features, labels):
            y = y.astype('float32')
            # Accumulate correct-prediction counts on the CPU.
            acc_sum += (net(X).argmax(axis=1) == y).sum().copyto(mx.cpu())
            n += y.size
        acc_sum.wait_to_read()
    return acc_sum.asscalar() / n
def _get_batch(batch, ctx):
    """Return features and labels on ctx.

    Splits a (features, labels) batch across the contexts in *ctx* and also
    returns the batch size.
    """
    features, labels = batch
    if labels.dtype != features.dtype:
        # Match dtypes so downstream loss computations line up.
        labels = labels.astype(features.dtype)
    return (gutils.split_and_load(features, ctx),
            gutils.split_and_load(labels, ctx), features.shape[0])
def get_data_ch7():
    """Get the data set used in Chapter 7 (airfoil self-noise).

    Returns standardized (zero mean, unit variance) features and labels;
    the last column of the file is the label.
    """
    data = np.genfromtxt('../data/airfoil_self_noise.dat', delimiter='\t')
    data = (data - data.mean(axis=0)) / data.std(axis=0)
    return nd.array(data[:, :-1]), nd.array(data[:, -1])
def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class indices to their text labels."""
    names = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
             'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    result = []
    for label in labels:
        result.append(names[int(label)])
    return result
def get_tokenized_imdb(data):
    """Tokenize IMDB reviews by lowercasing and splitting on single spaces."""
    return [[word.lower() for word in review.split(' ')]
            for review, _ in data]
def get_vocab_imdb(data):
    """Get the vocab for the IMDB data set for sentiment analysis.

    Tokens appearing fewer than 5 times are dropped; '<pad>' is reserved
    for sequence padding (see `preprocess_imdb`).
    """
    tokenized_data = get_tokenized_imdb(data)
    counter = collections.Counter([tk for st in tokenized_data for tk in st])
    return text.vocab.Vocabulary(counter, min_freq=5,
                                 reserved_tokens=['<pad>'])
def grad_clipping(params, theta, ctx):
    """Clip the gradient.

    Rescales all parameter gradients in place so their global L2 norm
    does not exceed `theta`.  A `theta` of None disables clipping.
    """
    if theta is not None:
        norm = nd.array([0], ctx)
        for param in params:
            norm += (param.grad ** 2).sum()
        norm = norm.sqrt().asscalar()
        if norm > theta:
            for param in params:
                # In-place scaling keeps the gradient buffers shared with the trainer.
                param.grad[:] *= theta / norm
def linreg(X, w, b):
    """Linear regression model: returns X @ w + b (b broadcasts over rows)."""
    return nd.dot(X, w) + b
def load_data_fashion_mnist(batch_size, resize=None, root=os.path.join(
        '~', '.mxnet', 'datasets', 'fashion-mnist')):
    """Download the fashion mnist dataset and then load into memory.

    Returns (train_iter, test_iter) DataLoaders.  If `resize` is given,
    images are resized before being converted to tensors.
    """
    root = os.path.expanduser(root)
    transformer = []
    if resize:
        transformer += [gdata.vision.transforms.Resize(resize)]
    transformer += [gdata.vision.transforms.ToTensor()]
    transformer = gdata.vision.transforms.Compose(transformer)
    mnist_train = gdata.vision.FashionMNIST(root=root, train=True)
    mnist_test = gdata.vision.FashionMNIST(root=root, train=False)
    # Multiprocess data loading is not supported on Windows.
    num_workers = 0 if sys.platform.startswith('win32') else 4
    train_iter = gdata.DataLoader(mnist_train.transform_first(transformer),
                                  batch_size, shuffle=True,
                                  num_workers=num_workers)
    test_iter = gdata.DataLoader(mnist_test.transform_first(transformer),
                                 batch_size, shuffle=False,
                                 num_workers=num_workers)
    return train_iter, test_iter
def load_data_jay_lyrics():
    """Load the Jay Chou lyric data set (available in the Chinese book).

    Returns (corpus_indices, char_to_idx, idx_to_char, vocab_size) built
    from the first 10000 characters of the lyrics.
    """
    with zipfile.ZipFile('../data/jaychou_lyrics.txt.zip') as zin:
        with zin.open('jaychou_lyrics.txt') as f:
            corpus_chars = f.read().decode('utf-8')
    # Flatten line structure so the corpus is one character stream.
    corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
    corpus_chars = corpus_chars[0:10000]
    idx_to_char = list(set(corpus_chars))
    char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
    vocab_size = len(char_to_idx)
    corpus_indices = [char_to_idx[char] for char in corpus_chars]
    return corpus_indices, char_to_idx, idx_to_char, vocab_size
def load_data_pikachu(batch_size, edge_size=256):
    """Download the pikachu dataset and then load into memory.

    Returns (train_iter, val_iter) detection iterators yielding images
    of shape (3, edge_size, edge_size).
    """
    data_dir = '../data/pikachu'
    _download_pikachu(data_dir)
    train_iter = image.ImageDetIter(
        path_imgrec=os.path.join(data_dir, 'train.rec'),
        path_imgidx=os.path.join(data_dir, 'train.idx'),
        batch_size=batch_size,
        data_shape=(3, edge_size, edge_size),
        shuffle=True,
        # Random crops must keep at least 95% of each object visible.
        rand_crop=1,
        min_object_covered=0.95,
        max_attempts=200)
    val_iter = image.ImageDetIter(
        path_imgrec=os.path.join(data_dir, 'val.rec'),
        batch_size=batch_size,
        data_shape=(3, edge_size, edge_size),
        shuffle=False)
    return train_iter, val_iter
def load_data_time_machine():
    """Load the time machine data set (available in the English book).

    Returns:
        corpus_indices: the first 10000 characters mapped to vocab indices.
        char_to_idx: dict mapping each character to its index.
        idx_to_char: list mapping each index back to its character.
        vocab_size: number of distinct characters.
    """
    # Specify the encoding explicitly so the read does not depend on the
    # platform's locale default (e.g. cp1252 on Windows).
    with open('../data/timemachine.txt', encoding='utf-8') as f:
        corpus_chars = f.read()
    corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ').lower()
    corpus_chars = corpus_chars[0:10000]
    idx_to_char = list(set(corpus_chars))
    char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
    vocab_size = len(char_to_idx)
    corpus_indices = [char_to_idx[char] for char in corpus_chars]
    return corpus_indices, char_to_idx, idx_to_char, vocab_size
def _make_list(obj, default_values=None):
if obj is None:
obj = default_values
elif not isinstance(obj, (list, tuple)):
obj = [obj]
return obj
def mkdir_if_not_exist(path):
    """Make a directory (and its parents) if it does not exist.

    `path` may be a single path string or a sequence of path components
    to be joined.  The previous implementation unconditionally applied
    `os.path.join(*path)`, which splats a plain string into its
    individual characters; accept both forms, matching the sibling
    helper in `d2lzh.text.embedding`.
    """
    if not isinstance(path, str):
        path = os.path.join(*path)
    if not os.path.exists(path):
        os.makedirs(path)
def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
                num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx):
    """Predict the next `num_chars` characters with an RNN model.

    The model is first warmed up on `prefix`; subsequent characters are
    chosen greedily (argmax over the output distribution).
    """
    state = init_rnn_state(1, num_hiddens, ctx)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars + len(prefix) - 1):
        # Feed the last emitted character back in as the next input.
        X = to_onehot(nd.array([output[-1]], ctx=ctx), vocab_size)
        (Y, state) = rnn(X, state, params)
        if t < len(prefix) - 1:
            # Still inside the prefix: force-feed the ground-truth character.
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y[0].argmax(axis=1).asscalar()))
    return ''.join([idx_to_char[i] for i in output])
def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx, idx_to_char,
                      char_to_idx):
    """Predict the next `num_chars` characters with a Gluon RNN model.

    Mirrors `predict_rnn` but drives a Gluon `model` instead of a raw
    (rnn, params) pair.
    """
    state = model.begin_state(batch_size=1, ctx=ctx)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars + len(prefix) - 1):
        X = nd.array([output[-1]], ctx=ctx).reshape((1, 1))
        (Y, state) = model(X, state)
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y.argmax(axis=1).asscalar()))
    return ''.join([idx_to_char[i] for i in output])
def predict_sentiment(net, vocab, sentence):
    """Predict the sentiment of a given sentence.

    `sentence` is a sequence of tokens; returns 'positive' or 'negative'.
    """
    sentence = nd.array(vocab.to_indices(sentence), ctx=try_gpu())
    label = nd.argmax(net(sentence.reshape((1, -1))), axis=1)
    return 'positive' if label.asscalar() == 1 else 'negative'
def preprocess_imdb(data, vocab):
    """Preprocess the IMDB data set for sentiment analysis.

    Tokenizes, indexes, and pads/truncates every review to exactly 500
    tokens, returning (features, labels) NDArrays.
    """
    max_l = 500
    def pad(x):
        # Truncate long reviews; pad short ones with the '<pad>' token index.
        return x[:max_l] if len(x) > max_l else x + [
            vocab.token_to_idx['<pad>']] * (max_l - len(x))
    tokenized_data = get_tokenized_imdb(data)
    features = nd.array([pad(vocab.to_indices(x)) for x in tokenized_data])
    labels = nd.array([score for _, score in data])
    return features, labels
def read_imdb(folder='train'):
    """Read the IMDB data set for sentiment analysis.

    Returns a shuffled list of [review_text, label] pairs where label is
    1 for 'pos' and 0 for 'neg'.
    """
    data = []
    for label in ['pos', 'neg']:
        folder_name = os.path.join('../data/aclImdb/', folder, label)
        for file in os.listdir(folder_name):
            with open(os.path.join(folder_name, file), 'rb') as f:
                review = f.read().decode('utf-8').replace('\n', '').lower()
                data.append([review, 1 if label == 'pos' else 0])
    random.shuffle(data)
    return data
def read_voc_images(root='../data/VOCdevkit/VOC2012', is_train=True):
    """Read VOC images.

    Returns parallel lists (features, labels) where features are JPEG
    images and labels are the corresponding PNG segmentation masks.
    """
    txt_fname = '%s/ImageSets/Segmentation/%s' % (
        root, 'train.txt' if is_train else 'val.txt')
    with open(txt_fname, 'r') as f:
        images = f.read().split()
    features, labels = [None] * len(images), [None] * len(images)
    for i, fname in enumerate(images):
        features[i] = image.imread('%s/JPEGImages/%s.jpg' % (root, fname))
        labels[i] = image.imread(
            '%s/SegmentationClass/%s.png' % (root, fname))
    return features, labels
class Residual(nn.Block):
    """The residual block.

    Two 3x3 convolutions with batch norm; the input is added back to the
    output (identity shortcut, or a 1x1 convolution when the number of
    channels or the spatial resolution changes).
    """
    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        if use_1x1conv:
            # Project the shortcut so its shape matches the main path.
            self.conv3 = nn.Conv2D(num_channels, kernel_size=1,
                                   strides=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def forward(self, X):
        Y = nd.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        return nd.relu(Y + X)
def resnet18(num_classes):
    """The ResNet-18 model.

    A small-image variant: the stem is a single 3x3 convolution (no 7x7
    conv or max-pooling), followed by four stages of two residual blocks
    each, global average pooling, and a dense classifier.
    """
    net = nn.Sequential()
    net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
            nn.BatchNorm(), nn.Activation('relu'))
    def resnet_block(num_channels, num_residuals, first_block=False):
        blk = nn.Sequential()
        for i in range(num_residuals):
            if i == 0 and not first_block:
                # First block of each later stage halves H/W and changes channels.
                blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
            else:
                blk.add(Residual(num_channels))
        return blk
    net.add(resnet_block(64, 2, first_block=True),
            resnet_block(128, 2),
            resnet_block(256, 2),
            resnet_block(512, 2))
    net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
    return net
class RNNModel(nn.Block):
    """RNN model.

    Wraps a Gluon recurrent layer with a one-hot input encoding and a
    dense output layer over the vocabulary.
    """
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.dense = nn.Dense(vocab_size)

    def forward(self, inputs, state):
        # Transpose to (num_steps, batch) before one-hot encoding, the
        # layout Gluon RNN layers expect.
        X = nd.one_hot(inputs.T, self.vocab_size)
        Y, state = self.rnn(X, state)
        # Collapse (num_steps, batch, hidden) to 2-D for the dense layer.
        output = self.dense(Y.reshape((-1, Y.shape[-1])))
        return output, state

    def begin_state(self, *args, **kwargs):
        return self.rnn.begin_state(*args, **kwargs)
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,
             legend=None, figsize=(3.5, 2.5)):
    """Plot x and log(y).

    Optionally overlays a second dashed series (x2_vals, y2_vals) with a
    legend.
    """
    set_figsize(figsize)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        plt.semilogy(x2_vals, y2_vals, linestyle=':')
        plt.legend(legend)
    plt.show()
def set_figsize(figsize=(3.5, 2.5)):
    """Set matplotlib figure size (and switch display format to svg)."""
    use_svg_display()
    plt.rcParams['figure.figsize'] = figsize
def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent.

    Updates each parameter in place; gradients are averaged by dividing
    by `batch_size`.
    """
    for param in params:
        param[:] = param - lr * param.grad / batch_size
def show_bboxes(axes, bboxes, labels=None, colors=None):
    """Show bounding boxes.

    Draws each box on `axes`, cycling through `colors` (default
    b/g/r/m/k) and writing `labels[i]` inside the box when provided.
    """
    labels = _make_list(labels)
    colors = _make_list(colors, ['b', 'g', 'r', 'm', 'k'])
    for i, bbox in enumerate(bboxes):
        color = colors[i % len(colors)]
        rect = bbox_to_rect(bbox.asnumpy(), color)
        axes.add_patch(rect)
        if labels and len(labels) > i:
            # Pick a text color that contrasts with the box color.
            text_color = 'k' if color == 'w' else 'w'
            axes.text(rect.xy[0], rect.xy[1], labels[i],
                      va='center', ha='center', fontsize=9, color=text_color,
                      bbox=dict(facecolor=color, lw=0))
def show_fashion_mnist(images, labels):
    """Plot Fashion-MNIST images with labels.

    `images` are 784-element NDArrays reshaped to 28x28 for display.
    """
    use_svg_display()
    _, figs = plt.subplots(1, len(images), figsize=(12, 12))
    for f, img, lbl in zip(figs, images, labels):
        f.imshow(img.reshape((28, 28)).asnumpy())
        f.set_title(lbl)
        f.axes.get_xaxis().set_visible(False)
        f.axes.get_yaxis().set_visible(False)
def show_images(imgs, num_rows, num_cols, scale=2):
    """Plot a list of images in a num_rows x num_cols grid.

    Returns the matplotlib axes array.
    """
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
    for i in range(num_rows):
        for j in range(num_cols):
            axes[i][j].imshow(imgs[i * num_cols + j].asnumpy())
            axes[i][j].axes.get_xaxis().set_visible(False)
            axes[i][j].axes.get_yaxis().set_visible(False)
    return axes
def show_trace_2d(f, res):
    """Show the trace of 2d variables during optimization.

    `res` is a sequence of (x1, x2) points; the objective `f` is drawn
    as contour lines beneath the optimization path.
    """
    x1, x2 = zip(*res)
    set_figsize()
    plt.plot(x1, x2, '-o', color='#ff7f0e')
    # Grid spans the trace with some margin for the contour plot.
    x1 = np.arange(-5.5, 1.0, 0.1)
    x2 = np.arange(min(-3.0, min(x2) - 1), max(1.0, max(x2) + 1), 0.1)
    x1, x2 = np.meshgrid(x1, x2)
    plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
    plt.xlabel('x1')
    plt.ylabel('x2')
def squared_loss(y_hat, y):
    """Halved squared error between predictions and labels (label is
    reshaped to match the prediction's shape)."""
    residual = y_hat - y.reshape(y_hat.shape)
    return residual ** 2 / 2
def to_onehot(X, size):
    """Represent inputs with one-hot encoding.

    Returns a list of (batch, size) NDArrays, one per time step
    (columns of X).
    """
    return [nd.one_hot(x, size) for x in X.T]
def train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs):
    """Train and evaluate a model.

    Supports multi-device training: each batch is split across the
    contexts in `ctx`, losses are backpropagated per slice, and a single
    trainer step is taken per batch.
    """
    print('training on', ctx)
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, m, start = 0.0, 0.0, 0, 0, time.time()
        for i, batch in enumerate(train_iter):
            Xs, ys, batch_size = _get_batch(batch, ctx)
            with autograd.record():
                y_hats = [net(X) for X in Xs]
                ls = [loss(y_hat, y) for y_hat, y in zip(y_hats, ys)]
            for l in ls:
                l.backward()
            trainer.step(batch_size)
            train_l_sum += sum([l.sum().asscalar() for l in ls])
            n += sum([l.size for l in ls])
            train_acc_sum += sum([(y_hat.argmax(axis=1) == y).sum().asscalar()
                                  for y_hat, y in zip(y_hats, ys)])
            m += sum([y.size for y in ys])
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec'
              % (epoch + 1, train_l_sum / n, train_acc_sum / m, test_acc,
                 time.time() - start))
def train_2d(trainer):
    """Optimize the objective function of 2d variables with a customized trainer."""
    x1, x2, s_x1, s_x2 = -5, -2, 0, 0
    trace = [(x1, x2)]
    for epoch in range(20):
        x1, x2, s_x1, s_x2 = trainer(x1, x2, s_x1, s_x2)
        trace.append((x1, x2))
        print('epoch %d, x1 %f, x2 %f' % (epoch + 1, x1, x2))
    return trace
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                          vocab_size, ctx, corpus_indices, idx_to_char,
                          char_to_idx, is_random_iter, num_epochs, num_steps,
                          lr, clipping_theta, batch_size, pred_period,
                          pred_len, prefixes):
    """Train an RNN model and predict the next item in the sequence.

    With random sampling the hidden state is re-initialized per batch;
    with consecutive sampling it is carried across batches but detached
    from the graph so gradients do not flow through past epochs.
    Every `pred_period` epochs, prints perplexity and sample generations.
    """
    if is_random_iter:
        data_iter_fn = data_iter_random
    else:
        data_iter_fn = data_iter_consecutive
    params = get_params()
    loss = gloss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        if not is_random_iter:
            state = init_rnn_state(batch_size, num_hiddens, ctx)
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, ctx)
        for X, Y in data_iter:
            if is_random_iter:
                state = init_rnn_state(batch_size, num_hiddens, ctx)
            else:
                # Detach so backprop stops at the batch boundary.
                for s in state:
                    s.detach()
            with autograd.record():
                inputs = to_onehot(X, vocab_size)
                (outputs, state) = rnn(inputs, state, params)
                outputs = nd.concat(*outputs, dim=0)
                # Flatten labels to match the time-major concatenated outputs.
                y = Y.T.reshape((-1,))
                l = loss(outputs, y).mean()
            l.backward()
            grad_clipping(params, clipping_theta, ctx)
            # Loss was already averaged, so step with batch size 1.
            sgd(params, lr, 1)
            l_sum += l.asscalar() * y.size
            n += y.size
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn(
                    prefix, pred_len, rnn, params, init_rnn_state,
                    num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx))
def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
                                corpus_indices, idx_to_char, char_to_idx,
                                num_epochs, num_steps, lr, clipping_theta,
                                batch_size, pred_period, pred_len, prefixes):
    """Train a Gluon RNN model and predict the next item in the sequence.

    Gluon counterpart of `train_and_predict_rnn`, using consecutive
    sampling and a 'sgd' Trainer.
    """
    loss = gloss.SoftmaxCrossEntropyLoss()
    model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01))
    trainer = gluon.Trainer(model.collect_params(), 'sgd',
                            {'learning_rate': lr, 'momentum': 0, 'wd': 0})
    for epoch in range(num_epochs):
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_consecutive(
            corpus_indices, batch_size, num_steps, ctx)
        state = model.begin_state(batch_size=batch_size, ctx=ctx)
        for X, Y in data_iter:
            # Detach state so gradients do not flow across batch boundaries.
            for s in state:
                s.detach()
            with autograd.record():
                (output, state) = model(X, state)
                y = Y.T.reshape((-1,))
                l = loss(output, y).mean()
            l.backward()
            # Clip on the parameter values currently held by the trainer.
            params = [p.data() for p in model.collect_params().values()]
            grad_clipping(params, clipping_theta, ctx)
            # Loss was already averaged, so step with batch size 1.
            trainer.step(1)
            l_sum += l.asscalar() * y.size
            n += y.size
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn_gluon(
                    prefix, pred_len, model, vocab_size, ctx, idx_to_char,
                    char_to_idx))
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, trainer=None):
    """Train and evaluate a model with CPU.

    Uses hand-rolled `sgd(params, lr, batch_size)` when no `trainer`
    is supplied; otherwise steps the given Gluon trainer.
    """
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            if trainer is None:
                sgd(params, lr, batch_size)
            else:
                trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
def train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx,
              num_epochs):
    """Train and evaluate a model with CPU or GPU.

    Single-device training loop: each batch is moved to `ctx` and the
    softmax cross-entropy loss is minimized with the given trainer.
    """
    print('training on', ctx)
    loss = gloss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        for X, y in train_iter:
            X, y = X.as_in_context(ctx), y.as_in_context(ctx)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc,
                 time.time() - start))
def train_ch7(trainer_fn, states, hyperparams, features, labels, batch_size=10,
              num_epochs=2):
    """Train a linear regression model.

    `trainer_fn(params, states, hyperparams)` performs the parameter
    update; used to compare hand-written optimizers in Chapter 7.
    Plots the loss curve sampled every 100 examples.
    """
    net, loss = linreg, squared_loss
    w, b = nd.random.normal(scale=0.01, shape=(features.shape[1], 1)), nd.zeros(1)
    w.attach_grad()
    b.attach_grad()
    def eval_loss():
        # Mean loss on the full data set.
        return loss(net(features, w, b), labels).mean().asscalar()
    ls = [eval_loss()]
    data_iter = gdata.DataLoader(
        gdata.ArrayDataset(features, labels), batch_size, shuffle=True)
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            with autograd.record():
                l = loss(net(X, w, b), y).mean()
            l.backward()
            trainer_fn([w, b], states, hyperparams)
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    set_figsize()
    plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    plt.xlabel('epoch')
    plt.ylabel('loss')
def train_gluon_ch7(trainer_name, trainer_hyperparams, features, labels,
                    batch_size=10, num_epochs=2):
    """Train a linear regression model with a given Gluon trainer.

    Gluon counterpart of `train_ch7`: the optimizer is selected by name
    (e.g. 'sgd', 'adam') with `trainer_hyperparams`.
    """
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize(init.Normal(sigma=0.01))
    loss = gloss.L2Loss()
    def eval_loss():
        return loss(net(features), labels).mean().asscalar()
    ls = [eval_loss()]
    data_iter = gdata.DataLoader(
        gdata.ArrayDataset(features, labels), batch_size, shuffle=True)
    trainer = gluon.Trainer(net.collect_params(),
                            trainer_name, trainer_hyperparams)
    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    set_figsize()
    plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    plt.xlabel('epoch')
    plt.ylabel('loss')
def try_all_gpus():
    """Return all available GPUs, or [mx.cpu()] if there is no GPU.

    Probes up to 16 GPU contexts by allocating a tiny array; stops at
    the first context that raises an MXNetError.
    """
    ctxes = []
    try:
        for i in range(16):
            ctx = mx.gpu(i)
            _ = nd.array([0], ctx=ctx)
            ctxes.append(ctx)
    except mx.base.MXNetError:
        pass
    if not ctxes:
        ctxes = [mx.cpu()]
    return ctxes
def try_gpu():
    """If GPU is available, return mx.gpu(0); else return mx.cpu().

    Availability is detected by attempting a tiny allocation on the GPU.
    """
    try:
        ctx = mx.gpu()
        _ = nd.array([0], ctx=ctx)
    except mx.base.MXNetError:
        ctx = mx.cpu()
    return ctx
def use_svg_display():
    """Use svg format to display plots in Jupyter."""
    display.set_matplotlib_formats('svg')
def voc_label_indices(colormap, colormap2label):
    """Assign label indices for Pascal VOC2012 Dataset.

    Packs each pixel's RGB triple into a single integer
    (R*256 + G)*256 + B and looks it up in `colormap2label`.
    """
    colormap = colormap.astype('int32')
    idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
           + colormap[:, :, 2])
    return colormap2label[idx]
def voc_rand_crop(feature, label, height, width):
    """Random cropping for images of the Pascal VOC2012 Dataset.

    Crops `feature` randomly, then crops `label` with the same rectangle
    so image and segmentation mask stay aligned.
    """
    feature, rect = image.random_crop(feature, (width, height))
    label = image.fixed_crop(label, *rect)
    return feature, label
class VOCSegDataset(gdata.Dataset):
    """The Pascal VOC2012 Dataset.

    Images smaller than `crop_size` are filtered out; remaining images
    are normalized with ImageNet statistics and randomly cropped per
    access.
    """
    def __init__(self, is_train, crop_size, voc_dir, colormap2label):
        # ImageNet per-channel mean/std used for normalization.
        self.rgb_mean = nd.array([0.485, 0.456, 0.406])
        self.rgb_std = nd.array([0.229, 0.224, 0.225])
        self.crop_size = crop_size
        data, labels = read_voc_images(root=voc_dir, is_train=is_train)
        self.data = [self.normalize_image(im) for im in self.filter(data)]
        self.labels = self.filter(labels)
        self.colormap2label = colormap2label
        print('read ' + str(len(self.data)) + ' examples')

    def normalize_image(self, data):
        return (data.astype('float32') / 255 - self.rgb_mean) / self.rgb_std

    def filter(self, images):
        # Keep only images large enough to crop.
        return [im for im in images if (
            im.shape[0] >= self.crop_size[0] and
            im.shape[1] >= self.crop_size[1])]

    def __getitem__(self, idx):
        data, labels = voc_rand_crop(self.data[idx], self.labels[idx],
                                     *self.crop_size)
        # HWC -> CHW for the network; labels become index maps.
        return (data.transpose((2, 0, 1)),
                voc_label_indices(labels, self.colormap2label))

    def __len__(self):
        return len(self.data)
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | false |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/contrib/to-rm-mx-contrib-text/d2lzh/__init__.py | contrib/to-rm-mx-contrib-text/d2lzh/__init__.py | from . import text
from .utils import *
__version__ = '1.0.0'
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | false |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/contrib/to-rm-mx-contrib-text/d2lzh/text/vocab.py | contrib/to-rm-mx-contrib-text/d2lzh/text/vocab.py | class Vocabulary:
    def __init__(self, counter, min_freq=0, reserved_tokens=None):
        """Build a vocabulary from a token-frequency counter.

        Tokens are ordered by descending frequency (ties broken by the
        initial key sort); index 0 is the unknown token '<unk>', followed
        by `reserved_tokens`.
        """
        if reserved_tokens is None:
            reserved_tokens = []
        # Sort according to frequencies
        self.token_freqs = sorted(counter.items(), key=lambda x: x[0])
        self.token_freqs.sort(key=lambda x: x[1], reverse=True)
        self.unk, uniq_tokens = 0, ['<unk>'] + reserved_tokens
        # NOTE(review): `token not in uniq_tokens` is a linear scan per
        # token — quadratic overall for large vocabularies.
        uniq_tokens += [token for token, freq in self.token_freqs
                        if freq >= min_freq and token not in uniq_tokens]
        self.idx_to_token, self.token_to_idx = [], dict()
        for token in uniq_tokens:
            self.idx_to_token.append(token)
            self.token_to_idx[token] = len(self.idx_to_token) - 1
    def __len__(self):
        """Number of tokens in the vocabulary (including '<unk>' and reserved)."""
        return len(self.idx_to_token)
    def to_indices(self, tokens):
        """Map a token (or nested list/tuple of tokens) to indices.

        Unknown tokens map to `self.unk` (index 0); structure is preserved
        recursively.
        """
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.to_indices(token) for token in tokens]
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/contrib/to-rm-mx-contrib-text/d2lzh/text/__init__.py | contrib/to-rm-mx-contrib-text/d2lzh/text/__init__.py | from . import vocab
from . import embedding | python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | false |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/contrib/to-rm-mx-contrib-text/d2lzh/text/embedding.py | contrib/to-rm-mx-contrib-text/d2lzh/text/embedding.py | import os
from mxnet import nd, gluon
import tarfile
import zipfile
# Base URL hosting the pretrained embedding archives.
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
# Registry: embedding family -> {file name: (archive url, sha1 hash)}.
PRETRAINED_FILE = {
    'glove':{},
    'fasttext':{}
}
PRETRAINED_FILE['glove']['glove.6b.50d.txt'] = (DATA_URL + 'glove.6B.50d.zip',
                                '0b8703943ccdb6eb788e6f091b8946e82231bc4d')
PRETRAINED_FILE['glove']['glove.6b.100d.txt'] = (DATA_URL + 'glove.6B.100d.zip',
                                 'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')
PRETRAINED_FILE['glove']['glove.42b.300d.txt'] = (DATA_URL + 'glove.42B.300d.zip',
                                  'b5116e234e9eb9076672cfeabf5469f3eec904fa')
PRETRAINED_FILE['fasttext']['wiki.en'] = (DATA_URL + 'wiki.en.zip',
                          'c1816da3821ae9f43899be655002f6c723e91b88')
def mkdir_if_not_exist(path):
    """Create `path` (a string or a sequence of path components) if absent."""
    target = path if isinstance(path, str) else os.path.join(*path)
    if not os.path.exists(target):
        os.makedirs(target)
def download(embedding_name, pretrained_file_name, cache_dir=os.path.join('..', 'data')):
    """Download a registered pretrained-embedding archive into `cache_dir`.

    Looks the (url, sha1) pair up in PRETRAINED_FILE; returns the local
    file path.
    """
    url, sha1 = PRETRAINED_FILE[embedding_name][pretrained_file_name]
    mkdir_if_not_exist(cache_dir)
    return gluon.utils.download(url, cache_dir, sha1_hash=sha1)
def download_extract(embedding_name, pretrained_file_name, folder=None):
    """Download and extract a zip/tar file.

    Returns `base_dir/folder` when `folder` is given, otherwise the
    archive path with its extension stripped.
    """
    fname = download(embedding_name, pretrained_file_name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        fp = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        fp = tarfile.open(fname, 'r')
    else:
        assert False, 'Only zip/tar files can be extracted'
    # Close the archive handle after extraction (the original leaked it).
    with fp:
        fp.extractall(base_dir)
    if folder:
        return os.path.join(base_dir, folder)
    else:
        return data_dir
def get_pretrained_file_names(embedding_name=None):
    """List the available files for one embedding family, or the whole registry."""
    if embedding_name is None:
        return PRETRAINED_FILE
    return PRETRAINED_FILE[embedding_name].keys()
def create(embedding_name, pretrained_file_name, vocabulary=None):
    """Factory: build a TokenEmbedding for the given pretrained file name."""
    return TokenEmbedding(embedding_name, pretrained_file_name.lower(), vocabulary)
class TokenEmbedding:
    """Token embedding loaded from a pretrained GloVe/fastText file.

    Index 0 is reserved for the unknown token '<unk>', whose vector is
    all zeros.  If `vocabulary` is given, the embedding matrix is
    re-indexed to that vocabulary's token order.
    """
    def __init__(self, embedding_name, pretrained_file_name, vocabulary=None):
        self.idx_to_token, self.idx_to_vec = self._load_embedding(
            embedding_name, pretrained_file_name)
        self.unknown_idx = 0
        self.token_to_idx = {token: idx for idx, token in
                             enumerate(self.idx_to_token)}
        if vocabulary is not None:
            # Align rows of idx_to_vec with the external vocabulary;
            # tokens missing from the file fall back to the zero vector.
            indices = [self.token_to_idx.get(token, self.unknown_idx)
                       for token in vocabulary.idx_to_token]
            self.idx_to_vec = self.idx_to_vec[nd.array(indices)]
            self.token_to_idx = vocabulary.token_to_idx
            self.idx_to_token = vocabulary.idx_to_token

    def _load_embedding(self, embedding_name, pretrained_file_name):
        idx_to_token, idx_to_vec = ['<unk>'], []
        data_dir = download_extract(embedding_name, pretrained_file_name)
        # GloVe website: https://nlp.stanford.edu/projects/glove/
        # fastText website: https://fasttext.cc/
        # Pretrained vector files are UTF-8 text; read with an explicit
        # encoding instead of the platform locale default (which breaks
        # on e.g. cp1252 Windows systems).
        with open(os.path.join(data_dir, 'vec.txt'), 'r',
                  encoding='utf-8') as f:
            for line in f:
                elems = line.rstrip().split(' ')
                token, elems = elems[0], [float(elem) for elem in elems[1:]]
                # Skip header information, such as the top row in fastText
                if len(elems) > 1:
                    idx_to_token.append(token)
                    idx_to_vec.append(elems)
        # Prepend the all-zero vector for '<unk>'.
        idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec
        return idx_to_token, nd.array(idx_to_vec)

    def get_vecs_by_tokens(self, tokens):
        """Return the embedding vectors for `tokens`; unknowns map to zeros."""
        indices = [self.token_to_idx.get(token, self.unknown_idx)
                   for token in tokens]
        vecs = self.idx_to_vec[nd.array(indices)]
        return vecs

    def __len__(self):
        return len(self.idx_to_token)
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/d2l/tensorflow.py | d2l/tensorflow.py | DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
import numpy as np
import tensorflow as tf
nn_Module = tf.keras.Model
################# WARNING ################
# The below part is generated automatically through:
# d2lbook build lib
# Don't edit it directly
import collections
import hashlib
import math
import os
import random
import re
import shutil
import sys
import tarfile
import time
import zipfile
from collections import defaultdict
import pandas as pd
import requests
from IPython import display
from matplotlib import pyplot as plt
from matplotlib_inline import backend_inline
d2l = sys.modules[__name__]
import numpy as np
import tensorflow as tf
def use_svg_display():
    """Use the svg format to display a plot in Jupyter.

    Defined in :numref:`sec_calculus`"""
    backend_inline.set_matplotlib_formats('svg')
def set_figsize(figsize=(3.5, 2.5)):
    """Set the figure size for matplotlib.

    Defined in :numref:`sec_calculus`"""
    use_svg_display()
    d2l.plt.rcParams['figure.figsize'] = figsize
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    """Configure the axes (labels, scales, limits, legend) for matplotlib.

    Defined in :numref:`sec_calculus`"""
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
         ylim=None, xscale='linear', yscale='linear',
         fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
    """Plot data points.

    `X`/`Y` may each be one series or a list of series; a single `X` is
    broadcast across all `Y` series.

    Defined in :numref:`sec_calculus`"""
    if legend is None:
        legend = []
    set_figsize(figsize)
    axes = axes if axes else d2l.plt.gca()
    # Return True if X has one axis (i.e. is a single data series).
    def has_one_axis(X):
        return (hasattr(X, "ndim") and X.ndim == 1 or isinstance(X, list)
                and not hasattr(X[0], "__len__"))
    if has_one_axis(X):
        X = [X]
    if Y is None:
        X, Y = [[]] * len(X), X
    elif has_one_axis(Y):
        Y = [Y]
    if len(X) != len(Y):
        X = X * len(Y)
    axes.cla()
    for x, y, fmt in zip(X, Y, fmts):
        if len(x):
            axes.plot(x, y, fmt)
        else:
            axes.plot(y, fmt)
    set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
class Timer:
    """Record and aggregate multiple elapsed run times."""
    def __init__(self):
        """Defined in :numref:`subsec_linear_model`"""
        self.times = []
        self.start()

    def start(self):
        """Start the timer."""
        self.tik = time.time()

    def stop(self):
        """Stop the timer, record the elapsed time, and return it."""
        elapsed = time.time() - self.tik
        self.times.append(elapsed)
        return elapsed

    def avg(self):
        """Average of all recorded times."""
        return sum(self.times) / len(self.times)

    def sum(self):
        """Total of all recorded times."""
        return sum(self.times)

    def cumsum(self):
        """Cumulative recorded times as a plain Python list."""
        return np.array(self.times).cumsum().tolist()
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + Gaussian noise.

    Defined in :numref:`sec_linear_scratch`"""
    X = d2l.zeros((num_examples, w.shape[0]))
    X += tf.random.normal(shape=X.shape)
    y = d2l.matmul(X, tf.reshape(w, (-1, 1))) + b
    # Add observation noise with std 0.01.
    y += tf.random.normal(shape=y.shape, stddev=0.01)
    y = d2l.reshape(y, (-1, 1))
    return X, y
def linreg(X, w, b):
    """The linear regression model: X @ w + b.

    Defined in :numref:`sec_linear_scratch`"""
    return d2l.matmul(X, w) + b
def squared_loss(y_hat, y):
    """Halved squared loss.

    Defined in :numref:`sec_linear_scratch`"""
    return (y_hat - d2l.reshape(y, y_hat.shape)) ** 2 / 2
def sgd(params, grads, lr, batch_size):
    """Minibatch stochastic gradient descent (in-place variable update).

    Defined in :numref:`sec_linear_scratch`"""
    for param, grad in zip(params, grads):
        param.assign_sub(lr*grad/batch_size)
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a TensorFlow data iterator.

    Defined in :numref:`sec_linear_concise`"""
    dataset = tf.data.Dataset.from_tensor_slices(data_arrays)
    if is_train:
        # Shuffle only for training data.
        dataset = dataset.shuffle(buffer_size=1000)
    dataset = dataset.batch(batch_size)
    return dataset
def get_fashion_mnist_labels(labels):
    """Return the text labels of the Fashion-MNIST dataset.

    Defined in :numref:`sec_fashion_mnist`"""
    names = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
             'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    return [names[int(index)] for index in labels]
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    """Plot a list of images in a grid; returns the flattened axes.

    Defined in :numref:`sec_fashion_mnist`"""
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        ax.imshow(d2l.numpy(img))
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
def load_data_fashion_mnist(batch_size, resize=None):
    """Download the Fashion-MNIST dataset and load it into memory.

    Defined in :numref:`sec_fashion_mnist`"""
    mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()
    # Divide all pixel values by 255 so they lie in [0, 1], append a
    # channel dimension at the end, and cast labels to int32.
    process = lambda X, y: (tf.expand_dims(X, axis=3) / 255,
                            tf.cast(y, dtype='int32'))
    resize_fn = lambda X, y: (
        tf.image.resize_with_pad(X, resize, resize) if resize else X, y)
    return (
        tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(
            batch_size).shuffle(len(mnist_train[0])).map(resize_fn),
        tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(
            batch_size).map(resize_fn))
def accuracy(y_hat, y):
    """Compute the number of correct predictions.

    Defined in :numref:`sec_softmax_scratch`"""
    # If y_hat holds per-class scores, reduce it to predicted class indices.
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = d2l.argmax(y_hat, axis=1)
    cmp = d2l.astype(y_hat, y.dtype) == y
    return float(d2l.reduce_sum(d2l.astype(cmp, y.dtype)))
def evaluate_accuracy(net, data_iter):
    """Compute the accuracy of a model on the given dataset.

    Defined in :numref:`sec_softmax_scratch`"""
    metric = Accumulator(2)  # number of correct predictions, number of predictions
    for X, y in data_iter:
        metric.add(accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
class Accumulator:
    """Accumulate sums over `n` variables."""
    def __init__(self, n):
        """Defined in :numref:`sec_softmax_scratch`"""
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [total + float(inc)
                     for total, inc in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
def train_epoch_ch3(net, train_iter, loss, updater):
    """Train a model for one epoch (defined in Chapter 3).

    Defined in :numref:`sec_softmax_scratch`"""
    # Sum of training loss, sum of training accuracy, no. of examples
    metric = Accumulator(3)
    for X, y in train_iter:
        # Compute gradients and update parameters
        with tf.GradientTape() as tape:
            y_hat = net(X)
            # Keras' built-in losses take (labels, predictions), unlike this
            # book's own implementations which take (predictions, labels),
            # e.g. the cross-entropy implemented earlier.
            if isinstance(loss, tf.keras.losses.Loss):
                l = loss(y, y_hat)
            else:
                l = loss(y_hat, y)
        if isinstance(updater, tf.keras.optimizers.Optimizer):
            params = net.trainable_variables
            grads = tape.gradient(l, params)
            updater.apply_gradients(zip(grads, params))
        else:
            updater(X.shape[0], tape.gradient(l, updater.params))
        # Keras losses return the mean loss over a batch by default,
        # so rescale by the batch size before accumulating.
        l_sum = l * float(tf.size(y)) if isinstance(
            loss, tf.keras.losses.Loss) else tf.reduce_sum(l)
        metric.add(l_sum, accuracy(y_hat, y), tf.size(y))
    # Return training loss and training accuracy
    return metric[0] / metric[2], metric[1] / metric[2]
class Animator:
    """Plot data incrementally in an animation."""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        """Defined in :numref:`sec_softmax_scratch`"""
        # Incrementally draw multiple lines
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Capture the axis-configuration arguments with a lambda
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts
    def add(self, x, y):
        # Add multiple data points to the figure
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        # Redraw all accumulated series from scratch.
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Train a model (defined in Chapter 3).

    Defined in :numref:`sec_softmax_scratch`"""
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    # Sanity checks that training converged to reasonable loss/accuracy.
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
class Updater():
    """Update parameters using minibatch stochastic gradient descent.

    Defined in :numref:`sec_softmax_scratch`"""
    def __init__(self, params, lr):
        self.params = params  # variables to be updated
        self.lr = lr  # learning rate
    def __call__(self, batch_size, grads):
        d2l.sgd(self.params, grads, self.lr, batch_size)
def predict_ch3(net, test_iter, n=6):
    """Predict labels (defined in Chapter 3).

    Defined in :numref:`sec_softmax_scratch`"""
    # Only the first batch of the test iterator is used.
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(d2l.argmax(net(X), axis=1))
    titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(
        d2l.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])
def evaluate_loss(net, data_iter, loss):
    """Evaluate the loss of a model on the given dataset.

    Defined in :numref:`sec_model_selection`"""
    metric = d2l.Accumulator(2)  # Sum of losses, no. of examples
    for X, y in data_iter:
        l = loss(net(X), y)
        metric.add(d2l.reduce_sum(l), d2l.size(l))
    return metric[0] / metric[1]
# Registry mapping dataset name -> (URL, SHA-1 hash); entries added below.
DATA_HUB = dict()
# Base URL hosting the d2l datasets.
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
def download(name, cache_dir=os.path.join('..', 'data')):
    """Download a file registered in DATA_HUB and return the local filename.

    If a file with a matching SHA-1 hash already exists in ``cache_dir``,
    the cached copy is returned instead of re-downloading.

    Defined in :numref:`sec_kaggle_house`"""
    assert name in DATA_HUB, f"{name} 不存在于 {DATA_HUB}"
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        # Hash the cached file in 1 MiB chunks to avoid loading it whole.
        sha1 = hashlib.sha1()
        with open(fname, 'rb') as f:
            while True:
                data = f.read(1048576)
                if not data:
                    break
                sha1.update(data)
        if sha1.hexdigest() == sha1_hash:
            return fname  # Cache hit
    print(f'正在从{url}下载{fname}...')
    r = requests.get(url, stream=True, verify=True)
    # Stream the response to disk in chunks: the original buffered the whole
    # body in memory via r.content, which defeats stream=True for large files.
    with open(fname, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1048576):
            f.write(chunk)
    return fname
def download_extract(name, folder=None):
    """Download and extract a zip/tar file, returning the data directory.

    Defined in :numref:`sec_kaggle_house`"""
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        fp = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        fp = tarfile.open(fname, 'r')
    else:
        assert False, '只有zip/tar文件可以被解压缩'
    # Close the archive handle after extraction; the original leaked it.
    with fp:
        fp.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir
def download_all():
    """Download all files registered in DATA_HUB.

    Defined in :numref:`sec_kaggle_house`"""
    for name in DATA_HUB:
        download(name)
# Register the Kaggle house-price train/test CSVs with their SHA-1 hashes.
DATA_HUB['kaggle_house_train'] = (
    DATA_URL + 'kaggle_house_pred_train.csv',
    '585e9cc93e70b39160e7921475f9bcd7d31219ce')
DATA_HUB['kaggle_house_test'] = (
    DATA_URL + 'kaggle_house_pred_test.csv',
    'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu().

    Defined in :numref:`sec_use_gpu`"""
    if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:
        return tf.device(f'/GPU:{i}')
    return tf.device('/CPU:0')
def try_all_gpus():
    """Return all available GPUs, or [cpu(),] if there is no GPU.

    Defined in :numref:`sec_use_gpu`"""
    num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))
    devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]
    return devices if devices else [tf.device('/CPU:0')]
def corr2d(X, K):
    """Compute the 2D cross-correlation of input X with kernel K."""
    h, w = K.shape
    # The output shrinks by (kernel size - 1) in each dimension.
    Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j].assign(tf.reduce_sum(
                X[i: i + h, j: j + w] * K))
    return Y
class TrainCallback(tf.keras.callbacks.Callback):
    """A callback to visualize the training progress.

    Defined in :numref:`sec_lenet`"""
    def __init__(self, net, train_iter, test_iter, num_epochs, device_name):
        self.timer = d2l.Timer()
        self.animator = d2l.Animator(
            xlabel='epoch', xlim=[1, num_epochs], legend=[
                'train loss', 'train acc', 'test acc'])
        self.net = net
        self.train_iter = train_iter
        self.test_iter = test_iter
        self.num_epochs = num_epochs
        self.device_name = device_name
    def on_epoch_begin(self, epoch, logs=None):
        # Time each epoch from its start.
        self.timer.start()
    def on_epoch_end(self, epoch, logs):
        self.timer.stop()
        test_acc = self.net.evaluate(
            self.test_iter, verbose=0, return_dict=True)['accuracy']
        metrics = (logs['loss'], logs['accuracy'], test_acc)
        self.animator.add(epoch + 1, metrics)
        if epoch == self.num_epochs - 1:
            # Print a final summary with average per-epoch throughput.
            batch_size = next(iter(self.train_iter))[0].shape[0]
            num_examples = batch_size * tf.data.experimental.cardinality(
                self.train_iter).numpy()
            print(f'loss {metrics[0]:.3f}, train acc {metrics[1]:.3f}, '
                  f'test acc {metrics[2]:.3f}')
            print(f'{num_examples / self.timer.avg():.1f} examples/sec on '
                  f'{str(self.device_name)}')
def train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device):
    """Train a model with a GPU (defined in Chapter 6).

    Defined in :numref:`sec_lenet`"""
    device_name = device._device_name
    # Pin variable creation and the training loop to the requested device.
    strategy = tf.distribute.OneDeviceStrategy(device_name)
    with strategy.scope():
        optimizer = tf.keras.optimizers.SGD(learning_rate=lr)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        net = net_fn()
        net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    callback = TrainCallback(net, train_iter, test_iter, num_epochs,
                             device_name)
    net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])
    return net
class Residual(tf.keras.Model):
    """The residual block of ResNet."""
    def __init__(self, num_channels, use_1x1conv=False, strides=1):
        super().__init__()
        self.conv1 = tf.keras.layers.Conv2D(
            num_channels, padding='same', kernel_size=3, strides=strides)
        self.conv2 = tf.keras.layers.Conv2D(
            num_channels, kernel_size=3, padding='same')
        # Optional 1x1 convolution to match shapes on the skip connection.
        self.conv3 = None
        if use_1x1conv:
            self.conv3 = tf.keras.layers.Conv2D(
                num_channels, kernel_size=1, strides=strides)
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.bn2 = tf.keras.layers.BatchNormalization()
    def call(self, X):
        Y = tf.keras.activations.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3 is not None:
            X = self.conv3(X)
        # Skip connection.
        Y += X
        return tf.keras.activations.relu(Y)
# Register "The Time Machine" text with its SHA-1 hash.
d2l.DATA_HUB['time_machine'] = (d2l.DATA_URL + 'timemachine.txt',
                                '090b5e7e70c295757f55df93cb0a180b9691891a')
def read_time_machine():
    """Load the time machine dataset into a list of text lines.

    Defined in :numref:`sec_text_preprocessing`"""
    with open(d2l.download('time_machine'), 'r') as f:
        lines = f.readlines()
    # Keep only letters: collapse everything else to spaces, then lowercase.
    return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]
def tokenize(lines, token='word'):
    """Split each text line into word or character tokens.

    Defined in :numref:`sec_text_preprocessing`"""
    if token == 'char':
        return [list(line) for line in lines]
    if token == 'word':
        return [line.split() for line in lines]
    # Unknown token type: report it and (implicitly) return None.
    print('错误:未知词元类型:' + token)
class Vocab:
    """Vocabulary for text tokens."""
    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        """Defined in :numref:`sec_text_preprocessing`"""
        tokens = tokens if tokens is not None else []
        reserved_tokens = reserved_tokens if reserved_tokens is not None else []
        # Sort token frequencies from most to least common.
        counter = count_corpus(tokens)
        self._token_freqs = sorted(counter.items(), key=lambda pair: pair[1],
                                   reverse=True)
        # Index 0 is reserved for the unknown token.
        self.idx_to_token = ['<unk>'] + reserved_tokens
        self.token_to_idx = {tok: i
                             for i, tok in enumerate(self.idx_to_token)}
        for tok, freq in self._token_freqs:
            if freq < min_freq:
                # Frequencies are sorted, so every later token is rarer.
                break
            if tok not in self.token_to_idx:
                self.idx_to_token.append(tok)
                self.token_to_idx[tok] = len(self.idx_to_token) - 1
    def __len__(self):
        return len(self.idx_to_token)
    def __getitem__(self, tokens):
        if isinstance(tokens, (list, tuple)):
            return [self.__getitem__(tok) for tok in tokens]
        return self.token_to_idx.get(tokens, self.unk)
    def to_tokens(self, indices):
        if isinstance(indices, (list, tuple)):
            return [self.idx_to_token[i] for i in indices]
        return self.idx_to_token[indices]
    @property
    def unk(self):
        # The unknown token always maps to index 0.
        return 0
    @property
    def token_freqs(self):
        return self._token_freqs
def count_corpus(tokens):
    """Count token frequencies in a 1D or 2D token list.

    Defined in :numref:`sec_text_preprocessing`"""
    if tokens and not isinstance(tokens[0], list):
        # Already a flat 1D list of tokens.
        return collections.Counter(tokens)
    # Flatten a (possibly empty) list of token lists into a single list.
    flat = [tok for line in tokens for tok in line]
    return collections.Counter(flat)
def load_corpus_time_machine(max_tokens=-1):
    """Return the token indices and vocabulary of the time machine dataset.

    Defined in :numref:`sec_text_preprocessing`"""
    lines = read_time_machine()
    tokens = tokenize(lines, 'char')
    vocab = Vocab(tokens)
    # Since each text line of the dataset is not necessarily a sentence or
    # a paragraph, flatten all lines into a single index list.
    corpus = [vocab[token] for line in tokens for token in line]
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocab
def seq_data_iter_random(corpus, batch_size, num_steps):
    """Generate a minibatch of subsequences using random sampling.

    Defined in :numref:`sec_language_model`"""
    # Partition the sequence starting from a random offset; the random
    # range includes num_steps - 1.
    corpus = corpus[random.randint(0, num_steps - 1):]
    # Subtract 1 because we need to account for the labels.
    num_subseqs = (len(corpus) - 1) // num_steps
    # Starting indices of subsequences of length num_steps.
    initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
    # With random sampling, subsequences from two adjacent random
    # minibatches are not necessarily adjacent in the original sequence.
    random.shuffle(initial_indices)
    def data(pos):
        # Return a sequence of length num_steps starting at pos.
        return corpus[pos: pos + num_steps]
    num_batches = num_subseqs // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        # Here initial_indices holds the randomized starting indices.
        initial_indices_per_batch = initial_indices[i: i + batch_size]
        X = [data(j) for j in initial_indices_per_batch]
        Y = [data(j + 1) for j in initial_indices_per_batch]
        yield d2l.tensor(X), d2l.tensor(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
    """Generate a minibatch of subsequences using sequential partitioning.

    Defined in :numref:`sec_language_model`"""
    # Partition the sequence starting from a random offset.
    offset = random.randint(0, num_steps)
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    # Y is X shifted by one token (the next-token labels).
    Xs = d2l.tensor(corpus[offset: offset + num_tokens])
    Ys = d2l.tensor(corpus[offset + 1: offset + 1 + num_tokens])
    Xs = d2l.reshape(Xs, (batch_size, -1))
    Ys = d2l.reshape(Ys, (batch_size, -1))
    num_batches = Xs.shape[1] // num_steps
    for i in range(0, num_batches * num_steps, num_steps):
        X = Xs[:, i: i + num_steps]
        Y = Ys[:, i: i + num_steps]
        yield X, Y
class SeqDataLoader:
    """An iterator that loads sequence data."""
    def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
        """Defined in :numref:`sec_language_model`"""
        # Choose between random sampling and sequential partitioning.
        if use_random_iter:
            self.data_iter_fn = d2l.seq_data_iter_random
        else:
            self.data_iter_fn = d2l.seq_data_iter_sequential
        self.corpus, self.vocab = d2l.load_corpus_time_machine(max_tokens)
        self.batch_size, self.num_steps = batch_size, num_steps
    def __iter__(self):
        return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)
def load_data_time_machine(batch_size, num_steps,
                           use_random_iter=False, max_tokens=10000):
    """Return the iterator and the vocabulary of the time machine dataset.

    Defined in :numref:`sec_language_model`"""
    data_iter = SeqDataLoader(
        batch_size, num_steps, use_random_iter, max_tokens)
    return data_iter, data_iter.vocab
class RNNModelScratch:
    """An RNN model implemented from scratch."""
    def __init__(self, vocab_size, num_hiddens,
                 init_state, forward_fn, get_params):
        """Defined in :numref:`sec_rnn_scratch`"""
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.init_state, self.forward_fn = init_state, forward_fn
        self.trainable_variables = get_params(vocab_size, num_hiddens)
    def __call__(self, X, state):
        # One-hot encode with the time dimension first (transpose puts
        # steps before batch), then cast for the forward computation.
        X = tf.one_hot(tf.transpose(X), self.vocab_size)
        X = tf.cast(X, tf.float32)
        return self.forward_fn(X, state, self.trainable_variables)
    def begin_state(self, batch_size, *args, **kwargs):
        return self.init_state(batch_size, self.num_hiddens)
def predict_ch8(prefix, num_preds, net, vocab):
    """Generate new characters following the prefix.

    Defined in :numref:`sec_rnn_scratch`"""
    state = net.begin_state(batch_size=1, dtype=tf.float32)
    outputs = [vocab[prefix[0]]]
    # The model input is always the most recently emitted token.
    get_input = lambda: d2l.reshape(d2l.tensor([outputs[-1]]),
                                    (1, 1)).numpy()
    for y in prefix[1:]:  # Warm-up period: feed the known prefix
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):  # Predict num_preds steps
        y, state = net(get_input(), state)
        outputs.append(int(y.numpy().argmax(axis=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])
def grad_clipping(grads, theta):
    """Clip gradients so their global L2 norm is at most theta.

    Defined in :numref:`sec_rnn_scratch`"""
    theta = tf.constant(theta, dtype=tf.float32)
    new_grad = []
    for grad in grads:
        # Densify sparse gradients so the norm computation and the
        # scaling below work uniformly on dense tensors.
        if isinstance(grad, tf.IndexedSlices):
            new_grad.append(tf.convert_to_tensor(grad))
        else:
            new_grad.append(grad)
    # Global norm across all gradient tensors.
    norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy()
                        for grad in new_grad))
    norm = tf.cast(norm, tf.float32)
    if tf.greater(norm, theta):
        # Rescale every gradient by theta / norm.
        for i, grad in enumerate(new_grad):
            new_grad[i] = grad * theta / norm
    # (The original had a dead `else: new_grad = new_grad` branch; removed.)
    return new_grad
def train_epoch_ch8(net, train_iter, loss, updater, use_random_iter):
    """Train a model for one epoch (defined in Chapter 8).

    Defined in :numref:`sec_rnn_scratch`"""
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)  # Sum of training loss, no. of tokens
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize state on the first iteration, or on every
            # iteration when using random sampling (batches are then
            # not contiguous in the original sequence).
            state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32)
        with tf.GradientTape(persistent=True) as g:
            y_hat, state = net(X, state)
            y = d2l.reshape(tf.transpose(Y), (-1))
            l = loss(y, y_hat)
        params = net.trainable_variables
        grads = g.gradient(l, params)
        grads = grad_clipping(grads, 1)
        updater.apply_gradients(zip(grads, params))
        # Keras losses return the mean loss over a batch by default.
        metric.add(l * d2l.size(y), d2l.size(y))
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
def train_ch8(net, train_iter, vocab, lr, num_epochs, strategy,
              use_random_iter=False):
    """Train a model (defined in Chapter 8).

    Defined in :numref:`sec_rnn_scratch`"""
    with strategy.scope():
        loss = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True)
        updater = tf.keras.optimizers.SGD(lr)
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab)
    # Train and predict
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(net, train_iter, loss, updater,
                                     use_random_iter)
        if (epoch + 1) % 10 == 0:
            print(predict('time traveller'))
            animator.add(epoch + 1, [ppl])
    device = d2l.try_gpu()._device_name
    # Summary line reads: perplexity <ppl>, <speed> tokens/sec on <device>.
    print(f'困惑度 {ppl:.1f}, {speed:.1f} 词元/秒 {str(device)}')
    print(predict('time traveller'))
    print(predict('traveller'))
class RNNModel(tf.keras.layers.Layer):
    """An RNN layer followed by a dense output layer over the vocabulary.

    Defined in :numref:`sec_rnn-concise`"""
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.dense = tf.keras.layers.Dense(vocab_size)
    def call(self, inputs, state):
        X = tf.one_hot(tf.transpose(inputs), self.vocab_size)
        # The rnn layer returns more than two values.
        Y, *state = self.rnn(X, state)
        output = self.dense(tf.reshape(Y, (-1, Y.shape[-1])))
        return output, state
    def begin_state(self, *args, **kwargs):
        return self.rnn.cell.get_initial_state(*args, **kwargs)
# Register the English-French translation dataset with its SHA-1 hash.
d2l.DATA_HUB['fra-eng'] = (d2l.DATA_URL + 'fra-eng.zip',
                           '94646ad1522d915e7b0f9296181140edcf86a4f5')
def read_data_nmt():
    """Load the English-French dataset as a single string.

    Defined in :numref:`sec_machine_translation`"""
    data_dir = d2l.download_extract('fra-eng')
    with open(os.path.join(data_dir, 'fra.txt'), 'r',
              encoding='utf-8') as f:
        return f.read()
def preprocess_nmt(text):
    """Preprocess the English-French dataset.

    Defined in :numref:`sec_machine_translation`"""
    def no_space(char, prev_char):
        return char in set(',.!?') and prev_char != ' '
    # Replace non-breaking spaces with regular spaces; lowercase everything.
    text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
    # Insert a space between words and punctuation marks.
    pieces = []
    for i, char in enumerate(text):
        if i > 0 and no_space(char, text[i - 1]):
            pieces.append(' ')
        pieces.append(char)
    return ''.join(pieces)
def tokenize_nmt(text, num_examples=None):
    """Tokenize the English-French dataset into (source, target) token lists.

    Defined in :numref:`sec_machine_translation`"""
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        # NOTE(review): the cutoff is `i > num_examples`, which keeps up to
        # num_examples + 1 lines; preserved as-is for reproducibility.
        if num_examples and i > num_examples:
            break
        parts = line.split('\t')
        if len(parts) != 2:
            continue  # skip malformed lines
        src_text, tgt_text = parts
        source.append(src_text.split(' '))
        target.append(tgt_text.split(' '))
    return source, target
def show_list_len_pair_hist(legend, xlabel, ylabel, xlist, ylist):
    """Plot a histogram of the lengths of pairs of lists.

    Defined in :numref:`sec_machine_translation`"""
    d2l.set_figsize()
    _, _, patches = d2l.plt.hist(
        [[len(l) for l in xlist], [len(l) for l in ylist]])
    d2l.plt.xlabel(xlabel)
    d2l.plt.ylabel(ylabel)
    # Hatch the second series so the two histograms are distinguishable.
    for patch in patches[1].patches:
        patch.set_hatch('/')
    d2l.plt.legend(legend)
def truncate_pad(line, num_steps, padding_token):
    """Truncate or pad a token sequence to exactly num_steps entries.

    Defined in :numref:`sec_machine_translation`"""
    if len(line) >= num_steps:
        return line[:num_steps]  # Truncate
    padding = [padding_token] * (num_steps - len(line))
    return line + padding  # Pad
def build_array_nmt(lines, vocab, num_steps):
    """Transform text sequences of machine translation into minibatches.

    Defined in :numref:`subsec_mt_data_loading`"""
    lines = [vocab[l] for l in lines]
    # Append the end-of-sequence token to every line.
    lines = [l + [vocab['<eos>']] for l in lines]
    array = d2l.tensor([truncate_pad(
        l, num_steps, vocab['<pad>']) for l in lines])
    # valid_len counts the non-padding tokens in each sequence.
    valid_len = d2l.reduce_sum(
        d2l.astype(array != vocab['<pad>'], d2l.int32), 1)
    return array, valid_len
def load_data_nmt(batch_size, num_steps, num_examples=600):
    """Return the iterator and the vocabularies of the translation dataset.

    Defined in :numref:`subsec_mt_data_loading`"""
    text = preprocess_nmt(read_data_nmt())
    source, target = tokenize_nmt(text, num_examples)
    src_vocab = d2l.Vocab(source, min_freq=2,
                          reserved_tokens=['<pad>', '<bos>', '<eos>'])
    tgt_vocab = d2l.Vocab(target, min_freq=2,
                          reserved_tokens=['<pad>', '<bos>', '<eos>'])
    src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
    tgt_array, tgt_valid_len = build_array_nmt(target, tgt_vocab, num_steps)
    data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
    data_iter = d2l.load_array(data_arrays, batch_size)
    return data_iter, src_vocab, tgt_vocab
class Encoder(tf.keras.layers.Layer):
    """The base encoder interface for the encoder-decoder architecture."""
    def __init__(self, **kwargs):
        super(Encoder, self).__init__(**kwargs)
    def call(self, X, *args, **kwargs):
        # Subclasses must implement the forward computation.
        raise NotImplementedError
class Decoder(tf.keras.layers.Layer):
    """The base decoder interface for the encoder-decoder architecture.

    Defined in :numref:`sec_encoder-decoder`"""
    def __init__(self, **kwargs):
        super(Decoder, self).__init__(**kwargs)
    def init_state(self, enc_outputs, *args):
        # Convert the encoder outputs into the decoder's initial state.
        raise NotImplementedError
    def call(self, X, state, **kwargs):
        raise NotImplementedError
class EncoderDecoder(tf.keras.Model):
"""编码器-解码器架构的基类
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | true |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/d2l/mxnet.py | d2l/mxnet.py | USE_MXNET = True
USE_PYTORCH = False
USE_TENSORFLOW = False
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
from mxnet import autograd, context, gluon, image, init, np, npx
from mxnet.gluon import nn, rnn
from mxnet.gluon.data.vision import transforms
# Backend-neutral alias used by generated code.
nn_Module = nn.Block
################# WARNING ################
# The below part is generated automatically through:
#    d2lbook build lib
# Don't edit it directly
import collections
import hashlib
import math
import os
import random
import re
import shutil
import sys
import tarfile
import time
import zipfile
from collections import defaultdict
import pandas as pd
import requests
from IPython import display
from matplotlib import pyplot as plt
from matplotlib_inline import backend_inline
# Alias this module as `d2l` so the generated code can call d2l.<name>.
d2l = sys.modules[__name__]
from mxnet import autograd, context, gluon, image, init, np, npx
from mxnet.gluon import nn, rnn
def use_svg_display():
    """Use the svg format to display plots in Jupyter.

    Defined in :numref:`sec_calculus`"""
    backend_inline.set_matplotlib_formats('svg')
def set_figsize(figsize=(3.5, 2.5)):
    """Set the figure size for matplotlib.

    Defined in :numref:`sec_calculus`"""
    use_svg_display()
    d2l.plt.rcParams['figure.figsize'] = figsize
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    """Set the axes for matplotlib.

    Defined in :numref:`sec_calculus`"""
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
         ylim=None, xscale='linear', yscale='linear',
         fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
    """Plot data points.

    Defined in :numref:`sec_calculus`"""
    if legend is None:
        legend = []
    set_figsize(figsize)
    axes = axes if axes else d2l.plt.gca()
    # Return True if X (tensor or list) has one axis
    def has_one_axis(X):
        return (hasattr(X, "ndim") and X.ndim == 1 or isinstance(X, list)
                and not hasattr(X[0], "__len__"))
    if has_one_axis(X):
        X = [X]
    if Y is None:
        # Only y values were supplied; use empty x lists.
        X, Y = [[]] * len(X), X
    elif has_one_axis(Y):
        Y = [Y]
    if len(X) != len(Y):
        # Reuse the same x values for every y series.
        X = X * len(Y)
    axes.cla()
    for x, y, fmt in zip(X, Y, fmts):
        if len(x):
            axes.plot(x, y, fmt)
        else:
            axes.plot(y, fmt)
    set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
class Timer:
    """Record durations across multiple runs."""
    def __init__(self):
        """Defined in :numref:`subsec_linear_model`"""
        self.times = []
        self.start()
    def start(self):
        """Start the timer."""
        self.tik = time.time()
    def stop(self):
        """Stop the timer, record the elapsed time, and return it."""
        elapsed = time.time() - self.tik
        self.times.append(elapsed)
        return elapsed
    def avg(self):
        """Return the average recorded time."""
        return sum(self.times) / len(self.times)
    def sum(self):
        """Return the total of all recorded times."""
        return sum(self.times)
    def cumsum(self):
        """Return the running total of the recorded times as a list."""
        return np.array(self.times).cumsum().tolist()
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise.

    Defined in :numref:`sec_linear_scratch`"""
    X = d2l.normal(0, 1, (num_examples, len(w)))
    y = d2l.matmul(X, w) + b
    # Add small Gaussian observation noise.
    y += d2l.normal(0, 0.01, y.shape)
    return X, d2l.reshape(y, (-1, 1))
def linreg(X, w, b):
    """The linear regression model.

    Defined in :numref:`sec_linear_scratch`"""
    return d2l.matmul(X, w) + b
def squared_loss(y_hat, y):
    """Squared loss.

    Defined in :numref:`sec_linear_scratch`"""
    return (y_hat - d2l.reshape(y, y_hat.shape)) ** 2 / 2
def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent.

    Defined in :numref:`sec_linear_scratch`"""
    for param in params:
        # In-place update; gradients are divided by the batch size.
        param[:] = param - lr * param.grad / batch_size
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a Gluon data iterator.

    Defined in :numref:`sec_linear_concise`"""
    dataset = gluon.data.ArrayDataset(*data_arrays)
    return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)
def get_fashion_mnist_labels(labels):
    """Map Fashion-MNIST numeric labels to their text names.

    Defined in :numref:`sec_fashion_mnist`"""
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    result = []
    for label in labels:
        result.append(text_labels[int(label)])
    return result
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    """Plot a list of images in a num_rows x num_cols grid.

    Defined in :numref:`sec_fashion_mnist`"""
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        ax.imshow(d2l.numpy(img))
        # Hide tick marks and labels on both axes.
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
def get_dataloader_workers():
    """Use 4 worker processes to read data, except on Windows.

    Defined in :numref:`sec_fashion_mnist`"""
    if sys.platform.startswith('win'):
        return 0
    return 4
def load_data_fashion_mnist(batch_size, resize=None):
    """Download the Fashion-MNIST dataset and load it into memory.

    Defined in :numref:`sec_fashion_mnist`"""
    dataset = gluon.data.vision
    trans = [dataset.transforms.ToTensor()]
    if resize:
        # Resize before converting to tensor.
        trans.insert(0, dataset.transforms.Resize(resize))
    trans = dataset.transforms.Compose(trans)
    mnist_train = dataset.FashionMNIST(train=True).transform_first(trans)
    mnist_test = dataset.FashionMNIST(train=False).transform_first(trans)
    return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True,
                                  num_workers=get_dataloader_workers()),
            gluon.data.DataLoader(mnist_test, batch_size, shuffle=False,
                                  num_workers=get_dataloader_workers()))
def accuracy(y_hat, y):
    """Compute the number of correct predictions.

    Defined in :numref:`sec_softmax_scratch`"""
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        # y_hat holds per-class scores; reduce to the predicted class index.
        y_hat = d2l.argmax(y_hat, axis=1)
    cmp = d2l.astype(y_hat, y.dtype) == y
    return float(d2l.reduce_sum(d2l.astype(cmp, y.dtype)))
def evaluate_accuracy(net, data_iter):
    """Compute the accuracy of a model on the given dataset.

    Defined in :numref:`sec_softmax_scratch`"""
    metric = Accumulator(2)  # No. of correct predictions, no. of predictions
    for X, y in data_iter:
        metric.add(accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
class Accumulator:
    """Accumulate sums over n quantities."""
    def __init__(self, n):
        """Defined in :numref:`sec_softmax_scratch`"""
        self.data = n * [0.0]
    def add(self, *args):
        # Add each increment onto the corresponding running total.
        updated = []
        for current, delta in zip(self.data, args):
            updated.append(current + float(delta))
        self.data = updated
    def reset(self):
        self.data = len(self.data) * [0.0]
    def __getitem__(self, idx):
        return self.data[idx]
def train_epoch_ch3(net, train_iter, loss, updater):
    """Train a model for one epoch (defined in Chapter 3).

    Defined in :numref:`sec_softmax_scratch`"""
    # Sum of training loss, sum of training accuracy, no. of examples
    metric = Accumulator(3)
    if isinstance(updater, gluon.Trainer):
        updater = updater.step
    for X, y in train_iter:
        # Compute gradients and update parameters
        with autograd.record():
            y_hat = net(X)
            l = loss(y_hat, y)
        l.backward()
        updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.size)
    # Return training loss and training accuracy
    return metric[0] / metric[2], metric[1] / metric[2]
class Animator:
    """Plot data incrementally in an animation."""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        """Defined in :numref:`sec_softmax_scratch`"""
        # Incrementally draw multiple lines
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Capture the axis-configuration arguments with a lambda
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts
    def add(self, x, y):
        # Add multiple data points to the figure
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        # Redraw all accumulated series from scratch.
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Train a model (defined in Chapter 3).

    Defined in :numref:`sec_softmax_scratch`"""
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    # Sanity checks that training converged to reasonable loss/accuracy.
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
def predict_ch3(net, test_iter, n=6):
    """Predict labels (defined in Chapter 3).

    Defined in :numref:`sec_softmax_scratch`"""
    # Only the first batch of the test iterator is used.
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(d2l.argmax(net(X), axis=1))
    titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(
        d2l.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])
def evaluate_loss(net, data_iter, loss):
    """Evaluate the loss of a model on the given dataset.

    Defined in :numref:`sec_model_selection`"""
    metric = d2l.Accumulator(2)  # Sum of losses, no. of examples
    for X, y in data_iter:
        l = loss(net(X), y)
        metric.add(d2l.reduce_sum(l), d2l.size(l))
    return metric[0] / metric[1]
# Re-initialize the dataset registry for this generated section.
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
def download(name, cache_dir=os.path.join('..', 'data')):
    """Download a file registered in DATA_HUB and return the local filename.

    If a file with a matching SHA-1 hash already exists in ``cache_dir``,
    the cached copy is returned instead of re-downloading.

    Defined in :numref:`sec_kaggle_house`"""
    assert name in DATA_HUB, f"{name} 不存在于 {DATA_HUB}"
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        # Hash the cached file in 1 MiB chunks to avoid loading it whole.
        sha1 = hashlib.sha1()
        with open(fname, 'rb') as f:
            while True:
                data = f.read(1048576)
                if not data:
                    break
                sha1.update(data)
        if sha1.hexdigest() == sha1_hash:
            return fname  # Cache hit
    print(f'正在从{url}下载{fname}...')
    r = requests.get(url, stream=True, verify=True)
    # Stream the response to disk in chunks: the original buffered the whole
    # body in memory via r.content, which defeats stream=True for large files.
    with open(fname, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1048576):
            f.write(chunk)
    return fname
def download_extract(name, folder=None):
    """Download and extract a zip/tar file, returning the data directory.

    Defined in :numref:`sec_kaggle_house`"""
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        fp = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        fp = tarfile.open(fname, 'r')
    else:
        assert False, '只有zip/tar文件可以被解压缩'
    # Close the archive handle after extraction; the original leaked it.
    with fp:
        fp.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir
def download_all():
    """Download all files registered in DATA_HUB.

    Defined in :numref:`sec_kaggle_house`"""
    for name in DATA_HUB:
        download(name)
# Register the Kaggle house-price train/test CSVs with their SHA-1 hashes.
DATA_HUB['kaggle_house_train'] = (
    DATA_URL + 'kaggle_house_pred_train.csv',
    '585e9cc93e70b39160e7921475f9bcd7d31219ce')
DATA_HUB['kaggle_house_test'] = (
    DATA_URL + 'kaggle_house_pred_test.csv',
    'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu().

    Defined in :numref:`sec_use_gpu`"""
    return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu()
def try_all_gpus():
    """Return all available GPUs, or [cpu()] if there is no GPU.

    Defined in :numref:`sec_use_gpu`"""
    devices = [npx.gpu(i) for i in range(npx.num_gpus())]
    return devices if devices else [npx.cpu()]
def corr2d(X, K):
    """Compute the 2D cross-correlation of input X with kernel K.

    Defined in :numref:`sec_conv_layer`"""
    h, w = K.shape
    # The output shrinks by (kernel size - 1) in each dimension.
    Y = d2l.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = d2l.reduce_sum((X[i: i + h, j: j + w] * K))
    return Y
def evaluate_accuracy_gpu(net, data_iter, device=None):
    """Compute the accuracy of a model on a dataset using a GPU.

    Defined in :numref:`sec_lenet`"""
    if not device:  # Query the first device the first parameter lives on
        device = list(net.collect_params().values())[0].list_ctx()[0]
    metric = d2l.Accumulator(2)  # No. of correct predictions, no. of predictions
    for X, y in data_iter:
        # Move the data to the same device as the model parameters.
        X, y = X.as_in_ctx(device), y.as_in_ctx(device)
        metric.add(d2l.accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
    """Train a model with a GPU (defined in Chapter 6).
    Defined in :numref:`sec_lenet`"""
    net.initialize(force_reinit=True, ctx=device, init=init.Xavier())
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(),
                            'sgd', {'learning_rate': lr})
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_iter)
    for epoch in range(num_epochs):
        metric = d2l.Accumulator(3) # sum of training loss, sum of training accuracy, no. of examples
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            # This is the main difference from `d2l.train_epoch_ch3`
            X, y = X.as_in_ctx(device), y.as_in_ctx(device)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y)
            l.backward()
            trainer.step(X.shape[0])
            metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
          f'test acc {test_acc:.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(device)}')
class Residual(nn.Block):
    """The residual block of ResNet (MXNet/Gluon implementation)."""
    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super().__init__(**kwargs)
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        if use_1x1conv:
            # 1x1 conv matches channels/stride so X can be added to Y below.
            self.conv3 = nn.Conv2D(num_channels, kernel_size=1,
                                   strides=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()
    def forward(self, X):
        Y = npx.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        return npx.relu(Y + X)
d2l.DATA_HUB['time_machine'] = (d2l.DATA_URL + 'timemachine.txt',
                                '090b5e7e70c295757f55df93cb0a180b9691891a')
def read_time_machine():
    """Load the time machine dataset into a list of text lines.
    Defined in :numref:`sec_text_preprocessing`"""
    with open(d2l.download('time_machine'), 'r') as f:
        lines = f.readlines()
    return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]
def tokenize(lines, token='word'):
    """Split text lines into word or character tokens.
    Defined in :numref:`sec_text_preprocessing`"""
    # Pick the per-line splitter once, then map it over every line.
    if token == 'word':
        split_line = lambda text: text.split()
    elif token == 'char':
        split_line = lambda text: list(text)
    else:
        print('错误:未知词元类型:' + token)
        return None
    return [split_line(text) for text in lines]
class Vocab:
    """Vocabulary for text."""
    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        """Defined in :numref:`sec_text_preprocessing`"""
        if tokens is None:
            tokens = []
        if reserved_tokens is None:
            reserved_tokens = []
        # Sort by frequency of occurrence
        counter = count_corpus(tokens)
        self._token_freqs = sorted(counter.items(), key=lambda x: x[1],
                                   reverse=True)
        # The index of the unknown token is 0
        self.idx_to_token = ['<unk>'] + reserved_tokens
        self.token_to_idx = {token: idx
                             for idx, token in enumerate(self.idx_to_token)}
        for token, freq in self._token_freqs:
            if freq < min_freq:
                # _token_freqs is sorted, so everything after is rarer too.
                break
            if token not in self.token_to_idx:
                self.idx_to_token.append(token)
                self.token_to_idx[token] = len(self.idx_to_token) - 1
    def __len__(self):
        return len(self.idx_to_token)
    def __getitem__(self, tokens):
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]
    def to_tokens(self, indices):
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]
    @property
    def unk(self): # The index of the unknown token is 0
        return 0
    @property
    def token_freqs(self):
        return self._token_freqs
def count_corpus(tokens):
    """Count token frequencies.
    Defined in :numref:`sec_text_preprocessing`"""
    # `tokens` is either a 1D list of tokens or a 2D list of token lines.
    if tokens and isinstance(tokens[0], list):
        flat = [tok for line in tokens for tok in line]
    else:
        flat = tokens
    return collections.Counter(flat)
def load_corpus_time_machine(max_tokens=-1):
    """Return the token-index list and vocabulary of the time machine dataset.
    Defined in :numref:`sec_text_preprocessing`"""
    lines = read_time_machine()
    tokens = tokenize(lines, 'char')
    vocab = Vocab(tokens)
    # Since each text line of the time machine dataset is not necessarily a
    # sentence or a paragraph, flatten all lines into a single list
    corpus = [vocab[token] for line in tokens for token in line]
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocab
def seq_data_iter_random(corpus, batch_size, num_steps):
    """Generate a minibatch of subsequences using random sampling.
    Defined in :numref:`sec_language_model`"""
    # Partition the sequence starting from a random offset; the random range
    # includes num_steps - 1
    corpus = corpus[random.randint(0, num_steps - 1):]
    # Subtract 1 because we need to account for the labels
    num_subseqs = (len(corpus) - 1) // num_steps
    # Starting indices of subsequences of length num_steps
    initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
    # With random sampling, subsequences from two adjacent random minibatches
    # are not necessarily adjacent in the original sequence
    random.shuffle(initial_indices)
    def data(pos):
        # Return the sequence of length num_steps starting at pos
        return corpus[pos: pos + num_steps]
    num_batches = num_subseqs // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        # Here, initial_indices holds the random starting indices of subsequences
        initial_indices_per_batch = initial_indices[i: i + batch_size]
        X = [data(j) for j in initial_indices_per_batch]
        Y = [data(j + 1) for j in initial_indices_per_batch]
        yield d2l.tensor(X), d2l.tensor(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
    """Generate a minibatch of subsequences using sequential partitioning.
    Defined in :numref:`sec_language_model`"""
    # Partition the sequence starting from a random offset
    offset = random.randint(0, num_steps)
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = d2l.tensor(corpus[offset: offset + num_tokens])
    Ys = d2l.tensor(corpus[offset + 1: offset + 1 + num_tokens])
    Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)
    num_batches = Xs.shape[1] // num_steps
    for i in range(0, num_steps * num_batches, num_steps):
        X = Xs[:, i: i + num_steps]
        Y = Ys[:, i: i + num_steps]
        yield X, Y
class SeqDataLoader:
    """An iterator that loads sequence data."""
    def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
        """Defined in :numref:`sec_language_model`"""
        if use_random_iter:
            self.data_iter_fn = d2l.seq_data_iter_random
        else:
            self.data_iter_fn = d2l.seq_data_iter_sequential
        self.corpus, self.vocab = d2l.load_corpus_time_machine(max_tokens)
        self.batch_size, self.num_steps = batch_size, num_steps
    def __iter__(self):
        return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)
def load_data_time_machine(batch_size, num_steps,
                           use_random_iter=False, max_tokens=10000):
    """Return the iterator and the vocabulary of the time machine dataset.
    Defined in :numref:`sec_language_model`"""
    data_iter = SeqDataLoader(
        batch_size, num_steps, use_random_iter, max_tokens)
    return data_iter, data_iter.vocab
class RNNModelScratch:
    """An RNN model implemented from scratch."""
    def __init__(self, vocab_size, num_hiddens, device, get_params,
                 init_state, forward_fn):
        """Defined in :numref:`sec_rnn_scratch`"""
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, device)
        self.init_state, self.forward_fn = init_state, forward_fn
    def __call__(self, X, state):
        # Transpose so time is the leading axis, then one-hot encode.
        X = npx.one_hot(X.T, self.vocab_size)
        return self.forward_fn(X, state, self.params)
    def begin_state(self, batch_size, ctx):
        return self.init_state(batch_size, self.num_hiddens, ctx)
def predict_ch8(prefix, num_preds, net, vocab, device):
    """Generate new characters following the prefix.
    Defined in :numref:`sec_rnn_scratch`"""
    state = net.begin_state(batch_size=1, ctx=device)
    outputs = [vocab[prefix[0]]]
    get_input = lambda: d2l.reshape(
        d2l.tensor([outputs[-1]], ctx=device), (1, 1))
    for y in prefix[1:]: # Warm-up period
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds): # Predict num_preds steps
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(axis=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])
def grad_clipping(net, theta):
    """Clip the gradient to norm theta.
    Defined in :numref:`sec_rnn_scratch`"""
    if isinstance(net, gluon.Block):
        params = [p.data() for p in net.collect_params().values()]
    else:
        params = net.params
    norm = math.sqrt(sum((p.grad ** 2).sum() for p in params))
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    """Train a model for one epoch (defined in Chapter 8).
    Defined in :numref:`sec_rnn_scratch`"""
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2) # sum of training loss, number of tokens
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize state on the first iteration or with random sampling
            state = net.begin_state(batch_size=X.shape[0], ctx=device)
        else:
            for s in state:
                s.detach()
        y = Y.T.reshape(-1)
        X, y = X.as_in_ctx(device), y.as_in_ctx(device)
        with autograd.record():
            y_hat, state = net(X, state)
            l = loss(y_hat, y).mean()
        l.backward()
        grad_clipping(net, 1)
        updater(batch_size=1) # Because mean() has already been called
        metric.add(l * d2l.size(y), d2l.size(y))
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
def train_ch8(net, train_iter, vocab, lr, num_epochs, device,
              use_random_iter=False):
    """Train a model (defined in Chapter 8).
    Defined in :numref:`sec_rnn_scratch`"""
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])
    # Initialization
    if isinstance(net, gluon.Block):
        net.initialize(ctx=device, force_reinit=True,
                       init=init.Normal(0.01))
        trainer = gluon.Trainer(net.collect_params(),
                                'sgd', {'learning_rate': lr})
        updater = lambda batch_size: trainer.step(batch_size)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    # Train and predict
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(
            net, train_iter, loss, updater, device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            animator.add(epoch + 1, [ppl])
    print(f'困惑度 {ppl:.1f}, {speed:.1f} 词元/秒 {str(device)}')
    print(predict('time traveller'))
    print(predict('traveller'))
class RNNModel(nn.Block):
    """The RNN model.
    Defined in :numref:`sec_rnn-concise`"""
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.dense = nn.Dense(vocab_size)
    def forward(self, inputs, state):
        X = npx.one_hot(inputs.T, self.vocab_size)
        Y, state = self.rnn(X, state)
        # The fully connected layer first reshapes Y to
        # (num_steps * batch_size, num_hiddens); its output shape is
        # (num_steps * batch_size, vocab_size)
        output = self.dense(Y.reshape(-1, Y.shape[-1]))
        return output, state
    def begin_state(self, *args, **kwargs):
        return self.rnn.begin_state(*args, **kwargs)
d2l.DATA_HUB['fra-eng'] = (d2l.DATA_URL + 'fra-eng.zip',
                           '94646ad1522d915e7b0f9296181140edcf86a4f5')
def read_data_nmt():
    """Load the English-French dataset.
    Defined in :numref:`sec_machine_translation`"""
    data_dir = d2l.download_extract('fra-eng')
    with open(os.path.join(data_dir, 'fra.txt'), 'r',
              encoding='utf-8') as f:
        return f.read()
def preprocess_nmt(text):
    """Preprocess the English-French dataset.
    Defined in :numref:`sec_machine_translation`"""
    def needs_space(ch, prev):
        # A space is inserted before punctuation that follows a non-space.
        return ch in set(',.!?') and prev != ' '
    # Replace non-breaking spaces with ordinary spaces and lowercase the text.
    text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
    pieces = []
    for i, ch in enumerate(text):
        if i > 0 and needs_space(ch, text[i - 1]):
            pieces.append(' ')
        pieces.append(ch)
    return ''.join(pieces)
def tokenize_nmt(text, num_examples=None):
    """Tokenize the English-French dataset into (source, target) token lists.
    Defined in :numref:`sec_machine_translation`"""
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        # NOTE(review): `i > num_examples` keeps num_examples + 1 lines;
        # `i >= num_examples` would match the parameter name exactly — confirm
        # before changing, since published results may depend on this count.
        if num_examples and i > num_examples:
            break
        parts = line.split('\t')
        if len(parts) == 2:
            source.append(parts[0].split(' '))
            target.append(parts[1].split(' '))
    return source, target
def show_list_len_pair_hist(legend, xlabel, ylabel, xlist, ylist):
    """Plot a histogram of the lengths of two lists of sequences.
    Defined in :numref:`sec_machine_translation`"""
    d2l.set_figsize()
    _, _, patches = d2l.plt.hist(
        [[len(l) for l in xlist], [len(l) for l in ylist]])
    d2l.plt.xlabel(xlabel)
    d2l.plt.ylabel(ylabel)
    # Hatch the second series so the two histograms are distinguishable.
    for patch in patches[1].patches:
        patch.set_hatch('/')
    d2l.plt.legend(legend)
def truncate_pad(line, num_steps, padding_token):
    """Truncate or pad a token sequence to exactly num_steps entries.
    Defined in :numref:`sec_machine_translation`"""
    # Padding count is non-positive for long lines, so the slice truncates.
    padded = line + [padding_token] * (num_steps - len(line))
    return padded[:num_steps]
def build_array_nmt(lines, vocab, num_steps):
    """Transform text sequences of machine translation into minibatch arrays.
    Defined in :numref:`subsec_mt_data_loading`"""
    lines = [vocab[l] for l in lines]
    lines = [l + [vocab['<eos>']] for l in lines]
    array = d2l.tensor([truncate_pad(
        l, num_steps, vocab['<pad>']) for l in lines])
    # Valid length = number of non-padding tokens per sequence.
    valid_len = d2l.reduce_sum(
        d2l.astype(array != vocab['<pad>'], d2l.int32), 1)
    return array, valid_len
def load_data_nmt(batch_size, num_steps, num_examples=600):
    """Return the iterator and the vocabularies of the translation dataset.
    Defined in :numref:`subsec_mt_data_loading`"""
    text = preprocess_nmt(read_data_nmt())
    source, target = tokenize_nmt(text, num_examples)
    src_vocab = d2l.Vocab(source, min_freq=2,
                          reserved_tokens=['<pad>', '<bos>', '<eos>'])
    tgt_vocab = d2l.Vocab(target, min_freq=2,
                          reserved_tokens=['<pad>', '<bos>', '<eos>'])
    src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
    tgt_array, tgt_valid_len = build_array_nmt(target, tgt_vocab, num_steps)
    data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
    data_iter = d2l.load_array(data_arrays, batch_size)
    return data_iter, src_vocab, tgt_vocab
class Encoder(nn.Block):
    """The base encoder interface for the encoder-decoder architecture."""
    def __init__(self, **kwargs):
        super(Encoder, self).__init__(**kwargs)
    def forward(self, X, *args):
        raise NotImplementedError
class Decoder(nn.Block):
    """The base decoder interface for the encoder-decoder architecture.
    Defined in :numref:`sec_encoder-decoder`"""
    def __init__(self, **kwargs):
        super(Decoder, self).__init__(**kwargs)
    def init_state(self, enc_outputs, *args):
        raise NotImplementedError
    def forward(self, X, state):
        raise NotImplementedError
class EncoderDecoder(nn.Block):
    """The base class of the encoder-decoder architecture.
    Defined in :numref:`sec_encoder-decoder`"""
    def __init__(self, encoder, decoder, **kwargs):
        super(EncoderDecoder, self).__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder
    def forward(self, enc_X, dec_X, *args):
        enc_outputs = self.encoder(enc_X, *args)
        dec_state = self.decoder.init_state(enc_outputs, *args)
        return self.decoder(dec_X, dec_state)
class Seq2SeqEncoder(d2l.Encoder):
"""用于序列到序列学习的循环神经网络编码器
Defined in :numref:`sec_seq2seq`"""
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
dropout=0, **kwargs):
super(Seq2SeqEncoder, self).__init__(**kwargs)
# 嵌入层
self.embedding = nn.Embedding(vocab_size, embed_size)
self.rnn = rnn.GRU(num_hiddens, num_layers, dropout=dropout)
def forward(self, X, *args):
# 输出'X'的形状:(batch_size,num_steps,embed_size)
X = self.embedding(X)
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | true |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/d2l/paddle.py | d2l/paddle.py | ################# WARNING ################
# The below part is generated automatically through:
# d2lbook build lib
# Don't edit it directly
import collections
import hashlib
import math
import os
import random
import re
import shutil
import sys
import tarfile
import time
import zipfile
from collections import defaultdict
import pandas as pd
import requests
from IPython import display
from matplotlib import pyplot as plt
from matplotlib_inline import backend_inline
d2l = sys.modules[__name__]
import warnings
import numpy as np
warnings.filterwarnings("ignore")
import paddle
import paddle.vision as paddlevision
from paddle import nn
from paddle.nn import functional as F
from paddle.vision import transforms
from PIL import Image
paddle.disable_signal_handler()
def use_svg_display():
    """Use the svg format to display plots in Jupyter.
    Defined in :numref:`sec_calculus`"""
    backend_inline.set_matplotlib_formats('svg')
def set_figsize(figsize=(3.5, 2.5)):
    """Set the figure size for matplotlib.
    Defined in :numref:`sec_calculus`"""
    use_svg_display()
    d2l.plt.rcParams['figure.figsize'] = figsize
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    """Set the axes for matplotlib.
    Defined in :numref:`sec_calculus`"""
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
         ylim=None, xscale='linear', yscale='linear',
         fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
    """Plot data points.
    Defined in :numref:`sec_calculus`"""
    if legend is None:
        legend = []
    set_figsize(figsize)
    axes = axes if axes else d2l.plt.gca()
    # Return True if X has one axis
    def has_one_axis(X):
        return (hasattr(X, "ndim") and X.ndim == 1 or isinstance(X, list)
                and not hasattr(X[0], "__len__"))
    if has_one_axis(X):
        X = [X]
    if Y is None:
        X, Y = [[]] * len(X), X
    elif has_one_axis(Y):
        Y = [Y]
    if len(X) != len(Y):
        X = X * len(Y)
    axes.cla()
    for x, y, fmt in zip(X, Y, fmts):
        if len(x):
            axes.plot(x, y, fmt)
        else:
            axes.plot(y, fmt)
    set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
class Timer:
    """Record multiple running times."""
    def __init__(self):
        """Defined in :numref:`subsec_linear_model`"""
        self.times = []
        self.start()
    def start(self):
        """Start the timer."""
        self.tik = time.time()
    def stop(self):
        """Stop the timer, record the elapsed time, and return it."""
        elapsed = time.time() - self.tik
        self.times.append(elapsed)
        return elapsed
    def avg(self):
        """Return the average recorded time."""
        return sum(self.times) / len(self.times)
    def sum(self):
        """Return the total of the recorded times."""
        return sum(self.times)
    def cumsum(self):
        """Return the cumulative sums of the recorded times."""
        return np.array(self.times).cumsum().tolist()
def synthetic_data(w, b, num_examples):
    """Generate y = Xw + b + noise.
    Defined in :numref:`sec_linear_scratch`"""
    X = d2l.normal(0, 1, (num_examples, len(w)))
    y = d2l.matmul(X, w) + b
    y += d2l.normal(0, 0.01, y.shape)
    return X, d2l.reshape(y, (-1, 1))
def linreg(X, w, b):
    """The linear regression model.
    Defined in :numref:`sec_linear_scratch`"""
    return d2l.matmul(X, w) + b
def squared_loss(y_hat, y):
    """Squared loss.
    Defined in :numref:`sec_linear_scratch`"""
    return (y_hat - d2l.reshape(y, y_hat.shape)) ** 2 / 2
def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent.
    Defined in :numref:`sec_linear_scratch`"""
    with paddle.no_grad():
        for i, param in enumerate(params):
            param -= lr * params[i].grad / batch_size
            params[i].set_value(param)
            params[i].clear_gradient()
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a Paddle data iterator.
    Defined in :numref:`sec_linear_concise`"""
    dataset = paddle.io.TensorDataset(data_arrays)
    return paddle.io.DataLoader(dataset, batch_size=batch_size,
                                shuffle=is_train,
                                return_list=True)
def get_fashion_mnist_labels(labels):
    """Return the text labels of the Fashion-MNIST dataset.
    Defined in :numref:`sec_fashion_mnist`"""
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    names = []
    for label in labels:
        names.append(text_labels[int(label)])
    return names
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    """Plot a list of images.
    Defined in :numref:`sec_fashion_mnist`"""
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        if paddle.is_tensor(img):
            # Tensor image
            ax.imshow(img.numpy())
        else:
            # PIL image
            ax.imshow(img)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
def get_dataloader_workers():
    """Use 4 worker processes to read the data.
    Defined in :numref:`sec_fashion_mnist`"""
    num_workers = 4
    return num_workers
def load_data_fashion_mnist(batch_size, resize=None):
    """Download the Fashion-MNIST dataset and load it into memory.
    Defined in :numref:`sec_fashion_mnist`"""
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = paddle.vision.datasets.FashionMNIST(mode="train",
                                                      transform=trans)
    mnist_test = paddle.vision.datasets.FashionMNIST(mode="test",
                                                     transform=trans)
    return (paddle.io.DataLoader(dataset=mnist_train,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 return_list=True,
                                 num_workers=get_dataloader_workers()),
            paddle.io.DataLoader(dataset=mnist_test,
                                 batch_size=batch_size,
                                 return_list=True,
                                 shuffle=True,
                                 num_workers=get_dataloader_workers()))
def accuracy(y_hat, y):
    """Compute the number of correct predictions.
    NOTE(review): this definition is dead code — it is immediately shadowed
    by the re-definition of `accuracy` directly below it.
    Defined in :numref:`sec_softmax_scratch`"""
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = d2l.argmax(y_hat, axis=1)
    cmp = d2l.astype(y_hat, y.dtype) == y
    return float(d2l.reduce_sum(d2l.astype(cmp, y.dtype)))
def accuracy(y_hat, y):
    """Compute the number of correct predictions.
    Defined in :numref:`sec_softmax_scratch`"""
    predictions = y_hat
    if len(predictions.shape) > 1 and predictions.shape[1] > 1:
        # Collapse per-class scores to the index of the largest score per row.
        predictions = predictions.argmax(axis=1)
    # Drop a trailing singleton label axis when the shapes disagree.
    labels = y.squeeze() if len(predictions.shape) < len(y.shape) else y
    matches = predictions.astype(y.dtype) == labels
    return float(matches.astype(y.dtype).sum())
def evaluate_accuracy(net, data_iter):
    """Compute the accuracy of a model on the given dataset.
    Defined in :numref:`sec_softmax_scratch`"""
    if isinstance(net, paddle.nn.Layer):
        net.eval() # Set the model to evaluation mode
    metric = Accumulator(2) # number of correct predictions, total predictions
    with paddle.no_grad():
        for X, y in data_iter:
            metric.add(accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
class Accumulator:
    """Accumulate sums over n variables."""
    def __init__(self, n):
        """Defined in :numref:`sec_softmax_scratch`"""
        self.data = [0.0 for _ in range(n)]
    def add(self, *args):
        # Element-wise add each increment onto its running total.
        self.data = [total + float(inc) for total, inc in zip(self.data, args)]
    def reset(self):
        self.data = [0.0 for _ in range(len(self.data))]
    def __getitem__(self, idx):
        return self.data[idx]
def train_epoch_ch3(net, train_iter, loss, updater):
    """Train a model for one epoch (defined in Chapter 3).
    Defined in :numref:`sec_softmax_scratch`"""
    # Set the model to training mode
    if isinstance(net, paddle.nn.Layer):
        net.train()
    # Sum of training loss, sum of training accuracy, number of examples
    metric = Accumulator(3)
    for X, y in train_iter:
        # Compute gradients and update parameters
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, paddle.optimizer.Optimizer):
            # Use PaddlePaddle's built-in optimizer and loss function
            updater.clear_grad()
            l.mean().backward()
            updater.step()
        else:
            # Use the custom optimizer and loss function
            l.sum().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    return metric[0] / metric[2], metric[1] / metric[2]
class Animator:
    """Plot data in animation."""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        """Defined in :numref:`sec_softmax_scratch`"""
        # Incrementally plot multiple lines
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Use a lambda function to capture the axis-configuration arguments
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts
    def add(self, x, y):
        # Add multiple data points to the figure
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Train a model (defined in Chapter 3).
    Defined in :numref:`sec_softmax_scratch`"""
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    # Sanity checks for the book's examples; stripped under python -O.
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
def predict_ch3(net, test_iter, n=6):
    """Predict labels (defined in Chapter 3).
    Defined in :numref:`sec_softmax_scratch`"""
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(d2l.argmax(net(X), axis=1))
    titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(
        d2l.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])
def evaluate_loss(net, data_iter, loss):
    """Evaluate the loss of a model on the given dataset.
    Defined in :numref:`sec_model_selection`"""
    metric = d2l.Accumulator(2) # sum of losses, number of examples
    for X, y in data_iter:
        out = net(X)
        y = y.reshape(out.shape)
        l = loss(out, y)
        metric.add(l.sum(), l.numel())
    return metric[0] / metric[1]
# Registry of dataset name -> (URL, SHA-1 checksum).
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
def download(name, cache_dir=os.path.join('..', 'data')):
    """Download a file registered in DATA_HUB and return the local filename.
    Defined in :numref:`sec_kaggle_house`"""
    assert name in DATA_HUB, f"{name} 不存在于 {DATA_HUB}"
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        sha1 = hashlib.sha1()
        with open(fname, 'rb') as f:
            while True:
                data = f.read(1048576)
                if not data:
                    break
                sha1.update(data)
        if sha1.hexdigest() == sha1_hash:
            return fname # Cache hit
    print(f'正在从{url}下载{fname}...')
    r = requests.get(url, stream=True, verify=True)
    with open(fname, 'wb') as f:
        f.write(r.content)
    return fname
def download_extract(name, folder=None):
    """Download and extract a zip/tar file; return the extracted directory.
    Defined in :numref:`sec_kaggle_house`"""
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        fp = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        fp = tarfile.open(fname, 'r')
    else:
        assert False, '只有zip/tar文件可以被解压缩'
    fp.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir
def download_all():
    """Download every file registered in DATA_HUB.
    Defined in :numref:`sec_kaggle_house`"""
    for name in DATA_HUB:
        download(name)
# Kaggle house-price dataset entries: (URL, SHA-1 checksum).
DATA_HUB['kaggle_house_train'] = (
    DATA_URL + 'kaggle_house_pred_train.csv',
    '585e9cc93e70b39160e7921475f9bcd7d31219ce')
DATA_HUB['kaggle_house_test'] = (
    DATA_URL + 'kaggle_house_pred_test.csv',
    'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu().
    Defined in :numref:`sec_use_gpu`"""
    if paddle.device.cuda.device_count() >= i + 1:
        return paddle.CUDAPlace(i)
    return paddle.CPUPlace()
def try_all_gpus():
    """Return all available GPUs, or [cpu(),] if there is no GPU.
    Defined in :numref:`sec_use_gpu`"""
    devices = [paddle.CUDAPlace(i)
               for i in range(paddle.device.cuda.device_count())]
    # Bug fix: always return a list. The original returned a bare CPUPlace on
    # machines without a GPU, breaking callers that iterate/index the result
    # and contradicting both this docstring and the other framework backends.
    return devices if devices else [paddle.CPUPlace()]
def corr2d(X, K):
    """Compute the 2D cross-correlation of input X with kernel K.
    Defined in :numref:`sec_conv_layer`"""
    h, w = K.shape
    # Output shrinks by (kernel size - 1) in each dimension (no padding).
    Y = d2l.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = d2l.reduce_sum((X[i: i + h, j: j + w] * K))
    return Y
def evaluate_accuracy_gpu(net, data_iter, device=None):
    """Compute the accuracy of a model on a dataset using a GPU.
    Defined in :numref:`sec_lenet`"""
    if isinstance(net, nn.Layer):
        net.eval() # Set to evaluation mode
        if not device:
            device = next(iter(net.parameters())).place
    # NOTE(review): this extracts the device id from str(device), e.g.
    # 'Place(gpu:0)' — fragile for multi-digit ids; confirm before reuse.
    paddle.set_device("gpu:{}".format(str(device)[-2]))
    # Number of correct predictions, total number of predictions
    metric = d2l.Accumulator(2)
    with paddle.no_grad():
        for X, y in data_iter:
            if isinstance(X, list):
                # Required for BERT fine-tuning
                X = [paddle.to_tensor(x, place=device) for x in X]
            else:
                X = paddle.to_tensor(X, place=device)
            y = paddle.to_tensor(y, place=device)
            metric.add(d2l.accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
    """Train a model with a GPU (defined in Chapter 6).
    Defined in :numref:`sec_lenet`"""
    def init_weights(m):
        # Xavier-initialize linear and conv layers. The initializer instance
        # must be *called* on the parameter; the original passed `m.weight`
        # to the constructor (as `fan_in`), which never initialized anything.
        if type(m) == nn.Linear or type(m) == nn.Conv2D:
            nn.initializer.XavierUniform()(m.weight)
    net.apply(init_weights)
    print('training on', device)
    net.to(device)
    optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_iter)
    for epoch in range(num_epochs):
        # Sum of training loss, sum of training accuracy, number of examples
        metric = d2l.Accumulator(3)
        net.train()
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            optimizer.clear_grad()
            X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)
            y_hat = net(X)
            l = loss(y_hat, y)
            l.backward()
            optimizer.step()
            with paddle.no_grad():
                metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
          f'test acc {test_acc:.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(device)}')
class Residual(nn.Layer):
    """The residual block of ResNet (Paddle implementation)."""
    def __init__(self, input_channels, num_channels, use_1x1conv=False,
                 strides=1):
        super(Residual, self).__init__()
        self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3,
                               padding=1, stride=strides)
        self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3,
                               padding=1)
        if use_1x1conv:
            # 1x1 conv matches channels/stride so X can be added to Y below.
            self.conv3 = nn.Conv2D(input_channels, num_channels,
                                   kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2D(num_channels)
        self.bn2 = nn.BatchNorm2D(num_channels)
        self.relu = nn.ReLU()
    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        Y += X
        return F.relu(Y)
d2l.DATA_HUB['time_machine'] = (d2l.DATA_URL + 'timemachine.txt',
                                '090b5e7e70c295757f55df93cb0a180b9691891a')
def read_time_machine():
    """Load the time machine dataset into a list of text lines.
    Defined in :numref:`sec_text_preprocessing`"""
    with open(d2l.download('time_machine'), 'r') as f:
        lines = f.readlines()
    return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]
def tokenize(lines, token='word'):
    """Split text lines into word or character tokens.
    Defined in :numref:`sec_text_preprocessing`"""
    if token == 'word':
        return [line.split() for line in lines]
    elif token == 'char':
        return [list(line) for line in lines]
    else:
        print('错误:未知词元类型:' + token)
class Vocab:
    """Vocabulary for text."""
    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        """Defined in :numref:`sec_text_preprocessing`"""
        if tokens is None:
            tokens = []
        if reserved_tokens is None:
            reserved_tokens = []
        # Sort by frequency of occurrence
        counter = count_corpus(tokens)
        self._token_freqs = sorted(counter.items(), key=lambda x: x[1],
                                   reverse=True)
        # The index of the unknown token is 0
        self.idx_to_token = ['<unk>'] + reserved_tokens
        self.token_to_idx = {token: idx
                             for idx, token in enumerate(self.idx_to_token)}
        for token, freq in self._token_freqs:
            if freq < min_freq:
                # _token_freqs is sorted, so everything after is rarer too.
                break
            if token not in self.token_to_idx:
                self.idx_to_token.append(token)
                self.token_to_idx[token] = len(self.idx_to_token) - 1
    def __len__(self):
        return len(self.idx_to_token)
    def __getitem__(self, tokens):
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]
    def to_tokens(self, indices):
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]
    @property
    def unk(self): # The index of the unknown token is 0
        return 0
    @property
    def token_freqs(self):
        return self._token_freqs
def count_corpus(tokens):
    """Count token frequencies.
    Defined in :numref:`sec_text_preprocessing`"""
    # Here, tokens is a 1D list or a 2D list of token lines
    if len(tokens) == 0 or isinstance(tokens[0], list):
        # Flatten the list of token lines into a single list
        tokens = [token for line in tokens for token in line]
    return collections.Counter(tokens)
def load_corpus_time_machine(max_tokens=-1):
    """Return the token-index list and vocabulary of the time machine dataset.
    Defined in :numref:`sec_text_preprocessing`"""
    lines = read_time_machine()
    tokens = tokenize(lines, 'char')
    vocab = Vocab(tokens)
    # Since each text line of the time machine dataset is not necessarily a
    # sentence or a paragraph, flatten all lines into a single list
    corpus = [vocab[token] for line in tokens for token in line]
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocab
def seq_data_iter_random(corpus, batch_size, num_steps):
    """Generate a minibatch of subsequences using random sampling.
    Defined in :numref:`sec_language_model`"""
    # Partition the sequence starting from a random offset; the random range
    # includes num_steps - 1
    corpus = corpus[random.randint(0, num_steps - 1):]
    # Subtract 1 because we need to account for the labels
    num_subseqs = (len(corpus) - 1) // num_steps
    # Starting indices of subsequences of length num_steps
    initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
    # With random sampling, subsequences from two adjacent random minibatches
    # are not necessarily adjacent in the original sequence
    random.shuffle(initial_indices)
    def data(pos):
        # Return the sequence of length num_steps starting at pos
        return corpus[pos: pos + num_steps]
    num_batches = num_subseqs // batch_size
    for i in range(0, batch_size * num_batches, batch_size):
        # Here, initial_indices holds the random starting indices of subsequences
        initial_indices_per_batch = initial_indices[i: i + batch_size]
        X = [data(j) for j in initial_indices_per_batch]
        Y = [data(j + 1) for j in initial_indices_per_batch]
        yield d2l.tensor(X), d2l.tensor(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
    """Generate a minibatch of subsequences using sequential partitioning.
    Defined in :numref:`sec_language_model`"""
    # Partition the sequence starting from a random offset
    offset = random.randint(0, num_steps)
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = d2l.tensor(corpus[offset: offset + num_tokens])
    Ys = d2l.tensor(corpus[offset + 1: offset + 1 + num_tokens])
    Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))
    num_batches = Xs.shape[1] // num_steps
    for i in range(0, num_steps * num_batches, num_steps):
        X = Xs[:, i: i + num_steps]
        Y = Ys[:, i: i + num_steps]
        yield X, Y
class SeqDataLoader:
    """An iterator that loads sequence data."""
    def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
        """Defined in :numref:`sec_language_model`"""
        if use_random_iter:
            self.data_iter_fn = d2l.seq_data_iter_random
        else:
            self.data_iter_fn = d2l.seq_data_iter_sequential
        self.corpus, self.vocab = d2l.load_corpus_time_machine(max_tokens)
        self.batch_size, self.num_steps = batch_size, num_steps
    def __iter__(self):
        return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)
def load_data_time_machine(batch_size, num_steps,
                           use_random_iter=False, max_tokens=10000):
    """Return the iterator and the vocabulary of the time machine dataset.
    Defined in :numref:`sec_language_model`"""
    data_iter = SeqDataLoader(
        batch_size, num_steps, use_random_iter, max_tokens)
    return data_iter, data_iter.vocab
class RNNModelScratch:
"""从零开始实现的循环神经网络模型"""
def __init__(self, vocab_size, num_hiddens,
get_params, init_state, forward_fn):
"""Defined in :numref:`sec_rnn_scratch`"""
self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
self.params = get_params(vocab_size, num_hiddens)
self.init_state, self.forward_fn = init_state, forward_fn
def __call__(self, X, state):
X = F.one_hot(X.T, self.vocab_size)
return self.forward_fn(X, state, self.params)
def begin_state(self, batch_size):
return self.init_state(batch_size, self.num_hiddens)
def predict_ch8(prefix, num_preds, net, vocab, device):
"""在prefix后面生成新字符
Defined in :numref:`sec_rnn_scratch`"""
state = net.begin_state(batch_size=1)
outputs = [vocab[prefix[0]]]
get_input = lambda: d2l.reshape(d2l.tensor(outputs[-1], place=device), (1, 1))
for y in prefix[1:]: # 预热期
_, state = net(get_input(), state)
outputs.append(vocab[y])
for _ in range(num_preds): # 预测num_preds步
y, state = net(get_input(), state)
outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))
return ''.join([vocab.idx_to_token[i] for i in outputs])
def grad_clipping(net, theta):
"""裁剪梯度
Defined in :numref:`sec_rnn_scratch`"""
if isinstance(net, nn.Layer):
params = [p for p in net.parameters() if not p.stop_gradient]
else:
params = net.params
norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))
if norm > theta:
with paddle.no_grad():
for param in params:
param.grad.set_value(param.grad * theta / norm)
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
"""训练网络一个迭代周期(定义见第8章)
Defined in :numref:`sec_rnn_scratch`"""
state, timer = None, d2l.Timer()
metric = d2l.Accumulator(2) # 训练损失之和,词元数量
for X, Y in train_iter:
if state is None or use_random_iter:
# 在第一次迭代或使用随机抽样时初始化state
state = net.begin_state(batch_size=X.shape[0])
else:
if isinstance(net, nn.Layer) and not isinstance(state, tuple):
# state对于nn.GRU是个张量
state.stop_gradient=True
else:
# state对于nn.LSTM或对于我们从零开始实现的模型是个张量
for s in state:
s.stop_gradient=True
y = paddle.reshape(Y.T,shape=[-1])
X = paddle.to_tensor(X, place=device)
y = paddle.to_tensor(y, place=device)
y_hat, state = net(X, state)
l = loss(y_hat, y).mean()
if isinstance(updater, paddle.optimizer.Optimizer):
updater.clear_grad()
l.backward()
grad_clipping(net, 1)
updater.step()
else:
l.backward()
grad_clipping(net, 1)
# 因为已经调用了mean函数
updater(batch_size=1)
metric.add(l * d2l.size(y), d2l.size(y))
return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):
"""训练模型(定义见第8章)
Defined in :numref:`sec_rnn_scratch`"""
loss = nn.CrossEntropyLoss()
animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
legend=['train'], xlim=[10, num_epochs])
# 初始化
if isinstance(net, nn.Layer):
updater = paddle.optimizer.SGD(
learning_rate=lr, parameters=net.parameters())
else:
updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
# 训练和预测
for epoch in range(num_epochs):
ppl, speed = train_epoch_ch8(
net, train_iter, loss, updater, device, use_random_iter)
if (epoch + 1) % 10 == 0:
print(predict('time traveller'))
animator.add(epoch + 1, [ppl])
print(f'困惑度 {ppl:.1f}, {speed:.1f} 词元/秒 {str(device)}')
print(predict('time traveller'))
print(predict('traveller'))
class RNNModel(nn.Layer):
"""循环神经网络模型
Defined in :numref:`sec_rnn-concise`"""
def __init__(self, rnn_layer, vocab_size, **kwargs):
super(RNNModel, self).__init__(**kwargs)
self.rnn = rnn_layer
self.vocab_size = vocab_size
self.num_hiddens = self.rnn.hidden_size
# 如果RNN是双向的(之后将介绍),num_directions应该是2,否则应该是1
if self.rnn.num_directions==1:
self.num_directions = 1
self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
else:
self.num_directions = 2
self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)
def forward(self, inputs, state):
X = F.one_hot(inputs.T, self.vocab_size)
Y, state = self.rnn(X, state)
# 全连接层首先将Y的形状改为(时间步数*批量大小,隐藏单元数)
# 它的输出形状是(时间步数*批量大小,词表大小)。
output = self.linear(Y.reshape((-1, Y.shape[-1])))
return output, state
def begin_state(self, batch_size=1):
if not isinstance(self.rnn, nn.LSTM):
# nn.GRU以张量作为隐状态
return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens])
else:
# nn.LSTM以元组作为隐状态
return (paddle.zeros(
shape=[self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens]),
paddle.zeros(
shape=[self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens]))
d2l.DATA_HUB['fra-eng'] = (d2l.DATA_URL + 'fra-eng.zip',
'94646ad1522d915e7b0f9296181140edcf86a4f5')
def read_data_nmt():
"""载入“英语-法语”数据集
Defined in :numref:`sec_machine_translation`"""
data_dir = d2l.download_extract('fra-eng')
with open(os.path.join(data_dir, 'fra.txt'), 'r',
encoding='utf-8') as f:
return f.read()
def preprocess_nmt(text):
    """Preprocess the English-French dataset.

    Defined in :numref:`sec_machine_translation`"""
    def _needs_space(ch, prev):
        # A space is inserted before punctuation that directly follows
        # a non-space character.
        return ch in ',.!?' and prev != ' '

    # Replace non-breaking spaces with regular spaces and lowercase.
    text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
    # Insert a space between words and punctuation marks.
    chars = []
    for i, ch in enumerate(text):
        if i > 0 and _needs_space(ch, text[i - 1]):
            chars.append(' ')
        chars.append(ch)
    return ''.join(chars)
def tokenize_nmt(text, num_examples=None):
    """Tokenize the English-French dataset.

    Each line of `text` is "english<TAB>french"; tokens within each side
    are separated by single spaces.

    Args:
        text: the raw dataset as one string.
        num_examples: optional cap on the number of sentence pairs.

    Returns:
        A (source, target) pair of lists of token lists.

    Defined in :numref:`sec_machine_translation`"""
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        # Fix: the original tested `i > num_examples`, which returned one
        # more sentence pair than requested.
        if num_examples and i >= num_examples:
            break
        parts = line.split('\t')
        if len(parts) == 2:
            source.append(parts[0].split(' '))
            target.append(parts[1].split(' '))
    return source, target
def show_list_len_pair_hist(legend, xlabel, ylabel, xlist, ylist):
"""绘制列表长度对的直方图
Defined in :numref:`sec_machine_translation`"""
d2l.set_figsize()
_, _, patches = d2l.plt.hist(
[[len(l) for l in xlist], [len(l) for l in ylist]])
d2l.plt.xlabel(xlabel)
d2l.plt.ylabel(ylabel)
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | true |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/d2l/__init__.py | d2l/__init__.py | """Saved source code for "Dive into Deep Learing" (https://d2l.ai).
Please import d2l by one of the following ways:
from d2l import mxnet as d2l # Use MXNet as the backend
from d2l import torch as d2l # Use PyTorch as the backend
from d2l import tensorflow as d2l # Use TensorFlow as the backend
from d2l import paddle as d2l # Use Paddle as the backend
"""
__version__ = "2.0.0"
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | false |
d2l-ai/d2l-zh | https://github.com/d2l-ai/d2l-zh/blob/e6b18ccea71451a55fcd861d7b96fddf2587b09a/d2l/torch.py | d2l/torch.py | DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
import numpy as np
import torch
import torchvision
from PIL import Image
from torch import nn
from torch.nn import functional as F
from torch.utils import data
from torchvision import transforms
nn_Module = nn.Module
################# WARNING ################
# The below part is generated automatically through:
# d2lbook build lib
# Don't edit it directly
import collections
import hashlib
import math
import os
import random
import re
import shutil
import sys
import tarfile
import time
import zipfile
from collections import defaultdict
import pandas as pd
import requests
from IPython import display
from matplotlib import pyplot as plt
from matplotlib_inline import backend_inline
d2l = sys.modules[__name__]
import numpy as np
import torch
import torchvision
from PIL import Image
from torch import nn
from torch.nn import functional as F
from torch.utils import data
from torchvision import transforms
def use_svg_display():
    """Use the svg format to display plots in Jupyter.

    Defined in :numref:`sec_calculus`"""
    backend_inline.set_matplotlib_formats('svg')
def set_figsize(figsize=(3.5, 2.5)):
"""设置matplotlib的图表大小
Defined in :numref:`sec_calculus`"""
use_svg_display()
d2l.plt.rcParams['figure.figsize'] = figsize
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
"""设置matplotlib的轴
Defined in :numref:`sec_calculus`"""
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
if legend:
axes.legend(legend)
axes.grid()
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
ylim=None, xscale='linear', yscale='linear',
fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
"""绘制数据点
Defined in :numref:`sec_calculus`"""
if legend is None:
legend = []
set_figsize(figsize)
axes = axes if axes else d2l.plt.gca()
# 如果X有一个轴,输出True
def has_one_axis(X):
return (hasattr(X, "ndim") and X.ndim == 1 or isinstance(X, list)
and not hasattr(X[0], "__len__"))
if has_one_axis(X):
X = [X]
if Y is None:
X, Y = [[]] * len(X), X
elif has_one_axis(Y):
Y = [Y]
if len(X) != len(Y):
X = X * len(Y)
axes.cla()
for x, y, fmt in zip(X, Y, fmts):
if len(x):
axes.plot(x, y, fmt)
else:
axes.plot(y, fmt)
set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
class Timer:
    """Record multiple running times."""
    def __init__(self):
        """Defined in :numref:`subsec_linear_model`"""
        self.times = []
        self.start()

    def start(self):
        """Start the timer."""
        self.tik = time.time()

    def stop(self):
        """Stop the timer, record the elapsed time, and return it."""
        elapsed = time.time() - self.tik
        self.times.append(elapsed)
        return elapsed

    def avg(self):
        """Return the average recorded time."""
        return sum(self.times) / len(self.times)

    def sum(self):
        """Return the total of all recorded times."""
        return sum(self.times)

    def cumsum(self):
        """Return the running sum of recorded times as a list."""
        return np.array(self.times).cumsum().tolist()
def synthetic_data(w, b, num_examples):
"""生成y=Xw+b+噪声
Defined in :numref:`sec_linear_scratch`"""
X = d2l.normal(0, 1, (num_examples, len(w)))
y = d2l.matmul(X, w) + b
y += d2l.normal(0, 0.01, y.shape)
return X, d2l.reshape(y, (-1, 1))
def linreg(X, w, b):
"""线性回归模型
Defined in :numref:`sec_linear_scratch`"""
return d2l.matmul(X, w) + b
def squared_loss(y_hat, y):
"""均方损失
Defined in :numref:`sec_linear_scratch`"""
return (y_hat - d2l.reshape(y, y_hat.shape)) ** 2 / 2
def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent.

    Updates each parameter in place by one SGD step and then resets its
    gradient to zero.

    Defined in :numref:`sec_linear_scratch`"""
    with torch.no_grad():
        for p in params:
            p.sub_(lr * p.grad / batch_size)
            p.grad.zero_()
def load_array(data_arrays, batch_size, is_train=True):
    """Construct a PyTorch data iterator over in-memory tensors.

    Defined in :numref:`sec_linear_concise`"""
    # Wrap the tensors in a TensorDataset; shuffle only during training.
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size=batch_size, shuffle=is_train)
def get_fashion_mnist_labels(labels):
    """Return the text labels for the Fashion-MNIST dataset.

    Defined in :numref:`sec_fashion_mnist`"""
    text_labels = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    names = []
    for label in labels:
        # Labels may arrive as tensors/floats; coerce to an int index.
        names.append(text_labels[int(label)])
    return names
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
"""绘制图像列表
Defined in :numref:`sec_fashion_mnist`"""
figsize = (num_cols * scale, num_rows * scale)
_, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
axes = axes.flatten()
for i, (ax, img) in enumerate(zip(axes, imgs)):
if torch.is_tensor(img):
# 图片张量
ax.imshow(img.numpy())
else:
# PIL图片
ax.imshow(img)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if titles:
ax.set_title(titles[i])
return axes
def get_dataloader_workers():
    """Use 4 worker processes to read the data.

    Defined in :numref:`sec_fashion_mnist`"""
    return 4
def load_data_fashion_mnist(batch_size, resize=None):
"""下载Fashion-MNIST数据集,然后将其加载到内存中
Defined in :numref:`sec_fashion_mnist`"""
trans = [transforms.ToTensor()]
if resize:
trans.insert(0, transforms.Resize(resize))
trans = transforms.Compose(trans)
mnist_train = torchvision.datasets.FashionMNIST(
root="../data", train=True, transform=trans, download=True)
mnist_test = torchvision.datasets.FashionMNIST(
root="../data", train=False, transform=trans, download=True)
return (data.DataLoader(mnist_train, batch_size, shuffle=True,
num_workers=get_dataloader_workers()),
data.DataLoader(mnist_test, batch_size, shuffle=False,
num_workers=get_dataloader_workers()))
def accuracy(y_hat, y):
"""计算预测正确的数量
Defined in :numref:`sec_softmax_scratch`"""
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = d2l.argmax(y_hat, axis=1)
cmp = d2l.astype(y_hat, y.dtype) == y
return float(d2l.reduce_sum(d2l.astype(cmp, y.dtype)))
def evaluate_accuracy(net, data_iter):
"""计算在指定数据集上模型的精度
Defined in :numref:`sec_softmax_scratch`"""
if isinstance(net, torch.nn.Module):
net.eval() # 将模型设置为评估模式
metric = Accumulator(2) # 正确预测数、预测总数
with torch.no_grad():
for X, y in data_iter:
metric.add(accuracy(net(X), y), d2l.size(y))
return metric[0] / metric[1]
class Accumulator:
    """Accumulate sums over `n` variables."""
    def __init__(self, n):
        """Defined in :numref:`sec_softmax_scratch`"""
        self.data = [0.0 for _ in range(n)]

    def add(self, *args):
        # Add each argument to the corresponding running sum.
        self.data = [total + float(arg)
                     for total, arg in zip(self.data, args)]

    def reset(self):
        # Zero out every running sum.
        self.data = [0.0 for _ in self.data]

    def __getitem__(self, idx):
        return self.data[idx]
def train_epoch_ch3(net, train_iter, loss, updater):
"""训练模型一个迭代周期(定义见第3章)
Defined in :numref:`sec_softmax_scratch`"""
# 将模型设置为训练模式
if isinstance(net, torch.nn.Module):
net.train()
# 训练损失总和、训练准确度总和、样本数
metric = Accumulator(3)
for X, y in train_iter:
# 计算梯度并更新参数
y_hat = net(X)
l = loss(y_hat, y)
if isinstance(updater, torch.optim.Optimizer):
# 使用PyTorch内置的优化器和损失函数
updater.zero_grad()
l.mean().backward()
updater.step()
else:
# 使用定制的优化器和损失函数
l.sum().backward()
updater(X.shape[0])
metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
# 返回训练损失和训练精度
return metric[0] / metric[2], metric[1] / metric[2]
class Animator:
"""在动画中绘制数据"""
def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
ylim=None, xscale='linear', yscale='linear',
fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
figsize=(3.5, 2.5)):
"""Defined in :numref:`sec_softmax_scratch`"""
# 增量地绘制多条线
if legend is None:
legend = []
d2l.use_svg_display()
self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
if nrows * ncols == 1:
self.axes = [self.axes, ]
# 使用lambda函数捕获参数
self.config_axes = lambda: d2l.set_axes(
self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
self.X, self.Y, self.fmts = None, None, fmts
def add(self, x, y):
# 向图表中添加多个数据点
if not hasattr(y, "__len__"):
y = [y]
n = len(y)
if not hasattr(x, "__len__"):
x = [x] * n
if not self.X:
self.X = [[] for _ in range(n)]
if not self.Y:
self.Y = [[] for _ in range(n)]
for i, (a, b) in enumerate(zip(x, y)):
if a is not None and b is not None:
self.X[i].append(a)
self.Y[i].append(b)
self.axes[0].cla()
for x, y, fmt in zip(self.X, self.Y, self.fmts):
self.axes[0].plot(x, y, fmt)
self.config_axes()
display.display(self.fig)
display.clear_output(wait=True)
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
"""训练模型(定义见第3章)
Defined in :numref:`sec_softmax_scratch`"""
animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
legend=['train loss', 'train acc', 'test acc'])
for epoch in range(num_epochs):
train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
test_acc = evaluate_accuracy(net, test_iter)
animator.add(epoch + 1, train_metrics + (test_acc,))
train_loss, train_acc = train_metrics
assert train_loss < 0.5, train_loss
assert train_acc <= 1 and train_acc > 0.7, train_acc
assert test_acc <= 1 and test_acc > 0.7, test_acc
def predict_ch3(net, test_iter, n=6):
"""预测标签(定义见第3章)
Defined in :numref:`sec_softmax_scratch`"""
for X, y in test_iter:
break
trues = d2l.get_fashion_mnist_labels(y)
preds = d2l.get_fashion_mnist_labels(d2l.argmax(net(X), axis=1))
titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
d2l.show_images(
d2l.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])
def evaluate_loss(net, data_iter, loss):
"""评估给定数据集上模型的损失
Defined in :numref:`sec_model_selection`"""
metric = d2l.Accumulator(2) # 损失的总和,样本数量
for X, y in data_iter:
out = net(X)
y = d2l.reshape(y, out.shape)
l = loss(out, y)
metric.add(d2l.reduce_sum(l), d2l.size(l))
return metric[0] / metric[1]
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
def download(name, cache_dir=os.path.join('..', 'data')):
    """Download a file from DATA_HUB and return the local filename.

    The download is skipped when a cached copy with a matching SHA-1
    digest already exists under `cache_dir`.

    Args:
        name: key into `DATA_HUB`, mapping to a `(url, sha1_hash)` pair.
        cache_dir: directory in which downloads are cached.

    Returns:
        The path of the local file.

    Defined in :numref:`sec_kaggle_house`"""
    assert name in DATA_HUB, f"{name} 不存在于 {DATA_HUB}"
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        sha1 = hashlib.sha1()
        with open(fname, 'rb') as f:
            while True:
                data = f.read(1048576)
                if not data:
                    break
                sha1.update(data)
        if sha1.hexdigest() == sha1_hash:
            return fname  # Cache hit
    print(f'正在从{url}下载{fname}...')
    r = requests.get(url, stream=True, verify=True)
    # Fail loudly on HTTP errors instead of caching an error page.
    r.raise_for_status()
    with open(fname, 'wb') as f:
        # Stream in chunks so large files are not buffered entirely in
        # memory (the original wrote `r.content`, defeating `stream=True`).
        for chunk in r.iter_content(chunk_size=1048576):
            f.write(chunk)
    return fname
def download_extract(name, folder=None):
    """Download and extract a zip/tar file.

    Args:
        name: key into `DATA_HUB`, forwarded to `download`.
        folder: optional subdirectory name to return instead of the
            archive's stem.

    Returns:
        The directory containing the extracted data.

    Defined in :numref:`sec_kaggle_house`"""
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        fp = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        fp = tarfile.open(fname, 'r')
    else:
        assert False, '只有zip/tar文件可以被解压缩'
    # Close the archive handle after extraction (the original leaked it).
    with fp:
        fp.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir
def download_all():
"""下载DATA_HUB中的所有文件
Defined in :numref:`sec_kaggle_house`"""
for name in DATA_HUB:
download(name)
DATA_HUB['kaggle_house_train'] = (
DATA_URL + 'kaggle_house_pred_train.csv',
'585e9cc93e70b39160e7921475f9bcd7d31219ce')
DATA_HUB['kaggle_house_test'] = (
DATA_URL + 'kaggle_house_pred_test.csv',
'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
def try_gpu(i=0):
    """Return gpu(i) if it exists, otherwise return cpu().

    Defined in :numref:`sec_use_gpu`"""
    if torch.cuda.device_count() < i + 1:
        return torch.device('cpu')
    return torch.device(f'cuda:{i}')
def try_all_gpus():
    """Return all available GPUs, or [cpu(),] if there is no GPU.

    Defined in :numref:`sec_use_gpu`"""
    num_gpus = torch.cuda.device_count()
    if num_gpus == 0:
        return [torch.device('cpu')]
    return [torch.device(f'cuda:{i}') for i in range(num_gpus)]
def corr2d(X, K):
    """Compute the 2D cross-correlation of input `X` with kernel `K`.

    Defined in :numref:`sec_conv_layer`"""
    kh, kw = K.shape
    out_h = X.shape[0] - kh + 1
    out_w = X.shape[1] - kw + 1
    Y = torch.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            # Elementwise product of the window with the kernel, summed.
            Y[i, j] = (X[i: i + kh, j: j + kw] * K).sum()
    return Y
def evaluate_accuracy_gpu(net, data_iter, device=None):
"""使用GPU计算模型在数据集上的精度
Defined in :numref:`sec_lenet`"""
if isinstance(net, nn.Module):
net.eval() # 设置为评估模式
if not device:
device = next(iter(net.parameters())).device
# 正确预测的数量,总预测的数量
metric = d2l.Accumulator(2)
with torch.no_grad():
for X, y in data_iter:
if isinstance(X, list):
# BERT微调所需的(之后将介绍)
X = [x.to(device) for x in X]
else:
X = X.to(device)
y = y.to(device)
metric.add(d2l.accuracy(net(X), y), d2l.size(y))
return metric[0] / metric[1]
def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
"""用GPU训练模型(在第六章定义)
Defined in :numref:`sec_lenet`"""
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
nn.init.xavier_uniform_(m.weight)
net.apply(init_weights)
print('training on', device)
net.to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
loss = nn.CrossEntropyLoss()
animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
legend=['train loss', 'train acc', 'test acc'])
timer, num_batches = d2l.Timer(), len(train_iter)
for epoch in range(num_epochs):
# 训练损失之和,训练准确率之和,样本数
metric = d2l.Accumulator(3)
net.train()
for i, (X, y) in enumerate(train_iter):
timer.start()
optimizer.zero_grad()
X, y = X.to(device), y.to(device)
y_hat = net(X)
l = loss(y_hat, y)
l.backward()
optimizer.step()
with torch.no_grad():
metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
timer.stop()
train_l = metric[0] / metric[2]
train_acc = metric[1] / metric[2]
if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
animator.add(epoch + (i + 1) / num_batches,
(train_l, train_acc, None))
test_acc = evaluate_accuracy_gpu(net, test_iter)
animator.add(epoch + 1, (None, None, test_acc))
print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
f'test acc {test_acc:.3f}')
print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
f'on {str(device)}')
class Residual(nn.Module):
    """The residual block of ResNet."""
    def __init__(self, input_channels, num_channels,
                 use_1x1conv=False, strides=1):
        super().__init__()
        # Two 3x3 convolutions; the first may downsample via `strides`.
        self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3,
                               padding=1, stride=strides)
        self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3,
                               padding=1)
        # Optional 1x1 convolution to match shapes on the skip connection.
        self.conv3 = (nn.Conv2d(input_channels, num_channels, kernel_size=1,
                                stride=strides)
                      if use_1x1conv else None)
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)

    def forward(self, X):
        out = F.relu(self.bn1(self.conv1(X)))
        out = self.bn2(self.conv2(out))
        identity = self.conv3(X) if self.conv3 else X
        out += identity
        return F.relu(out)
d2l.DATA_HUB['time_machine'] = (d2l.DATA_URL + 'timemachine.txt',
'090b5e7e70c295757f55df93cb0a180b9691891a')
def read_time_machine():
"""将时间机器数据集加载到文本行的列表中
Defined in :numref:`sec_text_preprocessing`"""
with open(d2l.download('time_machine'), 'r') as f:
lines = f.readlines()
return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]
def tokenize(lines, token='word'):
    """Split text lines into word or character tokens.

    Args:
        lines: list of text lines.
        token: 'word' to split on whitespace, 'char' to split into
            individual characters.

    Returns:
        A list of token lists, one per input line.

    Raises:
        ValueError: if `token` is neither 'word' nor 'char'.

    Defined in :numref:`sec_text_preprocessing`"""
    if token == 'word':
        return [line.split() for line in lines]
    if token == 'char':
        return [list(line) for line in lines]
    # The original printed an error and fell through to an implicit `None`,
    # which surfaced later as a confusing TypeError; fail fast instead.
    raise ValueError('错误:未知词元类型:' + token)
class Vocab:
    """Vocabulary for text."""
    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        """Defined in :numref:`sec_text_preprocessing`"""
        if tokens is None:
            tokens = []
        if reserved_tokens is None:
            reserved_tokens = []
        # Count token frequencies; `tokens` may be a 1D token list or a
        # 2D list of token lines, so flatten the 2D case first.
        if len(tokens) == 0 or isinstance(tokens[0], list):
            flat = [tok for line in tokens for tok in line]
        else:
            flat = tokens
        counter = collections.Counter(flat)
        self._token_freqs = sorted(counter.items(), key=lambda kv: kv[1],
                                   reverse=True)
        # The unknown token gets index 0, followed by reserved tokens.
        self.idx_to_token = ['<unk>'] + reserved_tokens
        self.token_to_idx = {tok: idx
                             for idx, tok in enumerate(self.idx_to_token)}
        for tok, freq in self._token_freqs:
            if freq < min_freq:
                break
            if tok not in self.token_to_idx:
                self.idx_to_token.append(tok)
                self.token_to_idx[tok] = len(self.idx_to_token) - 1

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        if isinstance(tokens, (list, tuple)):
            return [self.__getitem__(tok) for tok in tokens]
        return self.token_to_idx.get(tokens, self.unk)

    def to_tokens(self, indices):
        if isinstance(indices, (list, tuple)):
            return [self.idx_to_token[i] for i in indices]
        return self.idx_to_token[indices]

    @property
    def unk(self):  # The index of the unknown token is 0
        return 0

    @property
    def token_freqs(self):
        return self._token_freqs
def count_corpus(tokens):
    """Count token frequencies.

    Defined in :numref:`sec_text_preprocessing`"""
    # `tokens` is either a 1D list of tokens or a 2D list of token lines;
    # flatten the 2D case into a single list before counting.
    if tokens and isinstance(tokens[0], list):
        tokens = [tok for line in tokens for tok in line]
    return collections.Counter(tokens)
def load_corpus_time_machine(max_tokens=-1):
    """Return the token-index list and vocabulary of the time machine dataset.

    Defined in :numref:`sec_text_preprocessing`"""
    lines = read_time_machine()
    tokens = tokenize(lines, 'char')
    vocab = Vocab(tokens)
    # Each text line in the time machine dataset is not necessarily a
    # sentence or paragraph, so flatten all lines into a single list.
    corpus = [vocab[token] for line in tokens for token in line]
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocab
def seq_data_iter_random(corpus, batch_size, num_steps):
    """Generate minibatches of subsequences using random sampling.

    Defined in :numref:`sec_language_model`"""
    # Partition from a random offset; the random range includes
    # num_steps - 1 so different epochs see different partitions.
    corpus = corpus[random.randint(0, num_steps - 1):]
    # Subtract 1 because the labels are the inputs shifted by one token.
    num_subseqs = (len(corpus) - 1) // num_steps
    # Starting indices of subsequences of length num_steps, shuffled so
    # that subsequences in adjacent minibatches need not be adjacent in
    # the original sequence.
    starts = list(range(0, num_subseqs * num_steps, num_steps))
    random.shuffle(starts)

    def subseq(pos):
        # The length-num_steps subsequence beginning at `pos`.
        return corpus[pos: pos + num_steps]

    num_batches = num_subseqs // batch_size
    for batch in range(num_batches):
        batch_starts = starts[batch * batch_size: (batch + 1) * batch_size]
        X = [subseq(start) for start in batch_starts]
        Y = [subseq(start + 1) for start in batch_starts]
        yield torch.tensor(X), torch.tensor(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
    """Generate minibatches of subsequences using sequential partitioning.

    Defined in :numref:`sec_language_model`"""
    # Partition the sequence starting from a random offset.
    offset = random.randint(0, num_steps)
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = torch.tensor(corpus[offset: offset + num_tokens])
    Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])
    Xs = Xs.reshape(batch_size, -1)
    Ys = Ys.reshape(batch_size, -1)
    num_batches = Xs.shape[1] // num_steps
    for batch in range(num_batches):
        col = batch * num_steps
        yield Xs[:, col: col + num_steps], Ys[:, col: col + num_steps]
class SeqDataLoader:
    """An iterator that loads sequence data."""
    def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
        """Defined in :numref:`sec_language_model`"""
        if use_random_iter:
            self.data_iter_fn = d2l.seq_data_iter_random
        else:
            self.data_iter_fn = d2l.seq_data_iter_sequential
        self.corpus, self.vocab = d2l.load_corpus_time_machine(max_tokens)
        self.batch_size, self.num_steps = batch_size, num_steps
    def __iter__(self):
        # Build a fresh minibatch generator on every epoch.
        return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)
def load_data_time_machine(batch_size, num_steps,
use_random_iter=False, max_tokens=10000):
"""返回时光机器数据集的迭代器和词表
Defined in :numref:`sec_language_model`"""
data_iter = SeqDataLoader(
batch_size, num_steps, use_random_iter, max_tokens)
return data_iter, data_iter.vocab
class RNNModelScratch:
    """An RNN model implemented from scratch."""
    def __init__(self, vocab_size, num_hiddens, device,
                 get_params, init_state, forward_fn):
        """Defined in :numref:`sec_rnn_scratch`"""
        self.vocab_size = vocab_size
        self.num_hiddens = num_hiddens
        # Parameters are created on `device`; the state and forward
        # functions are supplied by the caller so different recurrent
        # cells can reuse this wrapper.
        self.params = get_params(vocab_size, num_hiddens, device)
        self.init_state = init_state
        self.forward_fn = forward_fn

    def __call__(self, X, state):
        # One-hot encode to (num_steps, batch_size, vocab_size) floats.
        inputs = F.one_hot(X.T, self.vocab_size).type(torch.float32)
        return self.forward_fn(inputs, state, self.params)

    def begin_state(self, batch_size, device):
        return self.init_state(batch_size, self.num_hiddens, device)
def predict_ch8(prefix, num_preds, net, vocab, device):
    """Generate new characters following `prefix`.

    Defined in :numref:`sec_rnn_scratch`"""
    state = net.begin_state(batch_size=1, device=device)
    outputs = [vocab[prefix[0]]]
    # The most recently emitted token, as a (1, 1) tensor on `device`.
    get_input = lambda: d2l.reshape(d2l.tensor(
        [outputs[-1]], device=device), (1, 1))
    for y in prefix[1:]:  # Warm-up: feed the prefix, only updating state
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):  # Predict num_preds steps greedily
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(dim=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])
def grad_clipping(net, theta):
    """Clip the global gradient norm to at most `theta`.

    Computes the L2 norm over all trainable parameter gradients and
    rescales them in place when the norm exceeds `theta`.

    Defined in :numref:`sec_rnn_scratch`"""
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    total = sum(torch.sum(p.grad ** 2) for p in params)
    norm = torch.sqrt(total)
    if norm > theta:
        scale = theta / norm
        for p in params:
            p.grad[:] *= scale
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    """Train a network for one epoch (defined in Chapter 8).

    Returns the perplexity and the training speed in tokens per second.

    Defined in :numref:`sec_rnn_scratch`"""
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)  # Sum of training loss, number of tokens
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize state on the first iteration, or on every
            # iteration under random sampling (minibatches are not
            # contiguous, so the state cannot be carried over).
            state = net.begin_state(batch_size=X.shape[0], device=device)
        else:
            if isinstance(net, nn.Module) and not isinstance(state, tuple):
                # state is a single tensor for nn.GRU
                state.detach_()
            else:
                # state is a tuple of tensors for nn.LSTM and for our
                # model implemented from scratch; detach each element so
                # gradients do not flow into previous minibatches.
                for s in state:
                    s.detach_()
        y = Y.T.reshape(-1)
        X, y = X.to(device), y.to(device)
        y_hat, state = net(X, state)
        l = loss(y_hat, y.long()).mean()
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            grad_clipping(net, 1)
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            # batch_size=1 because the loss has already been averaged
            updater(batch_size=1)
        metric.add(l * d2l.size(y), d2l.size(y))
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
def train_ch8(net, train_iter, vocab, lr, num_epochs, device,
use_random_iter=False):
"""训练模型(定义见第8章)
Defined in :numref:`sec_rnn_scratch`"""
loss = nn.CrossEntropyLoss()
animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
legend=['train'], xlim=[10, num_epochs])
# 初始化
if isinstance(net, nn.Module):
updater = torch.optim.SGD(net.parameters(), lr)
else:
updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
# 训练和预测
for epoch in range(num_epochs):
ppl, speed = train_epoch_ch8(
net, train_iter, loss, updater, device, use_random_iter)
if (epoch + 1) % 10 == 0:
print(predict('time traveller'))
animator.add(epoch + 1, [ppl])
print(f'困惑度 {ppl:.1f}, {speed:.1f} 词元/秒 {str(device)}')
print(predict('time traveller'))
print(predict('traveller'))
class RNNModel(nn.Module):
    """The RNN model.

    Defined in :numref:`sec_rnn-concise`"""
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        # A bidirectional RNN (introduced later) doubles the feature size
        # fed into the output layer; num_directions is 2 in that case,
        # otherwise 1.
        self.num_directions = 2 if self.rnn.bidirectional else 1
        self.linear = nn.Linear(self.num_hiddens * self.num_directions,
                                self.vocab_size)

    def forward(self, inputs, state):
        # (batch, steps) -> one-hot (steps, batch, vocab_size) as float32.
        X = F.one_hot(inputs.T.long(), self.vocab_size).to(torch.float32)
        Y, state = self.rnn(X, state)
        # The fully connected layer first reshapes Y to
        # (steps * batch, hiddens); its output has shape
        # (steps * batch, vocab_size).
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state

    def begin_state(self, device, batch_size=1):
        shape = (self.num_directions * self.rnn.num_layers,
                 batch_size, self.num_hiddens)
        if isinstance(self.rnn, nn.LSTM):
            # nn.LSTM uses a (hidden, cell) tuple as its state.
            return (torch.zeros(shape, device=device),
                    torch.zeros(shape, device=device))
        # nn.GRU (and nn.RNN) use a single tensor as the state.
        return torch.zeros(shape, device=device)
d2l.DATA_HUB['fra-eng'] = (d2l.DATA_URL + 'fra-eng.zip',
'94646ad1522d915e7b0f9296181140edcf86a4f5')
def read_data_nmt():
"""载入“英语-法语”数据集
Defined in :numref:`sec_machine_translation`"""
data_dir = d2l.download_extract('fra-eng')
with open(os.path.join(data_dir, 'fra.txt'), 'r',
encoding='utf-8') as f:
return f.read()
def preprocess_nmt(text):
    """Preprocess the English-French dataset.

    Defined in :numref:`sec_machine_translation`"""
    def _needs_space(ch, prev):
        # A space is inserted before punctuation that directly follows
        # a non-space character.
        return ch in ',.!?' and prev != ' '

    # Replace non-breaking spaces with regular spaces and lowercase.
    text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
    # Insert a space between words and punctuation marks.
    chars = []
    for i, ch in enumerate(text):
        if i > 0 and _needs_space(ch, text[i - 1]):
            chars.append(' ')
        chars.append(ch)
    return ''.join(chars)
def tokenize_nmt(text, num_examples=None):
    """Tokenize the English-French dataset.

    Each line of `text` is "english<TAB>french"; tokens within each side
    are separated by single spaces.

    Args:
        text: the raw dataset as one string.
        num_examples: optional cap on the number of sentence pairs.

    Returns:
        A (source, target) pair of lists of token lists.

    Defined in :numref:`sec_machine_translation`"""
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        # Fix: the original tested `i > num_examples`, which returned one
        # more sentence pair than requested.
        if num_examples and i >= num_examples:
            break
        parts = line.split('\t')
        if len(parts) == 2:
            source.append(parts[0].split(' '))
            target.append(parts[1].split(' '))
    return source, target
def show_list_len_pair_hist(legend, xlabel, ylabel, xlist, ylist):
    """Plot a histogram of the lengths of two lists of sequences.

    Defined in :numref:`sec_machine_translation`"""
    d2l.set_figsize()
    _, _, patches = d2l.plt.hist(
        [[len(l) for l in xlist], [len(l) for l in ylist]])
    d2l.plt.xlabel(xlabel)
    d2l.plt.ylabel(ylabel)
    # Hatch the second series' bars so the two distributions are distinguishable.
    for patch in patches[1].patches:
        patch.set_hatch('/')
    d2l.plt.legend(legend)
def truncate_pad(line, num_steps, padding_token):
    """Truncate or pad a token sequence to exactly ``num_steps`` items.

    Defined in :numref:`sec_machine_translation`"""
    # Over-pad, then slice: covers both the too-long and too-short cases.
    return (line + [padding_token] * num_steps)[:num_steps]
def build_array_nmt(lines, vocab, num_steps):
    """Transform sequences of the machine translation corpus into minibatches.

    Defined in :numref:`subsec_mt_data_loading`"""
    # Map token lists to index lists (vocab.__getitem__ handles lists --
    # assumed from the d2l Vocab interface; confirm).
    lines = [vocab[l] for l in lines]
    # Append <eos> to every sequence before truncating/padding.
    lines = [l + [vocab['<eos>']] for l in lines]
    array = d2l.tensor([truncate_pad(
        l, num_steps, vocab['<pad>']) for l in lines])
    # valid_len counts the non-padding tokens of each row.
    valid_len = d2l.reduce_sum(
        d2l.astype(array != vocab['<pad>'], d2l.int32), 1)
    return array, valid_len
def load_data_nmt(batch_size, num_steps, num_examples=600):
| python | Apache-2.0 | e6b18ccea71451a55fcd861d7b96fddf2587b09a | 2026-01-04T14:38:16.201239Z | true |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/config.py | config.py | # fmt: off
#########################
#      Application      #
#########################
# Project layout: where generated modules, docs, icon resources, and
# Jinja2 templates live, relative to the repository root.
APP_NAME = "diagrams"
DIR_DOC_ROOT = "docs/nodes"
DIR_APP_ROOT = "diagrams"
DIR_RESOURCE = "resources"
DIR_TEMPLATE = "templates"
# Every provider namespace handled by the generation scripts.
PROVIDERS = (
    "base",
    "onprem",
    "aws",
    "azure",
    "digitalocean",
    "gcp",
    "ibm",
    "firebase",
    "k8s",
    "alibabacloud",
    "oci",
    "programming",
    "saas",
    "elastic",
    "generic",
    "openstack",
    "outscale",
    "gis"
)
#########################
#  Resource Processing  #
#########################
# External tools invoked by scripts/resource.py to post-process icons.
CMD_ROUND = "round"
CMD_ROUND_OPTS = ("-w",)
CMD_SVG2PNG = "inkscape"
CMD_SVG2PNG_OPTS = ("-w", "256", "-h", "256", "--export-type", "png")
CMD_SVG2PNG_IM = "convert"
CMD_SVG2PNG_IM_OPTS = ("-shave", "25%x25%", "-resize", "256x256!")
# Vendor prefixes stripped from raw icon file names, per provider.
FILE_PREFIXES = {
    "onprem": (),
    "aws": ("Amazon-", "AWS-"),
    "azure": ("Azure-",),
    "digitalocean": (),
    "gcp": ("Cloud-",),
    "firebase": ("Cloud-",),
    "ibm": (),
    "k8s": (),
    "alibabacloud": (),
    "oci": ("OCI-icon-",),
    "programming": (),
    "saas": (),
    "elastic": (),
    "outscale": (),
    "generic": (),
    "openstack": (),
    "gis": (),
}
#########################
#  Doc Auto Generation  #
#########################
TMPL_APIDOC = "apidoc.tmpl"
#########################
# Class Auto Generation #
#########################
TMPL_MODULE = "module.tmpl"
# Acronyms that up_or_title() renders fully uppercase, keyed by provider.
UPPER_WORDS = {
    "aws": ("aws", "api", "ebs", "ec2", "efs", "emr", "rds", "ml", "mq", "nat", "vpc", "waf", "sdk"),
    "azure": ("ad", "b2c", "ai", "api", "cdn", "ddos", "dns", "fxt", "hana", "hd", "id", "sap", "sql", "vm", "vpn", "vpc"),
    "gcp": ("gcp", "ai", "api", "cdn", "dns", "gke", "gpu", "iap", "ids", "ml", "nat", "os", "sdk", "sql", "ssd", "tpu", "vpn"),
    "firebase": ("ab", "fcm", "ml"),
    "k8s": (
        "api", "cm", "ccm", "crb", "crd", "ds", "etcd", "hpa", "k8s", "ns", "psp", "pv", "pvc", "rb", "rs",
        "sa", "sc", "sts", "svc",
    ),
    "oci": ("oci", "ocid", "oke", "ocir", "ddos", "waf", "bm", "vm", "cdn", "vpn", "dns", "nat", "dms", "api", "id"),
    "elastic": ("apm", "siem", "ece", "eck", "sql"),
    "generic": ("vpn", "ios", "xen", "sql", "lxc"),
    "outscale": ("osc",),
    "openstack": ("rpm", "loci", "nfv", "ec2api"),
    "pve": ("pve",),
    "ibm": ("ibm",),
    "gis": ("gis","ban","ign","ogc","qgis","wfs","wms"),
}
# Words with a bespoke TitleCase form that str.title() would get wrong.
TITLE_WORDS = {
    "onprem": {
        "onprem": "OnPrem",
    },
    "alibabacloud": {
        "alibabacloud": "AlibabaCloud"
    },
    "aws": {
        "cloudfront": "CloudFront"
    },
    "digitalocean": {
        "digitalocean": "DigitalOcean"
    },
    "openstack": {
        "openstack": "OpenStack"
    },
    "ibm": {
        "ibm": "IBMCloud"
    },
}
# TODO: check if the classname exists
# Short/alternate class names emitted alongside the generated classes,
# keyed by provider, then by resource type, then by generated class name.
ALIASES = {
    "onprem": {
        "analytics": {
            "Powerbi": "PowerBI"
        },
        "ci": {
            "Circleci": "CircleCI",
            "Concourseci": "ConcourseCI",
            "Droneci": "DroneCI",
            "Gitlabci": "GitlabCI",
            "Travisci": "TravisCI",
            "Teamcity": "TC",
            "Zuulci": "ZuulCI",
        },
        "container": {
            "Lxc": "LXC",
            "Rkt": "RKT",
        },
        "database": {
            "Clickhouse": "ClickHouse",
            "Cockroachdb": "CockroachDB",
            "Couchdb": "CouchDB",
            "Hbase": "HBase",
            "Influxdb": "InfluxDB",
            "Janusgraph": "JanusGraph",
            "Mariadb": "MariaDB",
            "Mongodb": "MongoDB",
            "Mssql": "MSSQL",
            "Mysql": "MySQL",
            "Postgresql": "PostgreSQL",
            "Qdrant": "Qdrant",
        },
        "gitops": {
            "Argocd": "ArgoCD",
        },
        "logging": {
            "Fluentbit": "FluentBit",
            "Rsyslog": "RSyslog",
        },
        "network": {
            "Etcd": "ETCD",
            "Haproxy": "HAProxy",
            "OpenServiceMesh": "OSM",
            "Opnsense": "OPNSense",
            "Pfsense": "PFSense",
            "Vyos": "VyOS"
        },
        "proxmox": {
            "Pve": "ProxmoxVE",
        },
        "queue": {
            "Activemq": "ActiveMQ",
            "Emqx": "EMQX",
            "Rabbitmq": "RabbitMQ",
            "Zeromq": "ZeroMQ",
        },
        "storage": {
            "Ceph": "CEPH",
            "CephOsd": "CEPH_OSD",
        },
        "workflow": {
            "Kubeflow": "KubeFlow",
            "Nifi": "NiFi",
        }
    },
    "aws": {
        "analytics": {
            "ElasticsearchService": "ES",
        },
        "business": {
            "AlexaForBusiness": "A4B"
        },
        "blockchain": {
            "QuantumLedgerDatabaseQldb": "QLDB"
        },
        "compute": {
            "ApplicationAutoScaling": "AutoScaling",
            "EC2Ami": "AMI",
            "EC2ContainerRegistry": "ECR",
            "ElasticBeanstalk": "EB",
            "ElasticContainerService": "ECS",
            "ElasticKubernetesService": "EKS",
            "ServerlessApplicationRepository": "SAR",
        },
        "database": {
            "DatabaseMigrationService": "DMS",
            "DocumentdbMongodbCompatibility": "DocumentDB",
            "DynamodbDax": "DAX",
            "DynamodbGlobalSecondaryIndex": "DynamodbGSI",
            "Database": "DB",
            "Dynamodb": "DDB",
            "Elasticache": "ElastiCache",
            "QuantumLedgerDatabaseQldb": "QLDB",
        },
        "devtools": {
            "CommandLineInterface": "CLI",
            "DeveloperTools": "DevTools",
        },
        "engagement": {
            "SimpleEmailServiceSes": "SES",
        },
        "general": {
            "GenericOfficeBuilding": "OfficeBuilding",
        },
        "integration": {
            "SimpleNotificationServiceSns": "SNS",
            "SimpleQueueServiceSqs": "SQS",
            "StepFunctions": "SF",
        },
        "iot": {
            "Freertos": "FreeRTOS",
            "IotHardwareBoard": "IotBoard",
        },
        "management": {
            "SystemsManager": "SSM",
            "SystemsManagerParameterStore": "ParameterStore",
        },
        "migration": {
            "ApplicationDiscoveryService": "ADS",
            "CloudendureMigration": "CEM",
            "DatabaseMigrationService": "DMS",
            "MigrationAndTransfer": "MAT",
            "ServerMigrationService": "SMS",
        },
        "ml": {
            "DeepLearningContainers": "DLC",
        },
        "network": {
            "CloudFront": "CF",
            "ElasticLoadBalancing": "ELB",
            "ElbApplicationLoadBalancer": "ALB",
            "ElbClassicLoadBalancer": "CLB",
            "ElbNetworkLoadBalancer": "NLB",
            "GlobalAccelerator": "GAX",
            "InternetGateway": "IGW",
            "TransitGateway": "TGW",
            "TransitGatewayAttachment": "TGWAttach",
        },
        "security": {
            "CertificateManager": "ACM",
            "Cloudhsm": "CloudHSM",
            "DirectoryService": "DS",
            "FirewallManager": "FMS",
            "IdentityAndAccessManagementIamAccessAnalyzer": "IAMAccessAnalyzer",
            "IdentityAndAccessManagementIamAWSSts": "IAMAWSSts",
            "IdentityAndAccessManagementIamPermissions": "IAMPermissions",
            "IdentityAndAccessManagementIamRole": "IAMRole",
            "IdentityAndAccessManagementIam": "IAM",
            "KeyManagementService": "KMS",
            "ResourceAccessManager": "RAM",
        },
        "storage": {
            "CloudendureDisasterRecovery": "CDR",
            "ElasticBlockStoreEBS": "EBS",
            "ElasticFileSystemEFS": "EFS",
            "Fsx": "FSx",
            "SimpleStorageServiceS3": "S3",
        },
    },
    "azure": {
        "compute": {
            "ContainerRegistries": "ACR",
            "KubernetesServices": "AKS",
            "VMScaleSet": "VMSS"
        },
    },
    "gcp": {
        "analytics": {
            "Bigquery": "BigQuery",
            "Pubsub": "PubSub",
        },
        "compute": {
            "AppEngine": "GAE",
            "ComputeEngine": "GCE",
            "Functions": "GCF",
            "KubernetesEngine": "GKE",
            "Run": "CloudRun",
        },
        "database": {
            "Bigtable": "BigTable",
        },
        "devtools": {
            "ContainerRegistry": "GCR",
        },
        "migration": {
            "MigrateComputeEngine": "CE",
        },
        "ml": {
            "Automl": "AutoML",
            "NaturalLanguageAPI": "NLAPI",
            "SpeechToText": "STT",
            "TextToSpeech": "TTS",
        },
        "network": {
            "CloudIDS": "IDS",
            "PrivateServiceConnect": "PSC",
            "VirtualPrivateCloud": "VPC",
        },
        "security": {
            "AccessContextManager": "ACM",
            "KeyManagementService": "KMS",
            "SecurityCommandCenter": "SCC",
        },
        "storage": {
            "LocalSSD": "SSD",
            "Storage": "GCS",
        },
    },
    "firebase": {
        "grow": {
            "Messaging": "FCM"
        }
    },
    "k8s": {
        "clusterconfig": {
            "Limits": "LimitRange",
            "HPA": "HorizontalPodAutoscaler",
        },
        "compute": {
            "Deploy": "Deployment",
            "DS": "DaemonSet",
            "RS": "ReplicaSet",
            "STS": "StatefulSet"
        },
        "controlplane": {
            "API": "APIServer",
            "CM": "ControllerManager",
            "KProxy": "KubeProxy",
            "Sched": "Scheduler",
        },
        "group": {
            "NS": "Namespace",
        },
        "network": {
            "Ep": "Endpoint",
            "Ing": "Ingress",
            "Netpol": "NetworkPolicy",
            "SVC": "Service",
        },
        "podconfig": {
            "CM": "ConfigMap",
        },
        "rbac": {
            "CRole": "ClusterRole",
            "CRB": "ClusterRoleBinding",
            "RB": "RoleBinding",
            "SA": "ServiceAccount",
        },
        "storage": {
            "PV": "PersistentVolume",
            "PVC": "PersistentVolumeClaim",
            "SC": "StorageClass",
            "Vol": "Volume",
        },
    },
    "alibabacloud": {
        "application": {
            "LogService": "SLS",
            "MessageNotificationService": "MNS",
            "PerformanceTestingService": "PTS",
            "SmartConversationAnalysis": "SCA",
        },
        "compute": {
            "AutoScaling": "ESS",
            "ElasticComputeService": "ECS",
            "ElasticContainerInstance": "ECI",
            "ElasticHighPerformanceComputing": "EHPC",
            "FunctionCompute": "FC",
            "OperationOrchestrationService": "OOS",
            "ResourceOrchestrationService": "ROS",
            "ServerLoadBalancer": "SLB",
            "ServerlessAppEngine": "SAE",
            "SimpleApplicationServer": "SAS",
            "WebAppService": "WAS",
        },
        "database": {
            "DataManagementService": "DMS",
            "DataTransmissionService": "DTS",
            "DatabaseBackupService": "DBS",
            "DisributeRelationalDatabaseService": "DRDS",
            "GraphDatabaseService": "GDS",
            "RelationalDatabaseService": "RDS",
        },
        "network": {
            "CloudEnterpriseNetwork": "CEN",
            "ElasticIpAddress": "EIP",
            "ServerLoadBalancer": "SLB",
            "VirtualPrivateCloud": "VPC",
        },
        "security": {
            "AntiBotService": "ABS",
            "AntifraudService": "AS",
            "CloudFirewall": "CFW",
            "ContentModeration": "CM",
            "DataEncryptionService": "DES",
            "WebApplicationFirewall": "WAF",
        },
        "storage": {
            "FileStorageHdfs": "HDFS",
            "FileStorageNas": "NAS",
            "HybridBackupRecovery": "HBR",
            "HybridCloudDisasterRecovery": "HDR",
            "ObjectStorageService": "OSS",
            "ObjectTableStore": "OTS",
        }
    },
    "digitalocean": {},
    "gis": {},
    "oci": {
        "compute": {
            "VM": "VirtualMachine",
            "VMWhite": "VirtualMachineWhite",
            "BM": "BareMetal",
            "BMWhite": "BareMetalWhite",
            "OCIR": "OCIRegistry",
            "OCIRWhite": "OCIRegistryWhite",
            "OKE": "ContainerEngine",
            "OKEWhite": "ContainerEngineWhite",
        },
        "database": {
            "Autonomous": "ADB",
            "AutonomousWhite": "ADBWhite",
            "DatabaseService": "DBService",
            "DatabaseServiceWhite": "DBServiceWhite",
        }
    },
    "programming": {
        "framework": {
            "Fastapi": "FastAPI",
            "Graphql": "GraphQL",
            "Dotnet": "DotNet",
            "Nextjs": "NextJs"
        },
        "language": {
            "Javascript": "JavaScript",
            "Nodejs": "NodeJS",
            "Php": "PHP",
            "Typescript": "TypeScript"
        },
    },
    "saas": {
        "logging": {
            "Datadog": "DataDog",
            "Newrelic": "NewRelic"
        }
    },
    "elastic": {
        "elasticsearch": {
            "Elasticsearch": "ElasticSearch",
            "Logstash": "LogStash",
            "MachineLearning": "ML",
        }
    },
    "outscale": {
        "Osc": "OSC",
    },
    "ibm": {},
    "generic": {},
    "openstack": {
        "user": {
            "Openstackclient": "OpenStackClient",
        },
        "billing": {
            "Cloudkitty": "CloudKitty",
        },
        "deployment": {
            "Kolla": "KollaAnsible",
            "Tripleo": "TripleO",
        }
    },
}
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/scripts/generate.py | scripts/generate.py | import os
import sys
from typing import Iterable
from jinja2 import Environment, FileSystemLoader, Template, exceptions
import config as cfg
from . import app_root_dir, base_dir, doc_root_dir, resource_dir, template_dir
_usage = "Usage: generate.py <provider>"  # CLI usage string
def load_tmpl(tmpl: str) -> Template:
    """Load a Jinja2 template by name from the project template directory."""
    environment = Environment(loader=FileSystemLoader(template_dir()))
    # Expose the provider-aware casing helper to the templates.
    environment.filters["up_or_title"] = up_or_title
    return environment.get_template(tmpl)
def up_or_title(pvd: str, s: str) -> str:
    """Case a word: uppercase known acronyms, apply provider-specific
    title overrides, else fall back to ``str.title``."""
    if s in cfg.UPPER_WORDS.get(pvd, ()):
        return s.upper()
    overrides = cfg.TITLE_WORDS.get(pvd, {})
    return overrides.get(s, s.title())
def gen_classes(pvd: str, typ: str, paths: Iterable[str]) -> str:
    """Generate all service node classes based on resources paths with class templates."""
    tmpl = load_tmpl(cfg.TMPL_MODULE)

    # TODO: extract the gen class metas for sharing
    # TODO: independent function for generating all pvd/typ/paths pairs
    def _gen_class_meta(path: str) -> dict:
        # e.g. "elastic-beanstalk.png" -> name "ElasticBeanstalk".
        base = os.path.splitext(path)[0]
        name = "".join([up_or_title(pvd, s) for s in base.split("-")])
        return {"name": name, "icon": path}

    metas = map(_gen_class_meta, paths)
    # Use .get to avoid the membership-test-plus-lookup (and to stay
    # consistent with gen_apidoc's alias lookup).
    aliases = cfg.ALIASES[pvd].get(typ, {})
    return tmpl.render(pvd=pvd, typ=typ, metas=metas, aliases=aliases)
def gen_apidoc(pvd: str, typ_paths: dict) -> str:
    """Render the API documentation page for a provider."""
    # Prefer a provider-specific template; fall back to the generic one.
    try:
        parts = cfg.TMPL_APIDOC.split(".")
        tmpl = load_tmpl(f"{parts[0]}_{pvd}.{parts[1]}")
    except exceptions.TemplateNotFound:
        tmpl = load_tmpl(cfg.TMPL_APIDOC)

    # TODO: remove
    def _gen_class_name(path: str) -> str:
        base = os.path.splitext(path)[0]
        return "".join([up_or_title(pvd, s) for s in base.split("-")])

    typ_classes = {}
    for typ, (paths, resource_root) in sorted(typ_paths.items()):
        entries = []
        for path in paths:
            name = _gen_class_name(path)
            entries.append({
                "name": name,
                "alias": cfg.ALIASES[pvd].get(typ, {}).get(name),
                "resource_path": os.path.join(resource_root, path),
            })
        typ_classes[typ] = entries
    return tmpl.render(pvd=pvd, typ_classes=typ_classes)
def make_module(pvd: str, typ: str, classes: str) -> None:
    """Write the rendered classes out as diagrams/<pvd>/<typ>.py."""
    target = os.path.join(app_root_dir(pvd), f"{typ}.py")
    with open(target, "w+") as fp:
        fp.write(classes)
def make_apidoc(pvd: str, content: str) -> None:
    """Write the rendered API documentation out as docs/nodes/<pvd>.md."""
    target = os.path.join(doc_root_dir(), f"{pvd}.md")
    with open(target, "w+") as fp:
        fp.write(content)
def generate(pvd: str) -> None:
    """Generate the service node modules and API docs for one provider."""
    typ_paths = {}
    base = base_dir()
    for root, _, files in os.walk(resource_dir(pvd)):
        files.sort()
        # Keep only the original (non-rounded) PNG resources.
        paths = [f for f in files if f.endswith(".png") and "rounded" not in f]
        # Skip the top-root directory itself.
        typ = os.path.basename(root)
        if typ == pvd:
            continue
        make_module(pvd, typ, gen_classes(pvd, typ, paths))
        typ_paths[typ] = (paths, os.path.relpath(root, base))
    # Build API documentation from everything collected above.
    make_apidoc(pvd, gen_apidoc(pvd, typ_paths))
if __name__ == "__main__":
    # Print the usage string (previously defined but never used) instead of
    # crashing with an IndexError when the provider argument is missing --
    # mirrors the argument handling in scripts/resource.py.
    if len(sys.argv) < 2:
        print(_usage)
        sys.exit()
    pvd = sys.argv[1]
    if pvd not in cfg.PROVIDERS:
        sys.exit()
    generate(pvd)
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/scripts/resource.py | scripts/resource.py | """
resource.py provides useful tools for resource processing.
There are 4 commands available.
- clean: clean and unify the resource file names with some rules.
- round: generate the rounded images from the original squared images.
- svg2png: convert SVG resources to PNG via Inkscape.
- svg2png2: convert SVG resources to PNG via ImageMagick.
"""
import os
import subprocess
import sys
import config as cfg
from . import resource_dir
_usage = "Usage: resource.py <cmd> <pvd>"  # printed when CLI args are missing
def cleaner_onprem(f):
    """Normalize an onprem resource name: underscores to dashes, lowercased."""
    return f.replace("_", "-").lower()
def cleaner_aws(f):
    """Normalize an AWS resource name and strip known vendor prefixes."""
    # Replacement order matters: "-light-bg4x" must be removed before
    # the shorter "-light-bg".
    for old, new in (("_", "-"), ("@4x", ""), ("@5x", ""), ("2.0", "2-0"),
                     ("-light-bg4x", ""), ("-light-bg", "")):
        f = f.replace(old, new)
    for prefix in cfg.FILE_PREFIXES["aws"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
def cleaner_azure(f):
    """Normalize an Azure resource name: dashes, no parentheses, no prefix."""
    f = f.replace("_", "-").replace("(", "").replace(")", "")
    # Collapse whitespace runs into single dashes.
    f = "-".join(f.split())
    for prefix in cfg.FILE_PREFIXES["azure"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
def cleaner_gcp(f):
    """Normalize a GCP resource name and drop the known vendor prefix."""
    f = "-".join(f.replace("_", "-").split())
    for prefix in cfg.FILE_PREFIXES["gcp"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
def cleaner_ibm(f):
    """Normalize an IBM resource name and drop the known vendor prefix."""
    f = "-".join(f.replace("_", "-").split())
    for prefix in cfg.FILE_PREFIXES["ibm"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
def cleaner_firebase(f):
    """Normalize a Firebase resource name and drop the known vendor prefix."""
    f = "-".join(f.replace("_", "-").split())
    for prefix in cfg.FILE_PREFIXES["firebase"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
def cleaner_k8s(f):
    """Normalize a Kubernetes resource name: drop the '-256' size suffix."""
    f = f.replace("-256", "")
    for prefix in cfg.FILE_PREFIXES["k8s"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
def cleaner_digitalocean(f):
    """Normalize a DigitalOcean resource name: drop the '-32' size suffix."""
    f = f.replace("-32", "")
    for prefix in cfg.FILE_PREFIXES["digitalocean"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
def cleaner_alibabacloud(f):
    """Alibaba Cloud names only need prefix stripping and lowercasing."""
    for prefix in cfg.FILE_PREFIXES["alibabacloud"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
def cleaner_oci(f):
    """Normalize an OCI resource name: spaces/underscores to dashes, no prefix."""
    f = f.replace(" ", "-").replace("_", "-")
    for prefix in cfg.FILE_PREFIXES["oci"]:
        if f.startswith(prefix):
            f = f[len(prefix):]
            break
    return f.lower()
# The remaining providers need no renaming beyond lowercasing.
def cleaner_programming(f):
    """Lowercase a programming resource name."""
    return f.lower()
def cleaner_generic(f):
    """Lowercase a generic resource name."""
    return f.lower()
def cleaner_saas(f):
    """Lowercase a SaaS resource name."""
    return f.lower()
def cleaner_elastic(f):
    """Lowercase an Elastic resource name."""
    return f.lower()
def cleaner_outscale(f):
    """Lowercase an Outscale resource name."""
    return f.lower()
def cleaner_openstack(f):
    """Lowercase an OpenStack resource name."""
    return f.lower()
def cleaner_gis(f):
    """Lowercase a GIS resource name."""
    return f.lower()
# Dispatch table: provider name -> its file-name cleaner.
cleaners = {
    "onprem": cleaner_onprem,
    "aws": cleaner_aws,
    "azure": cleaner_azure,
    "digitalocean": cleaner_digitalocean,
    "gcp": cleaner_gcp,
    "ibm": cleaner_ibm,
    "firebase": cleaner_firebase,
    "k8s": cleaner_k8s,
    "alibabacloud": cleaner_alibabacloud,
    "oci": cleaner_oci,
    "programming": cleaner_programming,
    "saas": cleaner_saas,
    "elastic": cleaner_elastic,
    "outscale": cleaner_outscale,
    "generic": cleaner_generic,
    "openstack": cleaner_openstack,
    "gis": cleaner_gis,
}
def clean_png(pvd: str) -> None:
    """Refine the resources files names.

    Walks the provider's resource tree and renames every .png via the
    provider-specific cleaner.
    """
    def _rename(base: str, png: str):
        new = cleaners[pvd](png)
        old_path = os.path.join(base, png)
        new_path = os.path.join(base, new)
        os.rename(old_path, new_path)

    for root, _, files in os.walk(resource_dir(pvd)):
        # Plain loop instead of a throwaway list comprehension for side effects.
        for png in files:
            if png.endswith(".png"):
                _rename(root, png)
def round_png(pvd: str) -> None:
    """Round the images.

    Runs the external `round` command on every non-rounded .png resource.
    """
    def _round(base: str, path: str):
        path = os.path.join(base, path)
        subprocess.run([cfg.CMD_ROUND, *cfg.CMD_ROUND_OPTS, path])

    for root, _, files in os.walk(resource_dir(pvd)):
        # Plain loop instead of a throwaway list comprehension for side effects;
        # skip already-rounded outputs so the command stays idempotent.
        for name in files:
            if name.endswith(".png") and "rounded" not in name:
                _round(root, name)
def svg2png(pvd: str) -> None:
    """Convert the svg into png (via Inkscape) and delete the source svg."""
    def _convert(base: str, path: str):
        path = os.path.join(base, path)
        subprocess.run([cfg.CMD_SVG2PNG, *cfg.CMD_SVG2PNG_OPTS, path])
        # os.remove is portable and avoids spawning an external `rm` process.
        os.remove(path)

    for root, _, files in os.walk(resource_dir(pvd)):
        # Plain loop instead of a throwaway list comprehension for side effects.
        for name in files:
            if name.endswith(".svg"):
                _convert(root, name)
def svg2png2(pvd: str) -> None:
    """Convert the svg into png using ImageMagick, deleting the source svg."""
    def _convert(base: str, path: str):
        path_src = os.path.join(base, path)
        path_dest = path_src.replace(".svg", ".png")
        subprocess.run([cfg.CMD_SVG2PNG_IM, *
                        cfg.CMD_SVG2PNG_IM_OPTS, path_src, path_dest])
        # os.remove is portable and avoids spawning an external `rm` process.
        os.remove(path_src)

    for root, _, files in os.walk(resource_dir(pvd)):
        # Plain loop instead of a throwaway list comprehension for side effects.
        for name in files:
            if name.endswith(".svg"):
                _convert(root, name)
# fmt: off
# CLI sub-command dispatch table.
commands = {
    "clean": clean_png,
    "round": round_png,
    "svg2png": svg2png,
    "svg2png2": svg2png2,
}
# fmt: on
if __name__ == "__main__":
    if len(sys.argv) < 3:
        print(_usage)
        sys.exit()
    cmd = sys.argv[1]
    pvd = sys.argv[2]
    # NOTE(review): an unknown cmd or pvd exits silently; printing _usage
    # here too would be friendlier.
    if cmd not in commands:
        sys.exit()
    if pvd not in cfg.PROVIDERS:
        sys.exit()
    commands[cmd](pvd)
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/scripts/__init__.py | scripts/__init__.py | import os
from pathlib import Path
import config as cfg
def base_dir() -> Path:
    """Absolute path of the repository root (parent of this package's dir)."""
    return Path(os.path.abspath(__file__)).parent.parent
def app_root_dir(pvd: str) -> str:
    """Directory holding the generated modules for provider *pvd*."""
    return os.path.join(base_dir(), cfg.DIR_APP_ROOT, pvd)
def doc_root_dir() -> str:
    """Directory holding the generated node documentation."""
    return os.path.join(base_dir(), cfg.DIR_DOC_ROOT)
def resource_dir(pvd: str) -> str:
    """Directory holding the icon resources for provider *pvd*."""
    return os.path.join(base_dir(), cfg.DIR_RESOURCE, pvd)
def template_dir() -> str:
    """Directory holding the Jinja2 templates."""
    return os.path.join(base_dir(), cfg.DIR_TEMPLATE)
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/tests/test_c4.py | tests/test_c4.py | import os
import random
import string
import unittest
from diagrams import Diagram, setcluster, setdiagram
from diagrams.c4 import Container, Database, Person, Relationship, System, SystemBoundary
class C4Test(unittest.TestCase):
    """Smoke tests for the C4-model primitives (Person, System, Container, ...)."""
    def setUp(self):
        # Unique random diagram name so parallel runs don't collide on files.
        self.name = "diagram-" + \
            "".join([random.choice(string.hexdigits) for n in range(7)]).lower()
    def tearDown(self):
        # Reset the module-level diagram/cluster context and drop the output.
        setdiagram(None)
        setcluster(None)
        try:
            os.remove(self.name + ".png")
        except FileNotFoundError:
            pass
    def test_nodes(self):
        # Constructing the three basic node kinds must not raise.
        with Diagram(name=self.name, show=False):
            person = Person("person", "A person.")
            container = Container(
                "container",
                "Java application",
                "The application.")
            database = Database(
                "database",
                "Oracle database",
                "Stores information.")
    def test_external_nodes(self):
        with Diagram(name=self.name, show=False):
            external_person = Person("person", external=True)
            external_system = System("external", external=True)
    def test_systems(self):
        with Diagram(name=self.name, show=False):
            system = System("system", "The internal system.")
            system_without_description = System("unknown")
    def test_edges(self):
        with Diagram(name=self.name, show=False):
            c1 = Container("container1")
            c2 = Container("container2")
            c1 >> c2
    def test_edges_with_labels(self):
        with Diagram(name=self.name, show=False):
            c1 = Container("container1")
            c2 = Container("container2")
            c1 >> Relationship("depends on") >> c2
            c1 << Relationship("is depended on by") << c2
    def test_edge_without_constraint(self):
        with Diagram(name=self.name, show=False):
            s1 = System("system 1")
            s2 = System("system 2")
            s1 >> Relationship(constraint="False") >> s2
    def test_cluster(self):
        with Diagram(name=self.name, show=False):
            with SystemBoundary("System"):
                Container("container", "type", "description")
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/tests/test_diagram.py | tests/test_diagram.py | import os
import pathlib
import shutil
import unittest
from diagrams import Cluster, Diagram, Edge, Node, getcluster, getdiagram, setcluster, setdiagram
class DiagramTest(unittest.TestCase):
    """Tests for Diagram argument validation, context management, and node wiring."""
    def setUp(self):
        self.name = "diagram_test"
    def tearDown(self):
        setdiagram(None)
        setcluster(None)
        # Only some tests generate the image file.
        try:
            shutil.rmtree(self.name)
        except OSError:
            # Not a directory; treat the name as a single image file.
            try:
                os.remove(self.name + ".png")
            except FileNotFoundError:
                pass
    def test_validate_direction(self):
        # Normal directions.
        for dir in ("TB", "BT", "LR", "RL", "tb"):
            Diagram(direction=dir)
        # Invalid directions.
        for dir in ("BR", "TL", "Unknown"):
            with self.assertRaises(ValueError):
                Diagram(direction=dir)
    def test_validate_curvestyle(self):
        # Normal curve styles (case-insensitive).
        for cvs in ("ortho", "curved", "CURVED"):
            Diagram(curvestyle=cvs)
        # Invalid curve styles.
        for cvs in ("tangent", "unknown"):
            with self.assertRaises(ValueError):
                Diagram(curvestyle=cvs)
    def test_validate_outformat(self):
        # Normal output formats.
        for fmt in ("png", "jpg", "svg", "pdf", "PNG", "dot"):
            Diagram(outformat=fmt)
        # Invalid output formats.
        for fmt in ("pnp", "jpe", "unknown"):
            with self.assertRaises(ValueError):
                Diagram(outformat=fmt)
    def test_with_global_context(self):
        self.assertIsNone(getdiagram())
        with Diagram(name=os.path.join(self.name, "with_global_context"), show=False):
            self.assertIsNotNone(getdiagram())
        self.assertIsNone(getdiagram())
    def test_node_not_in_diagram(self):
        # A Node must belong to a Diagram context.
        with self.assertRaises(EnvironmentError):
            Node("node")
    def test_node_to_node(self):
        with Diagram(name=os.path.join(self.name, "node_to_node"), show=False):
            node1 = Node("node1")
            node2 = Node("node2")
            self.assertEqual(node1 - node2, node2)
            self.assertEqual(node1 >> node2, node2)
            self.assertEqual(node1 << node2, node2)
    def test_node_to_nodes(self):
        with Diagram(name=os.path.join(self.name, "node_to_nodes"), show=False):
            node1 = Node("node1")
            nodes = [Node("node2"), Node("node3")]
            self.assertEqual(node1 - nodes, nodes)
            self.assertEqual(node1 >> nodes, nodes)
            self.assertEqual(node1 << nodes, nodes)
    def test_nodes_to_node(self):
        with Diagram(name=os.path.join(self.name, "nodes_to_node"), show=False):
            node1 = Node("node1")
            nodes = [Node("node2"), Node("node3")]
            self.assertEqual(nodes - node1, node1)
            self.assertEqual(nodes >> node1, node1)
            self.assertEqual(nodes << node1, node1)
    def test_default_filename(self):
        self.name = "example_1"
        with Diagram(name="Example 1", show=False):
            Node("node1")
        self.assertTrue(os.path.exists(f"{self.name}.png"))
    def test_custom_filename(self):
        self.name = "my_custom_name"
        with Diagram(name="Example 1", filename=self.name, show=False):
            Node("node1")
        self.assertTrue(os.path.exists(f"{self.name}.png"))
    def test_empty_name(self):
        """Check that providing an empty name doesn't crash but saves to a diagrams_image.xxx file."""
        self.name = "diagrams_image"
        with Diagram(show=False):
            Node("node1")
        self.assertTrue(os.path.exists(f"{self.name}.png"))
    def test_autolabel(self):
        with Diagram(name=os.path.join(self.name, "nodes_to_node"), show=False):
            node1 = Node("node1")
            # NOTE(review): assertTrue's second argument is a failure message,
            # not an expected value -- this only checks the label is truthy.
            # assertEqual was probably intended; confirm the expected label.
            self.assertTrue(node1.label, "Node\nnode1")
    def test_outformat_list(self):
        """Check that outformat render all the files from the list."""
        self.name = "diagrams_image"
        with Diagram(show=False, outformat=["dot", "png"]):
            Node("node1")
        # both files must exist
        self.assertTrue(os.path.exists(f"{self.name}.png"))
        self.assertTrue(os.path.exists(f"{self.name}.dot"))
        # clean the dot file as it only generated here
        os.remove(self.name + ".dot")
class ClusterTest(unittest.TestCase):
    """Tests for Cluster context nesting and node wiring inside clusters."""
    def setUp(self):
        self.name = "cluster_test"
    def tearDown(self):
        setdiagram(None)
        setcluster(None)
        # Only some tests generate the image file.
        try:
            shutil.rmtree(self.name)
        except OSError:
            pass
    def test_validate_direction(self):
        # Normal directions.
        for dir in ("TB", "BT", "LR", "RL"):
            with Diagram(name=os.path.join(self.name, "validate_direction"), show=False):
                Cluster(direction=dir)
        # Invalid directions.
        for dir in ("BR", "TL", "Unknown"):
            with self.assertRaises(ValueError):
                with Diagram(name=os.path.join(self.name, "validate_direction"), show=False):
                    Cluster(direction=dir)
    def test_with_global_context(self):
        with Diagram(name=os.path.join(self.name, "with_global_context"), show=False):
            self.assertIsNone(getcluster())
            with Cluster():
                self.assertIsNotNone(getcluster())
            self.assertIsNone(getcluster())
    def test_with_nested_cluster(self):
        # The active cluster must track the innermost open context.
        with Diagram(name=os.path.join(self.name, "with_nested_cluster"), show=False):
            self.assertIsNone(getcluster())
            with Cluster() as c1:
                self.assertEqual(c1, getcluster())
                with Cluster() as c2:
                    self.assertEqual(c2, getcluster())
                self.assertEqual(c1, getcluster())
            self.assertIsNone(getcluster())
    def test_node_not_in_diagram(self):
        # A Node must belong to a Diagram context.
        with self.assertRaises(EnvironmentError):
            Node("node")
    def test_node_to_node(self):
        with Diagram(name=os.path.join(self.name, "node_to_node"), show=False):
            with Cluster():
                node1 = Node("node1")
                node2 = Node("node2")
                self.assertEqual(node1 - node2, node2)
                self.assertEqual(node1 >> node2, node2)
                self.assertEqual(node1 << node2, node2)
    def test_node_to_nodes(self):
        with Diagram(name=os.path.join(self.name, "node_to_nodes"), show=False):
            with Cluster():
                node1 = Node("node1")
                nodes = [Node("node2"), Node("node3")]
                self.assertEqual(node1 - nodes, nodes)
                self.assertEqual(node1 >> nodes, nodes)
                self.assertEqual(node1 << nodes, nodes)
    def test_nodes_to_node(self):
        with Diagram(name=os.path.join(self.name, "nodes_to_node"), show=False):
            with Cluster():
                node1 = Node("node1")
                nodes = [Node("node2"), Node("node3")]
                self.assertEqual(nodes - node1, node1)
                self.assertEqual(nodes >> node1, node1)
                self.assertEqual(nodes << node1, node1)
class EdgeTest(unittest.TestCase):
    """Tests for Edge attributes combined with the -, >>, << operators."""
    def setUp(self):
        self.name = "edge_test"
    def tearDown(self):
        setdiagram(None)
        setcluster(None)
        # Only some tests generate the image file.
        try:
            shutil.rmtree(self.name)
        except OSError:
            pass
    def test_node_to_node(self):
        with Diagram(name=os.path.join(self.name, "node_to_node"), show=False):
            node1 = Node("node1")
            node2 = Node("node2")
            self.assertEqual(node1 - Edge(color="red") - node2, node2)
    def test_node_to_nodes(self):
        with Diagram(name=os.path.join(self.name, "node_to_nodes"), show=False):
            with Cluster():
                node1 = Node("node1")
                nodes = [Node("node2"), Node("node3")]
                self.assertEqual(node1 - Edge(color="red") - nodes, nodes)
    def test_nodes_to_node(self):
        with Diagram(name=os.path.join(self.name, "nodes_to_node"), show=False):
            with Cluster():
                node1 = Node("node1")
                nodes = [Node("node2"), Node("node3")]
                self.assertEqual(nodes - Edge(color="red") - node1, node1)
    def test_nodes_to_node_with_additional_attributes(self):
        with Diagram(name=os.path.join(self.name, "nodes_to_node_with_additional_attributes"), show=False):
            with Cluster():
                node1 = Node("node1")
                nodes = [Node("node2"), Node("node3")]
                # Chained Edges should merge into one undirected connection.
                self.assertEqual(
                    nodes -
                    Edge(
                        color="red") -
                    Edge(
                        color="green") -
                    node1,
                    node1)
    def test_node_to_node_with_attributes(self):
        with Diagram(name=os.path.join(self.name, "node_to_node_with_attributes"), show=False):
            with Cluster():
                node1 = Node("node1")
                node2 = Node("node2")
                self.assertEqual(
                    node1 << Edge(
                        color="red",
                        label="1.1") << node2,
                    node2)
                self.assertEqual(
                    node1 >> Edge(
                        color="green",
                        label="1.2") >> node2,
                    node2)
                self.assertEqual(
                    node1 << Edge(
                        color="blue",
                        label="1.3") >> node2,
                    node2)
    def test_node_to_node_with_additional_attributes(self):
        with Diagram(name=os.path.join(self.name, "node_to_node_with_additional_attributes"), show=False):
            with Cluster():
                node1 = Node("node1")
                node2 = Node("node2")
                self.assertEqual(
                    node1 << Edge(
                        color="red",
                        label="2.1") << Edge(
                        color="blue") << node2,
                    node2)
                self.assertEqual(
                    node1 >> Edge(
                        color="green",
                        label="2.2") >> Edge(
                        color="red") >> node2,
                    node2)
                self.assertEqual(
                    node1 << Edge(
                        color="blue",
                        label="2.3") >> Edge(
                        color="black") >> node2,
                    node2)
    def test_nodes_to_node_with_attributes_loop(self):
        # Self-loops in all four operator combinations.
        with Diagram(name=os.path.join(self.name, "nodes_to_node_with_attributes_loop"), show=False):
            with Cluster():
                node = Node("node")
                self.assertEqual(
                    node >> Edge(
                        color="red",
                        label="3.1") >> node,
                    node)
                self.assertEqual(
                    node << Edge(
                        color="green",
                        label="3.2") << node,
                    node)
                self.assertEqual(
                    node >> Edge(
                        color="blue",
                        label="3.3") << node,
                    node)
                self.assertEqual(
                    node << Edge(
                        color="pink",
                        label="3.4") >> node,
                    node)
    def test_nodes_to_node_with_attributes_bothdirectional(self):
        with Diagram(name=os.path.join(self.name, "nodes_to_node_with_attributes_bothdirectional"), show=False):
            with Cluster():
                node1 = Node("node1")
                nodes = [Node("node2"), Node("node3")]
                self.assertEqual(
                    nodes << Edge(
                        color="green",
                        label="4") >> node1,
                    node1)
    def test_nodes_to_node_with_attributes_bidirectional(self):
        with Diagram(name=os.path.join(self.name, "nodes_to_node_with_attributes_bidirectional"), show=False):
            with Cluster():
                node1 = Node("node1")
                nodes = [Node("node2"), Node("node3")]
                self.assertEqual(
                    nodes << Edge(
                        color="blue",
                        label="5") >> node1,
                    node1)
    def test_nodes_to_node_with_attributes_onedirectional(self):
        with Diagram(name=os.path.join(self.name, "nodes_to_node_with_attributes_onedirectional"), show=False):
            with Cluster():
                node1 = Node("node1")
                nodes = [Node("node2"), Node("node3")]
                self.assertEqual(
                    nodes >> Edge(
                        color="red",
                        label="6.1") >> node1,
                    node1)
                self.assertEqual(
                    nodes << Edge(
                        color="green",
                        label="6.2") << node1,
                    node1)
    def test_nodes_to_node_with_additional_attributes_directional(self):
        with Diagram(name=os.path.join(self.name, "nodes_to_node_with_additional_attributes_directional"), show=False):
            with Cluster():
                node1 = Node("node1")
                nodes = [Node("node2"), Node("node3")]
                self.assertEqual(
                    nodes >> Edge(
                        color="red",
                        label="6.1") >> Edge(
                        color="blue",
                        label="6.2") >> node1,
                    node1)
                self.assertEqual(
                    nodes << Edge(
                        color="green",
                        label="6.3") << Edge(
                        color="pink",
                        label="6.4") << node1,
                    node1)
class ResourcesTest(unittest.TestCase):
    def test_folder_depth(self):
        """
        The code currently only handles resource folders up to a dir depth of 2
        i.e. resources/<provider>/<type>/<image>, so check that this depth isn't
        exceeded.
        """
        resources_dir = pathlib.Path(__file__).parent.parent / "resources"
        depths = (
            os.path.relpath(d, resources_dir).count(os.sep) + 1
            for d, _, _ in os.walk(resources_dir)
        )
        self.assertLessEqual(max(depths), 2)
    def test_resources_exist_and_render(self):
        """
        Test that resources directory exists and icons can be loaded for rendering.
        This ensures the package build includes all necessary resource files.
        """
        from diagrams.aws.compute import EC2
        from diagrams.aws.database import RDS
        # The resources tree must ship with the package.
        resources_dir = pathlib.Path(__file__).parent.parent / "resources"
        self.assertTrue(resources_dir.exists(), "resources directory should exist")
        # Spot-check one provider category and one icon file.
        aws_compute_dir = resources_dir / "aws" / "compute"
        self.assertTrue(aws_compute_dir.exists(), "AWS compute resources should exist")
        self.assertTrue((aws_compute_dir / "ec2.png").exists(), "EC2 icon should exist")
        # Nodes must resolve their icons to files that actually exist.
        test_diagram_name = "test_resources_render"
        try:
            with Diagram(test_diagram_name, show=False):
                ec2_node = EC2("test-ec2")
                rds_node = RDS("test-rds")
                self.assertIsNotNone(ec2_node._icon, "EC2 node should have an icon")
                self.assertIsNotNone(rds_node._icon, "RDS node should have an icon")
                ec2_icon_path = ec2_node._load_icon()
                rds_icon_path = rds_node._load_icon()
                self.assertTrue(os.path.exists(ec2_icon_path),
                                f"EC2 icon path should exist: {ec2_icon_path}")
                self.assertTrue(os.path.exists(rds_icon_path),
                                f"RDS icon path should exist: {rds_icon_path}")
        finally:
            # Drop the image rendered when the Diagram context exited.
            try:
                os.remove(test_diagram_name + ".png")
            except FileNotFoundError:
                pass
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/tests/test_cli.py | tests/test_cli.py | import os
import unittest
from io import StringIO
from unittest.mock import mock_open, patch
from diagrams.cli import run
class CliTest(unittest.TestCase):
    def setUp(self):
        self.test_file = "test_diagram.py"
        # dummy content for the test file
        self.test_content_1 = """
from diagrams import Diagram
with Diagram(name="Test", show=False):
    pass
"""
        # content from getting started examples with utf-8
        # only support the installed fonts defined in Dockerfile
        self.test_content_2 = """
from diagrams import Diagram
from diagrams.aws.compute import EC2
from diagrams.aws.database import RDS
from diagrams.aws.network import ELB
with Diagram("test_2", show=False, direction="TB"):
    ELB("lb") >> [EC2("ワーカー1"),
                  EC2("작업자 2를"),
                  EC2("робітник 3"),
                  EC2("worker4"),
                  EC2("työntekijä 4")] >> RDS("events")
"""
    def tearDown(self):
        # Image produced by test_content_1 (diagram named "Test").
        try:
            os.remove("test.png")
        except FileNotFoundError:
            pass
    def test_run_with_valid_file(self):
        # Write a real file and run it through the CLI entry point.
        with open(self.test_file, "w") as f:
            f.write(self.test_content_1)
        with patch("sys.argv", ["diagrams", self.test_file]):
            self.assertEqual(run(), 0)
        try:
            os.remove(self.test_file)
        except FileNotFoundError:
            pass
    def test_run_with_multiple_files(self):
        code_files = ["file1.py", "file2.py"]
        contents = (self.test_content_1, self.test_content_2)
        # write the code files
        for fname, content in zip(code_files, contents):
            with open(fname, "w") as f:
                f.write(content)
        with patch("sys.argv", ["diagrams"] + code_files):
            self.assertEqual(run(), 0)
        # cleanup code files
        for fname in code_files:
            try:
                os.remove(fname)
            except FileNotFoundError:
                pass
        # cleanup generated image
        try:
            os.remove("test_2.png")
        except FileNotFoundError:
            pass
    def test_run_with_no_arguments(self):
        with patch("sys.argv", ["diagrams"]), \
                patch("sys.stderr", new=StringIO()) as fake_stderr:
            with self.assertRaises(SystemExit):
                run()
            self.assertIn("the following arguments are required: path",
                          fake_stderr.getvalue())
    def test_run_with_nonexistent_file(self):
        with patch("sys.argv", ["diagrams", "nonexistent.py"]):
            with self.assertRaises(FileNotFoundError):
                run()
    def test_run_with_invalid_python_code(self):
        bad_source = "this is not valid python code"
        with patch("builtins.open", mock_open(read_data=bad_source)):
            with patch("sys.argv", ["diagrams", self.test_file]):
                with self.assertRaises(SyntaxError):
                    run()
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/tests/__init__.py | tests/__init__.py | python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false | |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/cli.py | diagrams/cli.py | import argparse
import sys
def run() -> int:
    """Run diagrams code files in a diagrams environment.

    Parses command-line arguments (one or more paths to Python files
    containing diagrams code) and executes each file in turn.

    Returns:
        The exit code (0 on success).

    Raises:
        FileNotFoundError: If a given path does not exist.
        SyntaxError: If a file does not contain valid Python code.
    """
    parser = argparse.ArgumentParser(
        description="Run diagrams code files in a diagrams environment.",
    )
    parser.add_argument(
        "paths",
        metavar="path",
        type=str,
        nargs="+",
        help="a Python file containing diagrams code",
    )
    args = parser.parse_args()
    for path in args.paths:
        with open(path, encoding='utf-8') as f:
            # Compile with the real filename so tracebacks and syntax
            # errors point at the user's file instead of "<string>".
            exec(compile(f.read(), path, "exec"))
    return 0
def main():
    """Console entry point: exit the process with run()'s return code."""
    sys.exit(run())
if __name__ == "__main__":
    main()
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/__init__.py | diagrams/__init__.py | import contextvars
import os
import uuid
from pathlib import Path
from typing import Dict, List, Optional, Union
from graphviz import Digraph
# Global contexts for a diagrams and a cluster.
#
# These global contexts are for letting the clusters and nodes know
# where context they are belong to. So the all clusters and nodes does
# not need to specify the current diagrams or cluster via parameters.
__diagram = contextvars.ContextVar("diagrams")
__cluster = contextvars.ContextVar("cluster")
def getdiagram() -> Optional["Diagram"]:
    """Return the Diagram currently in context, or None when not inside one."""
    try:
        return __diagram.get()
    except LookupError:
        # The context variable has never been set in this context.
        return None
def setdiagram(diagram: "Diagram") -> None:
    """Install *diagram* as the current global diagram context."""
    __diagram.set(diagram)
def getcluster() -> Optional["Cluster"]:
    """Return the Cluster currently in context, or None when not inside one."""
    try:
        return __cluster.get()
    except LookupError:
        # The context variable has never been set in this context.
        return None
def setcluster(cluster: "Cluster") -> None:
    """Install *cluster* as the current global cluster context."""
    __cluster.set(cluster)
class Diagram:
    """Global drawing context manager.

    Use as ``with Diagram(...):`` — Node and Cluster instances created
    inside the block register themselves with this diagram, and the output
    file(s) are rendered when the block exits.
    """
    # Accepted values for graphviz rankdir / splines / output formats.
    __directions = ("TB", "BT", "LR", "RL")
    __curvestyles = ("ortho", "curved")
    __outformats = ("png", "jpg", "svg", "pdf", "dot")
    # fmt: off
    _default_graph_attrs = {
        "pad": "2.0",
        "splines": "ortho",
        "nodesep": "0.60",
        "ranksep": "0.75",
        "fontname": "Sans-Serif",
        "fontsize": "15",
        "fontcolor": "#2D3436",
    }
    _default_node_attrs = {
        "shape": "box",
        "style": "rounded",
        "fixedsize": "true",
        "width": "1.4",
        "height": "1.4",
        "labelloc": "b",
        # imagepos attribute is not backward compatible
        # TODO: check graphviz version to see if "imagepos" is available >= 2.40
        # https://github.com/xflr6/graphviz/blob/master/graphviz/backend.py#L248
        # "imagepos": "tc",
        "imagescale": "true",
        "fontname": "Sans-Serif",
        "fontsize": "13",
        "fontcolor": "#2D3436",
    }
    _default_edge_attrs = {
        "color": "#7B8894",
    }
    # fmt: on
    # TODO: Label position option
    # TODO: Save directory option (filename + directory?)
    def __init__(
        self,
        name: str = "",
        filename: str = "",
        direction: str = "LR",
        curvestyle: str = "ortho",
        outformat: Union[str, list[str]] = "png",
        autolabel: bool = False,
        show: bool = True,
        strict: bool = False,
        graph_attr: Optional[dict] = None,
        node_attr: Optional[dict] = None,
        edge_attr: Optional[dict] = None,
    ):
        """Diagram represents a global diagrams context.
        :param name: Diagram name. It will be used for output filename if the
            filename isn't given.
        :param filename: The output filename, without the extension (.png).
            If not given, it will be generated from the name.
        :param direction: Data flow direction. Default is 'left to right'.
        :param curvestyle: Curve bending style. One of "ortho" or "curved".
        :param outformat: Output file format (or list of formats). Default is 'png'.
        :param autolabel: Prefix every node label with its class name.
        :param show: Open generated image after save if true, just only save otherwise.
        :param graph_attr: Provide graph_attr dot config attributes.
        :param node_attr: Provide node_attr dot config attributes.
        :param edge_attr: Provide edge_attr dot config attributes.
        :param strict: Rendering should merge multi-edges.
        :raises ValueError: If direction, curvestyle or outformat is invalid.
        """
        if graph_attr is None:
            graph_attr = {}
        if node_attr is None:
            node_attr = {}
        if edge_attr is None:
            edge_attr = {}
        self.name = name
        # Derive the output filename from the diagram name when not given.
        if not name and not filename:
            filename = "diagrams_image"
        elif not filename:
            filename = "_".join(self.name.split()).lower()
        self.filename = filename
        self.dot = Digraph(self.name, filename=self.filename, strict=strict)
        # Set attributes.
        for k, v in self._default_graph_attrs.items():
            self.dot.graph_attr[k] = v
        self.dot.graph_attr["label"] = self.name
        for k, v in self._default_node_attrs.items():
            self.dot.node_attr[k] = v
        for k, v in self._default_edge_attrs.items():
            self.dot.edge_attr[k] = v
        if not self._validate_direction(direction):
            raise ValueError(f'"{direction}" is not a valid direction')
        self.dot.graph_attr["rankdir"] = direction
        if not self._validate_curvestyle(curvestyle):
            raise ValueError(f'"{curvestyle}" is not a valid curvestyle')
        self.dot.graph_attr["splines"] = curvestyle
        if isinstance(outformat, list):
            # Validate every requested format before accepting the list.
            for one_format in outformat:
                if not self._validate_outformat(one_format):
                    raise ValueError(
                        f'"{one_format}" is not a valid output format')
        else:
            if not self._validate_outformat(outformat):
                raise ValueError(f'"{outformat}" is not a valid output format')
        self.outformat = outformat
        # Merge passed in attributes
        self.dot.graph_attr.update(graph_attr)
        self.dot.node_attr.update(node_attr)
        self.dot.edge_attr.update(edge_attr)
        self.show = show
        self.autolabel = autolabel
    def __str__(self) -> str:
        """Return the DOT source of the underlying graph."""
        return str(self.dot)
    def __enter__(self):
        # Publish this diagram as the current global context.
        setdiagram(self)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Render output(s) and clear the global diagram context.
        self.render()
        # Remove the graphviz file leaving only the image.
        os.remove(self.filename)
        setdiagram(None)
    def _repr_png_(self):
        # Jupyter display hook: render the diagram inline as PNG bytes.
        return self.dot.pipe(format="png")
    def _validate_direction(self, direction: str) -> bool:
        # NOTE(review): validation is case-insensitive but the raw value is
        # passed to graphviz unchanged — confirm lowercase input is intended.
        return direction.upper() in self.__directions
    def _validate_curvestyle(self, curvestyle: str) -> bool:
        return curvestyle.lower() in self.__curvestyles
    def _validate_outformat(self, outformat: str) -> bool:
        return outformat.lower() in self.__outformats
    def node(self, nodeid: str, label: str, **attrs) -> None:
        """Create a new node."""
        self.dot.node(nodeid, label=label, **attrs)
    def connect(self, node: "Node", node2: "Node", edge: "Edge") -> None:
        """Connect the two Nodes."""
        self.dot.edge(node.nodeid, node2.nodeid, **edge.attrs)
    def subgraph(self, dot: Digraph) -> None:
        """Create a subgraph for clustering"""
        self.dot.subgraph(dot)
    def render(self) -> None:
        """Render the diagram to every configured output format."""
        if isinstance(self.outformat, list):
            for one_format in self.outformat:
                self.dot.render(format=one_format, view=self.show, quiet=True)
        else:
            self.dot.render(format=self.outformat, view=self.show, quiet=True)
class Cluster:
    """Grouping context: nodes created inside are drawn in a boxed subgraph."""
    __directions = ("TB", "BT", "LR", "RL")
    # Background colors cycled by nesting depth.
    __bgcolors = ("#E5F5FD", "#EBF3E7", "#ECE8F6", "#FDF7E3")
    # fmt: off
    _default_graph_attrs = {
        "shape": "box",
        "style": "rounded",
        "labeljust": "l",
        "pencolor": "#AEB6BE",
        "fontname": "Sans-Serif",
        "fontsize": "12",
    }
    # fmt: on
    # FIXME:
    # Cluster direction does not work now. Graphviz couldn't render
    # correctly for a subgraph that has a different rank direction.
    def __init__(
        self,
        label: str = "cluster",
        direction: str = "LR",
        graph_attr: Optional[dict] = None,
    ):
        """Cluster represents a cluster context.
        :param label: Cluster label.
        :param direction: Data flow direction. Default is 'left to right'.
        :param graph_attr: Provide graph_attr dot config attributes.
        :raises ValueError: If the direction is invalid.
        :raises EnvironmentError: If created outside a Diagram context.
        """
        if graph_attr is None:
            graph_attr = {}
        self.label = label
        # The "cluster_" name prefix tells graphviz to draw a boxed subgraph.
        self.name = "cluster_" + self.label
        self.dot = Digraph(self.name)
        # Set attributes.
        for k, v in self._default_graph_attrs.items():
            self.dot.graph_attr[k] = v
        self.dot.graph_attr["label"] = self.label
        if not self._validate_direction(direction):
            raise ValueError(f'"{direction}" is not a valid direction')
        self.dot.graph_attr["rankdir"] = direction
        # Node must be belong to a diagrams.
        self._diagram = getdiagram()
        if self._diagram is None:
            raise EnvironmentError("Global diagrams context not set up")
        self._parent = getcluster()
        # Set cluster depth for distinguishing the background color
        self.depth = self._parent.depth + 1 if self._parent else 0
        coloridx = self.depth % len(self.__bgcolors)
        self.dot.graph_attr["bgcolor"] = self.__bgcolors[coloridx]
        # Merge passed in attributes
        self.dot.graph_attr.update(graph_attr)
    def __enter__(self):
        # Make this cluster the current context for new nodes.
        setcluster(self)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Attach the finished subgraph to the parent cluster (when nested)
        # or directly to the diagram, then restore the previous context.
        if self._parent:
            self._parent.subgraph(self.dot)
        else:
            self._diagram.subgraph(self.dot)
        setcluster(self._parent)
    def _validate_direction(self, direction: str) -> bool:
        # Case-insensitive check against the allowed rankdir values.
        return direction.upper() in self.__directions
    def node(self, nodeid: str, label: str, **attrs) -> None:
        """Create a new node in the cluster."""
        self.dot.node(nodeid, label=label, **attrs)
    def subgraph(self, dot: Digraph) -> None:
        # Embed a nested cluster's graph into this one.
        self.dot.subgraph(dot)
class Node:
    """Node represents a node for a specific backend service.

    Subclasses set ``_provider``/``_type``/``_icon_dir``/``_icon`` so the
    right icon is attached; the ``>>``, ``<<`` and ``-`` operators create
    edges between nodes (or lists of nodes) inside a Diagram context.
    """

    _provider = None
    _type = None
    _icon_dir = None
    _icon = None

    # Default node height in inches; grown per extra label line in __init__.
    _height = 1.9

    def __init__(self, label: str = "", *, nodeid: Optional[str] = None, **attrs: Dict):
        """Node represents a system component.

        :param label: Node label.
        :param nodeid: Explicit node identifier; a random one is generated
            when omitted.
        :param attrs: Extra graphviz node attributes.
        :raises EnvironmentError: If created outside a Diagram context.
        """
        # Generates an ID for identifying a node, unless specified
        self._id = nodeid or self._rand_id()
        self.label = label
        # Node must be belong to a diagrams.
        self._diagram = getdiagram()
        if self._diagram is None:
            raise EnvironmentError("Global diagrams context not set up")
        # Optionally prefix the label with the node class name.
        if self._diagram.autolabel:
            prefix = self.__class__.__name__
            if self.label:
                self.label = prefix + "\n" + self.label
            else:
                self.label = prefix
        # fmt: off
        # If a node has an icon, increase the height slightly to avoid
        # that label being spanned between icon image and white space.
        # Increase the height by the number of new lines included in the label.
        padding = 0.4 * (self.label.count('\n'))
        self._attrs = {
            "shape": "none",
            "height": str(self._height + padding),
            "image": self._load_icon(),
        } if self._icon else {}
        # fmt: on
        self._attrs.update(attrs)
        self._cluster = getcluster()
        # If a node is in the cluster context, add it to cluster.
        if self._cluster:
            self._cluster.node(self._id, self.label, **self._attrs)
        else:
            self._diagram.node(self._id, self.label, **self._attrs)

    def __repr__(self):
        _name = self.__class__.__name__
        return f"<{self._provider}.{self._type}.{_name}>"

    def __sub__(self, other: Union["Node", List["Node"], "Edge"]):
        """Implement Self - Node, Self - [Nodes] and Self - Edge."""
        if isinstance(other, list):
            for node in other:
                self.connect(node, Edge(self))
            return other
        elif isinstance(other, Node):
            return self.connect(other, Edge(self))
        else:
            # Other is an Edge: bind this node as its source.
            other.node = self
            return other

    def __rsub__(self, other: Union[List["Node"], List["Edge"]]):
        """Called for [Nodes] and [Edges] - Self because list don't have __sub__ operators."""
        for o in other:
            if isinstance(o, Edge):
                o.connect(self)
            else:
                o.connect(self, Edge(self))
        return self

    def __rshift__(self, other: Union["Node", List["Node"], "Edge"]):
        """Implements Self >> Node, Self >> [Nodes] and Self >> Edge."""
        if isinstance(other, list):
            for node in other:
                self.connect(node, Edge(self, forward=True))
            return other
        elif isinstance(other, Node):
            return self.connect(other, Edge(self, forward=True))
        else:
            # Other is an Edge: mark it forward and bind this node.
            other.forward = True
            other.node = self
            return other

    def __lshift__(self, other: Union["Node", List["Node"], "Edge"]):
        """Implements Self << Node, Self << [Nodes] and Self << Edge."""
        if isinstance(other, list):
            for node in other:
                self.connect(node, Edge(self, reverse=True))
            return other
        elif isinstance(other, Node):
            return self.connect(other, Edge(self, reverse=True))
        else:
            # Other is an Edge: mark it reverse and connect immediately.
            other.reverse = True
            return other.connect(self)

    def __rrshift__(self, other: Union[List["Node"], List["Edge"]]):
        """Called for [Nodes] and [Edges] >> Self because list don't have __rshift__ operators."""
        for o in other:
            if isinstance(o, Edge):
                o.forward = True
                o.connect(self)
            else:
                o.connect(self, Edge(self, forward=True))
        return self

    def __rlshift__(self, other: Union[List["Node"], List["Edge"]]):
        """Called for [Nodes] << Self because list of Nodes don't have __lshift__ operators."""
        for o in other:
            if isinstance(o, Edge):
                o.reverse = True
                o.connect(self)
            else:
                o.connect(self, Edge(self, reverse=True))
        return self

    @property
    def nodeid(self):
        """The unique graphviz identifier of this node."""
        return self._id

    # TODO: option for adding flow description to the connection edge
    def connect(self, node: "Node", edge: "Edge"):
        """Connect to other node.

        :param node: Other node instance.
        :param edge: Type of the edge.
        :return: Connected node.
        :raises ValueError: If *node* or *edge* has the wrong type.
        """
        # Bug fix: these ValueErrors were previously constructed but never
        # raised, so invalid arguments were silently accepted.
        if not isinstance(node, Node):
            raise ValueError(f"{node} is not a valid Node")
        if not isinstance(edge, Edge):
            raise ValueError(f"{edge} is not a valid Edge")
        # An edge must be added on the global diagrams, not a cluster.
        self._diagram.connect(self, node, edge)
        return node

    @staticmethod
    def _rand_id():
        # Random 32-character hex string used as the graphviz node id.
        return uuid.uuid4().hex

    def _load_icon(self):
        # Resolve the icon file relative to the installed package root.
        basedir = Path(os.path.abspath(os.path.dirname(__file__)))
        return os.path.join(basedir.parent, self._icon_dir, self._icon)
class Edge:
    """Edge represents an edge between two nodes."""
    _default_edge_attrs = {
        "fontcolor": "#2D3436",
        "fontname": "Sans-Serif",
        "fontsize": "13",
    }
    def __init__(
        self,
        node: "Node" = None,
        forward: bool = False,
        reverse: bool = False,
        label: str = "",
        color: str = "",
        style: str = "",
        **attrs: Dict,
    ):
        """Edge represents an edge between two nodes.
        :param node: Parent node.
        :param forward: Points forward.
        :param reverse: Points backward.
        :param label: Edge label.
        :param color: Edge color.
        :param style: Edge style.
        :param attrs: Other edge attributes
        """
        if node is not None:
            assert isinstance(node, Node)
        self.node = node
        self.forward = forward
        self.reverse = reverse
        self._attrs = {}
        # Set attributes.
        for k, v in self._default_edge_attrs.items():
            self._attrs[k] = v
        if label:
            # Graphviz complaining about using label for edges, so replace it with xlabel.
            # Update: xlabel option causes the misaligned label position:
            # https://github.com/mingrammer/diagrams/issues/83
            self._attrs["label"] = label
        if color:
            self._attrs["color"] = color
        if style:
            self._attrs["style"] = style
        self._attrs.update(attrs)
    def __sub__(self, other: Union["Node", "Edge", List["Node"]]):
        """Implement Self - Node or Edge and Self - [Nodes]"""
        return self.connect(other)
    def __rsub__(self, other: Union[List["Node"],
                 List["Edge"]]) -> List["Edge"]:
        """Called for [Nodes] or [Edges] - Self because list don't have __sub__ operators."""
        return self.append(other)
    def __rshift__(self, other: Union["Node", "Edge", List["Node"]]):
        """Implements Self >> Node or Edge and Self >> [Nodes]."""
        self.forward = True
        return self.connect(other)
    def __lshift__(self, other: Union["Node", "Edge", List["Node"]]):
        """Implements Self << Node or Edge and Self << [Nodes]."""
        self.reverse = True
        return self.connect(other)
    def __rrshift__(self,
                    other: Union[List["Node"],
                                 List["Edge"]]) -> List["Edge"]:
        """Called for [Nodes] or [Edges] >> Self because list of Edges don't have __rshift__ operators."""
        return self.append(other, forward=True)
    def __rlshift__(self,
                    other: Union[List["Node"],
                                 List["Edge"]]) -> List["Edge"]:
        """Called for [Nodes] or [Edges] << Self because list of Edges don't have __lshift__ operators."""
        return self.append(other, reverse=True)
    def append(self,
               other: Union[List["Node"],
                            List["Edge"]],
               forward=None,
               reverse=None) -> List["Edge"]:
        """Turn a list of nodes/edges into edges carrying this edge's state."""
        result = []
        for o in other:
            if isinstance(o, Edge):
                # Keep the other edge's direction unless overridden here.
                o.forward = forward if forward else o.forward
                o.reverse = reverse if reverse else o.reverse
                # NOTE(review): this copies the *other* edge's attrs onto
                # self (not the reverse) — looks like deliberate attribute
                # propagation along the chain, but verify against callers.
                self._attrs = o.attrs.copy()
                result.append(o)
            else:
                # Wrap a bare node in a new edge inheriting our attributes.
                result.append(
                    Edge(
                        o,
                        forward=forward,
                        reverse=reverse,
                        **self._attrs))
        return result
    def connect(self, other: Union["Node", "Edge", List["Node"]]):
        """Connect to a node list, merge with another edge, or bind/connect
        a single node, depending on the operand type."""
        if isinstance(other, list):
            for node in other:
                self.node.connect(node, self)
            return other
        elif isinstance(other, Edge):
            # Adopt the other edge's attributes (chained Edge >> Edge case).
            self._attrs = other._attrs.copy()
            return self
        else:
            if self.node is not None:
                return self.node.connect(other, self)
            else:
                # No source bound yet: remember the node as our source.
                self.node = other
                return self
    @property
    def attrs(self) -> Dict:
        """Edge attributes including the computed graphviz ``dir`` value."""
        if self.forward and self.reverse:
            direction = "both"
        elif self.forward:
            direction = "forward"
        elif self.reverse:
            direction = "back"
        else:
            direction = "none"
        return {**self._attrs, "dir": direction}
Group = Cluster
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/crm.py | diagrams/saas/crm.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Crm(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "crm"
    _icon_dir = "resources/saas/crm"
class Intercom(_Crm):
    _icon = "intercom.png"
class Zendesk(_Crm):
    _icon = "zendesk.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/recommendation.py | diagrams/saas/recommendation.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Recommendation(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "recommendation"
    _icon_dir = "resources/saas/recommendation"
class Recombee(_Recommendation):
    _icon = "recombee.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/security.py | diagrams/saas/security.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Security(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "security"
    _icon_dir = "resources/saas/security"
class Crowdstrike(_Security):
    _icon = "crowdstrike.png"
class Sonarqube(_Security):
    _icon = "sonarqube.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/automation.py | diagrams/saas/automation.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Automation(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "automation"
    _icon_dir = "resources/saas/automation"
class N8N(_Automation):
    _icon = "n8n.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/social.py | diagrams/saas/social.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Social(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "social"
    _icon_dir = "resources/saas/social"
class Facebook(_Social):
    _icon = "facebook.png"
class Twitter(_Social):
    _icon = "twitter.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/identity.py | diagrams/saas/identity.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Identity(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "identity"
    _icon_dir = "resources/saas/identity"
class Auth0(_Identity):
    _icon = "auth0.png"
class Okta(_Identity):
    _icon = "okta.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/chat.py | diagrams/saas/chat.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Chat(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "chat"
    _icon_dir = "resources/saas/chat"
class Discord(_Chat):
    _icon = "discord.png"
class Line(_Chat):
    _icon = "line.png"
class Mattermost(_Chat):
    _icon = "mattermost.png"
class Messenger(_Chat):
    _icon = "messenger.png"
class RocketChat(_Chat):
    _icon = "rocket-chat.png"
class Slack(_Chat):
    _icon = "slack.png"
class Teams(_Chat):
    _icon = "teams.png"
class Telegram(_Chat):
    _icon = "telegram.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/payment.py | diagrams/saas/payment.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Payment(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "payment"
    _icon_dir = "resources/saas/payment"
class Adyen(_Payment):
    _icon = "adyen.png"
class AmazonPay(_Payment):
    _icon = "amazon-pay.png"
class Paypal(_Payment):
    _icon = "paypal.png"
class Stripe(_Payment):
    _icon = "stripe.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/analytics.py | diagrams/saas/analytics.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Analytics(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "analytics"
    _icon_dir = "resources/saas/analytics"
class Dataform(_Analytics):
    _icon = "dataform.png"
class Snowflake(_Analytics):
    _icon = "snowflake.png"
class Stitch(_Analytics):
    _icon = "stitch.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/filesharing.py | diagrams/saas/filesharing.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Filesharing(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "filesharing"
    _icon_dir = "resources/saas/filesharing"
class Nextcloud(_Filesharing):
    _icon = "nextcloud.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/logging.py | diagrams/saas/logging.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Logging(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "logging"
    _icon_dir = "resources/saas/logging"
class Datadog(_Logging):
    _icon = "datadog.png"
class Newrelic(_Logging):
    _icon = "newrelic.png"
class Papertrail(_Logging):
    _icon = "papertrail.png"
# Aliases
DataDog = Datadog
NewRelic = Newrelic
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/__init__.py | diagrams/saas/__init__.py | """
Saas provides a set of general saas services.
"""
from diagrams import Node
class _Saas(Node):
    # Provider base for all SaaS nodes: provider name and icon root.
    _provider = "saas"
    _icon_dir = "resources/saas"
    fontcolor = "#ffffff"
class Saas(_Saas):
    _icon = "saas.png"
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/media.py | diagrams/saas/media.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Media(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "media"
    _icon_dir = "resources/saas/media"
class Cloudinary(_Media):
    _icon = "cloudinary.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/alerting.py | diagrams/saas/alerting.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Alerting(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "alerting"
    _icon_dir = "resources/saas/alerting"
class Newrelic(_Alerting):
    _icon = "newrelic.png"
class Opsgenie(_Alerting):
    _icon = "opsgenie.png"
class Pagerduty(_Alerting):
    _icon = "pagerduty.png"
class Pushover(_Alerting):
    _icon = "pushover.png"
class Xmatters(_Alerting):
    _icon = "xmatters.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/cdn.py | diagrams/saas/cdn.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Cdn(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "cdn"
    _icon_dir = "resources/saas/cdn"
class Akamai(_Cdn):
    _icon = "akamai.png"
class Cloudflare(_Cdn):
    _icon = "cloudflare.png"
class Fastly(_Cdn):
    _icon = "fastly.png"
class Imperva(_Cdn):
    _icon = "imperva.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/saas/communication.py | diagrams/saas/communication.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Saas
class _Communication(_Saas):
    # Category base: fixes the node type and icon lookup directory.
    _type = "communication"
    _icon_dir = "resources/saas/communication"
class Twilio(_Communication):
    _icon = "twilio.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/outscale/security.py | diagrams/outscale/security.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Outscale
class _Security(_Outscale):
    # Category base: fixes the node type and icon lookup directory.
    _type = "security"
    _icon_dir = "resources/outscale/security"
class Firewall(_Security):
    _icon = "firewall.png"
class IdentityAndAccessManagement(_Security):
    _icon = "identity-and-access-management.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/outscale/compute.py | diagrams/outscale/compute.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Outscale
class _Compute(_Outscale):
    # Category base: fixes the node type and icon lookup directory.
    _type = "compute"
    _icon_dir = "resources/outscale/compute"
class Compute(_Compute):
    _icon = "compute.png"
class DirectConnect(_Compute):
    _icon = "direct-connect.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/outscale/storage.py | diagrams/outscale/storage.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Outscale
class _Storage(_Outscale):
    # Category base: fixes the node type and icon lookup directory.
    _type = "storage"
    _icon_dir = "resources/outscale/storage"
class SimpleStorageService(_Storage):
    _icon = "simple-storage-service.png"
class Storage(_Storage):
    _icon = "storage.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/outscale/network.py | diagrams/outscale/network.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Outscale
class _Network(_Outscale):
    # Category base: fixes the node type and icon lookup directory.
    _type = "network"
    _icon_dir = "resources/outscale/network"
class ClientVpn(_Network):
    _icon = "client-vpn.png"
class InternetService(_Network):
    _icon = "internet-service.png"
class LoadBalancer(_Network):
    _icon = "load-balancer.png"
class NatService(_Network):
    _icon = "nat-service.png"
class Net(_Network):
    _icon = "net.png"
class SiteToSiteVpng(_Network):
    _icon = "site-to-site-vpng.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/outscale/__init__.py | diagrams/outscale/__init__.py | from diagrams import Node
class _Outscale(Node):
_provider = "outscale"
_icon_dir = "resources/outscale"
fontcolor = "#ffffff"
class Outscale(_Outscale):
_icon = "outscale.png"
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/custom/__init__.py | diagrams/custom/__init__.py | """
Custom provides the possibility of load an image to be presented as a node.
"""
from diagrams import Node
class Custom(Node):
_provider = "custom"
_type = "custom"
_icon_dir = None
fontcolor = "#ffffff"
def _load_icon(self):
return self._icon
def __init__(self, label, icon_path, *args, **kwargs):
self._icon = icon_path
super().__init__(label, *args, **kwargs)
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/generic/blank.py | diagrams/generic/blank.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Generic
class _Blank(_Generic):
_type = "blank"
_icon_dir = "resources/generic/blank"
class Blank(_Blank):
_icon = "blank.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/generic/compute.py | diagrams/generic/compute.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Generic
class _Compute(_Generic):
_type = "compute"
_icon_dir = "resources/generic/compute"
class Rack(_Compute):
_icon = "rack.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/generic/storage.py | diagrams/generic/storage.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Generic
class _Storage(_Generic):
_type = "storage"
_icon_dir = "resources/generic/storage"
class Storage(_Storage):
_icon = "storage.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/generic/network.py | diagrams/generic/network.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Generic
class _Network(_Generic):
_type = "network"
_icon_dir = "resources/generic/network"
class Firewall(_Network):
_icon = "firewall.png"
class Router(_Network):
_icon = "router.png"
class Subnet(_Network):
_icon = "subnet.png"
class Switch(_Network):
_icon = "switch.png"
class VPN(_Network):
_icon = "vpn.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/generic/database.py | diagrams/generic/database.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Generic
class _Database(_Generic):
_type = "database"
_icon_dir = "resources/generic/database"
class SQL(_Database):
_icon = "sql.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/generic/os.py | diagrams/generic/os.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Generic
class _Os(_Generic):
_type = "os"
_icon_dir = "resources/generic/os"
class Android(_Os):
_icon = "android.png"
class Centos(_Os):
_icon = "centos.png"
class Debian(_Os):
_icon = "debian.png"
class IOS(_Os):
_icon = "ios.png"
class LinuxGeneral(_Os):
_icon = "linux-general.png"
class Raspbian(_Os):
_icon = "raspbian.png"
class RedHat(_Os):
_icon = "red-hat.png"
class Suse(_Os):
_icon = "suse.png"
class Ubuntu(_Os):
_icon = "ubuntu.png"
class Windows(_Os):
_icon = "windows.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/generic/__init__.py | diagrams/generic/__init__.py | """
Generic provides the possibility of load an image to be presented as a node.
"""
from diagrams import Node
class _Generic(Node):
provider = "generic"
_icon_dir = "resources/generic"
fontcolor = "#ffffff"
class Generic(_Generic):
_icon = "generic.png"
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
mingrammer/diagrams | https://github.com/mingrammer/diagrams/blob/072474aa2a7d24cd074b2e1a61d0b714c3daa945/diagrams/generic/virtualization.py | diagrams/generic/virtualization.py | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Generic
class _Virtualization(_Generic):
_type = "virtualization"
_icon_dir = "resources/generic/virtualization"
class Qemu(_Virtualization):
_icon = "qemu.png"
class Virtualbox(_Virtualization):
_icon = "virtualbox.png"
class Vmware(_Virtualization):
_icon = "vmware.png"
class XEN(_Virtualization):
_icon = "xen.png"
# Aliases
| python | MIT | 072474aa2a7d24cd074b2e1a61d0b714c3daa945 | 2026-01-04T14:39:58.534150Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.