sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
serengil/deepface:tests/integration/test_postgres_register.py | # built-in dependencies
import os
from unittest.mock import patch
# 3rd party dependencies
import pytest
import psycopg
from deepface import DeepFace
# project dependencies
from deepface.commons.logger import Logger
logger = Logger()
connection_details_dict = {
"host": "localhost",
"port": 5433,
"dbname": "deepface",
"user": "deepface_user",
"password": "deepface_pass",
}
connection_details_str = "postgresql://deepface_user:deepface_pass@localhost:5433/deepface"
# pylint: disable=unused-argument
@pytest.fixture
def flush_data():
    """Empty the embeddings table so each test starts from a clean database."""
    # Context managers guarantee the cursor and connection are released even
    # if the DELETE statement raises.
    with psycopg.connect(**connection_details_dict) as conn:
        with conn.cursor() as cur:
            cur.execute("DELETE FROM embeddings;")
        conn.commit()
    logger.debug("🗑️ Embeddings data flushed.")
def test_register_with_json(flush_data):
    """Register a face passing connection details as a dict (name typo 'regsiter' fixed)."""
    img_path = "../unit/dataset/img1.jpg"
    result = DeepFace.register(
        img=img_path,
        model_name="Facenet",
        detector_backend="mtcnn",
        connection_details=connection_details_dict,
    )
    # A single-face image should yield exactly one inserted embedding.
    assert result["inserted"] == 1
    logger.info("✅ Registered with json test passed.")
def test_register_with_string(flush_data):
    """Register a face passing connection details as a postgres URI string."""
    img_path = "../unit/dataset/img1.jpg"
    result = DeepFace.register(
        img=img_path,
        model_name="Facenet",
        detector_backend="mtcnn",
        connection_details=connection_details_str,
    )
    # A single-face image should yield exactly one inserted embedding.
    assert result["inserted"] == 1
    logger.info("✅ Registered with string test passed.")
@patch.dict(os.environ, {"DEEPFACE_POSTGRES_URI": connection_details_str})
def test_register_with_envvar(flush_data):
    """Register a face with connection details sourced from the environment variable."""
    img_path = "../unit/dataset/img1.jpg"
    result = DeepFace.register(
        img=img_path,
        model_name="Facenet",
        detector_backend="mtcnn",
    )
    # A single-face image should yield exactly one inserted embedding.
    assert result["inserted"] == 1
    logger.info("✅ Registered with env var test passed.")
def test_register_with_connection(flush_data):
    """Register a face using a caller-managed psycopg connection object."""
    conn = psycopg.connect(**connection_details_dict)
    try:
        img_path = "../unit/dataset/img1.jpg"
        result = DeepFace.register(
            img=img_path,
            model_name="Facenet",
            detector_backend="mtcnn",
            connection=conn,
        )
        assert result["inserted"] == 1
    finally:
        # Close the connection even if register() or the assertion fails.
        conn.close()
    logger.info("✅ Registered with connection test passed.")
def test_register_duplicate(flush_data):
    """Duplicate (model, detector) registrations of the same image must be rejected."""
    img1_path = "../unit/dataset/img1.jpg"
    result = DeepFace.register(
        img=img1_path,
        model_name="Facenet",
        detector_backend="mtcnn",
        connection_details=connection_details_dict,
    )
    assert result["inserted"] == 1
    # Facenet & opencv pair should have different extracted face & embedding than Facenet & mtcnn
    result = DeepFace.register(
        img=img1_path,
        model_name="Facenet",
        detector_backend="opencv",
        connection_details=connection_details_dict,
    )
    assert result["inserted"] == 1
    # Duplicate registration with same model & detector should raise error
    with pytest.raises(ValueError, match="Duplicate detected for extracted face and embedding"):
        _ = DeepFace.register(
            img=img1_path,
            model_name="Facenet",
            detector_backend="mtcnn",
            connection_details=connection_details_dict,
        )
    logger.info("✅ Duplicate registration test passed.")
| {
"repo_id": "serengil/deepface",
"file_path": "tests/integration/test_postgres_register.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
serengil/deepface:tests/integration/test_postgres_search.py | # built-in dependencies
import os
from unittest.mock import patch
# 3rd party dependencies
import pytest
import psycopg
from deepface import DeepFace
from tqdm import tqdm
import pandas as pd
# project dependencies
from deepface.commons.logger import Logger
logger = Logger()
connection_details_dict = {
"host": "localhost",
"port": 5433,
"dbname": "deepface",
"user": "deepface_user",
"password": "deepface_pass",
}
# pylint: disable=unused-argument
@pytest.fixture
def flush_data():
    """Empty the embeddings table so each search test starts from a clean database."""
    # Context managers guarantee the cursor and connection are released even
    # if the DELETE statement raises.
    with psycopg.connect(**connection_details_dict) as conn:
        with conn.cursor() as cur:
            cur.execute("DELETE FROM embeddings;")
        conn.commit()
    logger.info("🗑️ Embeddings data flushed.")
@pytest.fixture
def load_data():
    """Register every image under ../unit/dataset (Facenet + mtcnn) for the search tests."""
    conn = psycopg.connect(**connection_details_dict)
    try:
        # Collect candidate image files first so tqdm can show total progress.
        database_items = []
        for dirpath, _dirnames, filenames in os.walk("../unit/dataset"):
            for filename in filenames:
                if not filename.lower().endswith((".png", ".jpg", ".jpeg")):
                    continue
                database_items.append(os.path.join(dirpath, filename))
        for img_path in tqdm(database_items):
            _ = DeepFace.register(
                img=img_path,
                model_name="Facenet",
                detector_backend="mtcnn",
                connection=conn,
            )
    finally:
        # Release the connection even if a registration fails mid-way.
        conn.close()
    logger.info(f"✅ Data with size {len(database_items)} loaded into Postgres for search tests.")
def test_postgres_search(flush_data, load_data):
    """Search for a target face and validate the shape and content of the results."""
    conn = psycopg.connect(**connection_details_dict)
    try:
        target_path = "dataset/target.jpg"
        # we loaded data for Facenet and mtcnn, not opencv
        with pytest.raises(ValueError, match="No embeddings found in the database for the criteria"):
            _ = DeepFace.search(
                img=target_path,
                model_name="Facenet",
                detector_backend="opencv",
                connection=conn,
            )
        dfs = DeepFace.search(
            img=target_path,
            model_name="Facenet",
            distance_metric="euclidean",
            detector_backend="mtcnn",
            connection=conn,
        )
        assert isinstance(dfs, list)
        assert len(dfs) == 1
        for df in dfs:
            assert isinstance(df, pd.DataFrame)
            assert df.shape[0] > 0
            logger.info(df)
    finally:
        # Close the connection even if a search or assertion fails.
        conn.close()
    logger.info("✅ Postgres search test passed.")
| {
"repo_id": "serengil/deepface",
"file_path": "tests/integration/test_postgres_search.py",
"license": "MIT License",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
serengil/deepface:deepface/modules/exceptions.py | # pylint: disable=unnecessary-pass
class ImgNotFound(ValueError):
"""Raised when the input image is not found or cannot be loaded."""
pass
class PathNotFound(ValueError):
"""Raised when the input path is not found."""
pass
class FaceNotDetected(ValueError):
"""Raised when no face is detected in the input image."""
pass
class SpoofDetected(ValueError):
"""Raised when a spoofed face is detected in the input image."""
pass
class EmptyDatasource(ValueError):
"""Raised when the provided data source is empty."""
pass
class DimensionMismatchError(ValueError):
"""Raised when the dimensions of the input do not match the expected dimensions."""
pass
class InvalidEmbeddingsShapeError(ValueError):
"""Raised when the shape of the embeddings is invalid."""
pass
class DataTypeError(ValueError):
"""Raised when the input data type is incorrect."""
pass
class UnimplementedError(ValueError):
"""Raised when a requested feature is not implemented."""
pass
class DuplicateEntryError(ValueError):
"""Raised when a duplicate entry is found in the database."""
pass
| {
"repo_id": "serengil/deepface",
"file_path": "deepface/modules/exceptions.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
serengil/deepface:deepface/commons/embed_utils.py | from typing import List, Union
def is_flat_embedding(x: Union[List[float], List[List[float]]]) -> bool:
    """
    Determine whether ``x`` is one embedding (a flat list of numbers)
    as opposed to a batch of embeddings (a list of lists).
    Args:
        x (List[float] or List[List[float]]): Embedding(s) to inspect.
    Returns:
        bool: True when x is a flat list of numbers, False otherwise.
    """
    if not isinstance(x, list):
        return False
    return all(isinstance(item, (int, float)) for item in x)
| {
"repo_id": "serengil/deepface",
"file_path": "deepface/commons/embed_utils.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
serengil/deepface:deepface/modules/encryption.py | # built-in dependencies
from typing import List, Union, Optional, cast
# third-party dependencies
from lightphe import LightPHE
from lightphe.models.Tensor import EncryptedTensor
import numpy as np
# project dependencies
from deepface.commons.embed_utils import is_flat_embedding
from deepface.commons.logger import Logger
logger = Logger()
# pylint: disable=no-else-return
def encrypt_embeddings(
    embeddings: Union[List[float], List[List[float]]], cryptosystem: Optional[LightPHE] = None
) -> Union[EncryptedTensor, List[EncryptedTensor], None]:
    """
    Encrypt a single embedding or a batch of embeddings with the given cryptosystem.
    Args:
        embeddings (List[float] or List[List[float]]): Embeddings to encrypt.
        cryptosystem (LightPHE): Cryptosystem to use for encryption.
    Returns:
        EncryptedTensor or List[EncryptedTensor] or None: Encrypted embeddings or None
        if no cryptosystem is provided.
    """
    if cryptosystem is None:
        return None
    if is_flat_embedding(embeddings):
        # Single vector: encrypt it directly.
        return encrypt_embedding(cast(List[float], embeddings), cryptosystem)
    batch = cast(List[List[float]], embeddings)
    encrypted = [encrypt_embedding(vector, cryptosystem) for vector in batch]
    # When every vector was skipped, report the whole batch as skipped.
    if all(item is None for item in encrypted):
        return None
    return encrypted
def encrypt_embedding(embeddings: List[float], cryptosystem: LightPHE) -> Optional[EncryptedTensor]:
    """
    Encrypt an embedding using a provided cryptosystem.
    Args:
        embeddings (List[float]): Embedding to encrypt.
        cryptosystem (LightPHE): Cryptosystem to use for encryption.
    Returns:
        EncryptedTensor or None: Encrypted embedding or None if encryption is skipped.
    """
    # NOTE(review): negative values are rejected — presumably an input
    # constraint of the homomorphic cryptosystem; confirm against LightPHE docs.
    if any(x < 0 for x in embeddings):
        # Bug fix: the adjacent string literals previously concatenated
        # without a separating space ("values.Consider").
        logger.warn(
            "Skipping encryption because it contains negative values. "
            "Consider to set minmax_normalize=True in DeepFace.represent method."
        )
        return None
    norm = np.linalg.norm(embeddings)
    if not np.isclose(norm, 1.0):
        logger.warn(
            "Skipping encryption because given embedding is not l_2 normalized. "
            "Consider to set l2_normalize=True in DeepFace.represent method."
        )
        return None
    encrypted_embeddings = cryptosystem.encrypt(embeddings, silent=True)
    return encrypted_embeddings
| {
"repo_id": "serengil/deepface",
"file_path": "deepface/modules/encryption.py",
"license": "MIT License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
serengil/deepface:deepface/config/minmax.py | from typing import Tuple
# these values are determined empirically for each model from unit test items
minmax_values = {
    "VGG-Face": (0.0, 0.27054874177488775),
    "Facenet": (-3.541942596435547, 3.247769594192505),
    "Facenet512": (-4.388302803039551, 3.643190622329712),
    "OpenFace": (-0.34191709756851196, 0.26318004727363586),
    "DeepFace": (0.0, 17.294939041137695),
    "DeepID": (0.0, 127.86836242675781),
    "Dlib": (-0.41398656368255615, 0.5201137661933899),
    "ArcFace": (-2.945136308670044, 2.087090015411377),
}


def get_minmax_values(model_name: str) -> Tuple[float, float]:
    """Return the empirical (min, max) range for a model, or (0, 0) if unknown."""
    return minmax_values.get(model_name, (0, 0))
| {
"repo_id": "serengil/deepface",
"file_path": "deepface/config/minmax.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
serengil/deepface:deepface/modules/normalization.py | # built-in dependencies
from typing import List, Union, cast
# third-party dependencies
import numpy as np
# project dependencies
from deepface.config.minmax import get_minmax_values
from deepface.commons.embed_utils import is_flat_embedding
def normalize_embedding_minmax(
    model_name: str, embeddings: Union[List[float], List[List[float]]]
) -> Union[List[float], List[List[float]]]:
    """
    Normalize embeddings using min-max normalization based on model-specific min-max values.
    Args:
        model_name (str): Name of the model to get min-max values for.
        embeddings (List[float] or List[List[float]]): Embeddings to normalize.
    Returns:
        List[float] or List[List[float]]: Normalized embeddings (returned unchanged
        when the model has no recorded min-max range).
    """
    dim_min, dim_max = get_minmax_values(model_name)
    span = dim_max - dim_min
    if span == 0:
        # Unknown model: no range to scale with, return input untouched.
        return embeddings

    def _scale(vector: List[float]) -> List[float]:
        # Clamp each value to [dim_min, dim_max] so the result stays in [0, 1],
        # then shift/scale into the unit interval.
        return [(min(max(x, dim_min), dim_max) - dim_min) / span for x in vector]

    if is_flat_embedding(embeddings):
        return _scale(cast(List[float], embeddings))
    # Batch input: scale each embedding independently. (Previous code clamped
    # every value twice; a single clamp is equivalent and cheaper.)
    return [_scale(cast(List[float], emb)) for emb in embeddings]
def normalize_embedding_l2(
    embeddings: Union[List[float], List[List[float]]],
) -> Union[List[float], List[List[float]]]:
    """
    Scale embeddings to unit length (L2 norm). Zero vectors are left untouched.
    Args:
        embeddings (List[float] or List[List[float]]): Embeddings to normalize.
    Returns:
        List[float] or List[List[float]]: L2-normalized embeddings.
    """

    def _unit(vector: List[float]) -> List[float]:
        # Divide by the vector's magnitude unless it is the zero vector.
        magnitude = float(np.linalg.norm(vector))
        if magnitude > 0:
            return (np.array(vector) / magnitude).tolist()
        return vector

    if is_flat_embedding(embeddings):
        return _unit(cast(List[float], embeddings))
    return [_unit(cast(List[float], emb)) for emb in embeddings]
| {
"repo_id": "serengil/deepface",
"file_path": "deepface/modules/normalization.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
serengil/deepface:deepface/config/threshold.py | thresholds = {
"VGG-Face": {"cosine": 0.68, "euclidean": 1.17, "euclidean_l2": 1.17, "angular": 0.39},
"Facenet": {"cosine": 0.40, "euclidean": 10, "euclidean_l2": 0.80, "angular": 0.33},
"Facenet512": {"cosine": 0.30, "euclidean": 23.56, "euclidean_l2": 1.04, "angular": 0.35},
"ArcFace": {"cosine": 0.68, "euclidean": 4.15, "euclidean_l2": 1.13, "angular": 0.39},
"Dlib": {"cosine": 0.07, "euclidean": 0.6, "euclidean_l2": 0.4, "angular": 0.12},
"SFace": {"cosine": 0.593, "euclidean": 10.734, "euclidean_l2": 1.055, "angular": 0.36},
"OpenFace": {"cosine": 0.10, "euclidean": 0.55, "euclidean_l2": 0.55, "angular": 0.11},
"DeepFace": {"cosine": 0.23, "euclidean": 64, "euclidean_l2": 0.64, "angular": 0.12},
"DeepID": {"cosine": 0.015, "euclidean": 45, "euclidean_l2": 0.17, "angular": 0.04},
"GhostFaceNet": {"cosine": 0.65, "euclidean": 35.71, "euclidean_l2": 1.10, "angular": 0.38},
"Buffalo_L": {"cosine": 0.55, "euclidean": 0.6, "euclidean_l2": 1.1, "angular": 0.45},
}
| {
"repo_id": "serengil/deepface",
"file_path": "deepface/config/threshold.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
sherlock-project/sherlock:devel/summarize_site_validation.py | #!/usr/bin/env python
# This module summarizes the results of site validation tests queued by
# workflow validate_modified_targets for presentation in Issue comments.
from defusedxml import ElementTree as ET
import sys
from pathlib import Path
def summarize_junit_xml(xml_path: Path) -> str:
    """Render a JUnit XML validation report as a Markdown summary table."""
    suite = ET.parse(xml_path).getroot().find('testsuite')
    pass_message: str = ":heavy_check_mark: Pass"
    fail_message: str = ":x: Fail"
    if suite is None:
        raise ValueError("Invalid JUnit XML: No testsuite found")
    out: list[str] = [
        "#### Automatic validation of changes\n",
        "| Target | F+ Check | F- Check |",
        "|---|---|---|",
    ]
    failures = int(suite.get('failures', 0))
    errors_detected: bool = False
    per_site: dict[str, dict[str, str]] = {}
    for case in suite.findall('testcase'):
        # Test names look like "test_false_pos[SiteName]".
        check_name = case.get('name').split('[')[0]
        target = case.get('name').split('[')[1].rstrip(']')
        failed = case.find('failure')
        errored = case.find('error')
        verdict = pass_message if failed is None and errored is None else fail_message
        row = per_site.setdefault(target, {})
        if check_name == "test_false_neg":
            row['F- Check'] = verdict
        elif check_name == "test_false_pos":
            row['F+ Check'] = verdict
        if errored is not None:
            errors_detected = True
    for target, row in per_site.items():
        out.append(f"| {target} | {row.get('F+ Check', 'Error!')} | {row.get('F- Check', 'Error!')} |")
    if failures > 0:
        out.append("\n___\n" +
            "\nFailures were detected on at least one updated target. Commits containing accuracy failures" +
            " will often not be merged (unless a rationale is provided, such as false negatives due to regional differences).")
    if errors_detected:
        out.append("\n___\n" +
            "\n**Errors were detected during validation. Please review the workflow logs.**")
    return "\n".join(out)
if __name__ == "__main__":
    # Expect exactly one argument: the JUnit XML report to summarize.
    if len(sys.argv) != 2:
        print("Usage: summarize_site_validation.py <junit-xml-file>")
        sys.exit(1)
    report_file: Path = Path(sys.argv[1])
    if not report_file.is_file():
        print(f"Error: File '{report_file}' does not exist.")
        sys.exit(1)
    print(summarize_junit_xml(report_file))
| {
"repo_id": "sherlock-project/sherlock",
"file_path": "devel/summarize_site_validation.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sherlock-project/sherlock:tests/test_validate_targets.py | import pytest
import re
import rstr
from sherlock_project.sherlock import sherlock
from sherlock_project.notify import QueryNotify
from sherlock_project.result import QueryResult, QueryStatus
FALSE_POSITIVE_ATTEMPTS: int = 2  # Since the usernames are randomly generated, it's POSSIBLE that a real username can be hit
FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND: int = 15  # If a pattern uses quantifiers such as `+` `*` or `{n,}`, limit the upper bound (0 to disable)
FALSE_POSITIVE_DEFAULT_PATTERN: str = r'^[a-zA-Z0-9]{7,20}$'  # Used in absence of a regexCheck entry


def set_pattern_upper_bound(pattern: str, upper_bound: int = FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND) -> str:
    """Rewrite unbounded regex quantifiers (`{n,}`, `+`, `*`) with a finite upper bound."""
    # The bound is kept in a one-element list so the replacement callback can
    # ratchet it upward: once some `{n,}` has n >= the bound, the bound becomes
    # n for every later rewrite (including the `+` and `*` substitutions).
    bound = [upper_bound]

    def _rewrite(match: re.Match) -> str:
        low = int(match.group(1)) if match.group(1) else 0
        if low >= bound[0]:
            bound[0] = low
        return f'{{{low},{bound[0]}}}'

    pattern = re.sub(r'(?<!\\)\{(\d+),\}', _rewrite, pattern)  # {n,}
    pattern = re.sub(r'(?<!\\)\+', f'{{1,{bound[0]}}}', pattern)  # +
    pattern = re.sub(r'(?<!\\)\*', f'{{0,{bound[0]}}}', pattern)  # *
    return pattern
def false_positive_check(sites_info: dict[str, dict[str, str]], site: str, pattern: str) -> QueryStatus:
    """Probe a site with randomly generated usernames to detect false positives."""
    status: QueryStatus = QueryStatus.UNKNOWN
    for _ in range(FALSE_POSITIVE_ATTEMPTS):
        generated: str = rstr.xeger(pattern)
        outcome = sherlock(
            username=generated,
            site_data=sites_info,
            query_notify=QueryNotify(),
        )[site]['status']
        if not hasattr(outcome, 'status'):
            raise TypeError(f"Result for site {site} does not have 'status' attribute. Actual result: {outcome}")
        if type(outcome.status) is not QueryStatus:  # type: ignore
            raise TypeError(f"Result status for site {site} is not of type QueryStatus. Actual type: {type(outcome.status)}")  # type: ignore
        status = outcome.status  # type: ignore
        # AVAILABLE or WAF on any attempt is conclusive, so stop early.
        if status in (QueryStatus.AVAILABLE, QueryStatus.WAF):
            return status
    return status
def false_negative_check(sites_info: dict[str, dict[str, str]], site: str) -> QueryStatus:
    """Query a site with its known-claimed username to detect false negatives."""
    outcome = sherlock(
        username=sites_info[site]['username_claimed'],
        site_data=sites_info,
        query_notify=QueryNotify(),
    )[site]['status']
    if not hasattr(outcome, 'status'):
        raise TypeError(f"Result for site {site} does not have 'status' attribute. Actual result: {outcome}")
    if type(outcome.status) is not QueryStatus:  # type: ignore
        raise TypeError(f"Result status for site {site} is not of type QueryStatus. Actual type: {type(outcome.status)}")  # type: ignore
    return outcome.status  # type: ignore
@pytest.mark.validate_targets
@pytest.mark.online
class Test_All_Targets:
    @pytest.mark.validate_targets_fp
    def test_false_pos(self, chunked_sites: dict[str, dict[str, str]]):
        """Scan every manifest site for targets that could induce false positives."""
        for site in chunked_sites:
            # Fall back to the default username pattern when no regexCheck exists.
            pattern: str = chunked_sites[site].get('regexCheck', FALSE_POSITIVE_DEFAULT_PATTERN)
            if FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND > 0:
                pattern = set_pattern_upper_bound(pattern)
            outcome: QueryStatus = false_positive_check(chunked_sites, site, pattern)
            assert outcome is QueryStatus.AVAILABLE, f"{site} produced false positive with pattern {pattern}, result was {outcome}"

    @pytest.mark.validate_targets_fn
    def test_false_neg(self, chunked_sites: dict[str, dict[str, str]]):
        """Scan every manifest site for targets that could induce false negatives."""
        for site in chunked_sites:
            outcome: QueryStatus = false_negative_check(chunked_sites, site)
            assert outcome is QueryStatus.CLAIMED, f"{site} produced false negative, result was {outcome}"
| {
"repo_id": "sherlock-project/sherlock",
"file_path": "tests/test_validate_targets.py",
"license": "MIT License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
sickn33/antigravity-awesome-skills:tools/scripts/_project_paths.py | from __future__ import annotations
from pathlib import Path
def find_repo_root(start_path: str | Path) -> Path:
    """Walk upward from *start_path* until a directory containing both
    package.json and README.md is found, and return that directory.

    Raises:
        FileNotFoundError: if no ancestor directory holds both marker files.
    """
    location = Path(start_path).resolve()
    # A file can't contain the marker files; search from its parent directory.
    if location.is_file():
        location = location.parent
    for folder in (location, *location.parents):
        markers_present = (
            (folder / "package.json").is_file()
            and (folder / "README.md").is_file()
        )
        if markers_present:
            return folder
    raise FileNotFoundError(f"Could not find repository root from {start_path!r}")
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "tools/scripts/_project_paths.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
sickn33/antigravity-awesome-skills:tools/scripts/analyze_voltagent_repo.py | #!/usr/bin/env python3
"""
Analyze VoltAgent/awesome-agent-skills repository to extract and normalize skills.
Usage:
python3 scripts/analyze_voltagent_repo.py [--output OUTPUT.json]
"""
import re
import json
import sys
import urllib.request
import urllib.error
from pathlib import Path
from typing import List, Dict, Optional
from urllib.parse import urlparse
# VoltAgent repo README URL
VOLTAGENT_README_URL = "https://raw.githubusercontent.com/VoltAgent/awesome-agent-skills/main/README.md"
def normalize_skill_name(name: str) -> str:
    """Convert a skill reference to a kebab-case identifier."""
    # Keep only the segment after an organization prefix
    # (e.g. "anthropics/docx" -> "docx"); rpartition leaves the name
    # untouched when there is no '/'.
    _, _, tail = name.rpartition('/')
    # Lowercase, map every disallowed character to '-', collapse runs of '-',
    # and trim '-' from both ends.
    slug = re.sub(r'[^a-z0-9-]', '-', tail.lower())
    slug = re.sub(r'-+', '-', slug)
    return slug.strip('-')
def extract_skills_from_markdown(content: str) -> List[Dict[str, str]]:
    """Parse skill entries out of the VoltAgent README markdown."""
    # Entries look like: - **[org/skill-name](url)** - Description
    link_re = r'\*\*\[([^\]]+)\]\(([^\)]+)\)\*\*'
    collected: List[Dict[str, str]] = []
    section = None
    for idx, line in enumerate(content.split('\n')):
        # A "## " heading opens a new category section.
        if line.startswith('## '):
            section = line.replace('## ', '').strip()
        for ref, link in re.findall(link_re, line):
            # The description is whatever trails the closing ")**" marker.
            desc = line.split(')**', 1)[-1].strip()
            if desc.startswith('- '):
                desc = desc[2:].strip()
            # Split off an organization prefix when present.
            org = None
            part = ref
            if '/' in ref:
                org, part = ref.split('/', 1)
            collected.append({
                'original_ref': ref,
                'normalized_name': normalize_skill_name(ref),
                'org': org,
                'skill_part': part,
                'url': link,
                'description': desc,
                'category': section or 'uncategorized',
                'line_number': idx + 1
            })
    return collected
def load_existing_skills(catalog_path: str) -> Dict[str, Dict]:
    """Read catalog.json and index its skills by normalized name."""
    try:
        with open(catalog_path, 'r', encoding='utf-8') as handle:
            catalog = json.load(handle)
    except FileNotFoundError:
        print(f"β οΈ Catalog file not found: {catalog_path}")
        return {}
    except json.JSONDecodeError as e:
        print(f"β Error parsing catalog.json: {e}")
        return {}
    by_name: Dict[str, Dict] = {}
    for entry in catalog.get('skills', []):
        key = normalize_skill_name(entry.get('name', '').lower())
        by_name[key] = entry
    return by_name
def fetch_readme(url: str) -> Optional[str]:
    """Download the README at *url*, returning None on any failure."""
    try:
        with urllib.request.urlopen(url, timeout=10) as response:
            payload = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        print(f"β Error fetching README: {e}")
        return None
    except Exception as e:
        # Deliberate best-effort catch-all: report and signal failure via None.
        print(f"β Unexpected error: {e}")
        return None
    return payload
def find_similar_skills(new_name: str, existing: Dict[str, Dict], threshold: float = 0.8) -> List[str]:
    """Return names in *existing* that look similar to *new_name*."""
    matches: List[str] = []
    target = new_name.lower()
    for candidate in existing:
        other = candidate.lower()
        # Substring containment either way counts as similar.
        if target in other or other in target:
            matches.append(candidate)
            continue
        # Otherwise require close lengths plus heavy character overlap.
        if abs(len(target) - len(other)) <= 2:
            shared = set(target) & set(other)
            if len(shared) / max(len(set(target)), len(set(other))) >= threshold:
                matches.append(candidate)
    return matches
def main():
    """Fetch the VoltAgent README, diff its skills against the local catalog,
    write a JSON report, and print a console summary.

    Bug fix: several print strings were mojibake-broken across physical lines
    (invalid string literals); they are rejoined here.
    """
    base_dir = Path(__file__).parent.parent
    catalog_path = base_dir / "data" / "catalog.json"
    output_path = base_dir / "voltagent_analysis.json"
    print("π Analyzing VoltAgent/awesome-agent-skills repository...")
    print(f"π Fetching README from: {VOLTAGENT_README_URL}")
    # Fetch README
    readme_content = fetch_readme(VOLTAGENT_README_URL)
    if not readme_content:
        print("β Failed to fetch README. Exiting.")
        sys.exit(1)
    print("✅ README fetched successfully")
    # Extract skills
    print("\nπ Extracting skills from README...")
    voltagent_skills = extract_skills_from_markdown(readme_content)
    print(f"✅ Found {len(voltagent_skills)} skills in VoltAgent repo")
    # Load existing skills
    print(f"\nπ Loading existing skills from: {catalog_path}")
    existing_skills = load_existing_skills(str(catalog_path))
    print(f"✅ Found {len(existing_skills)} existing skills")
    # Compare and categorize
    print("\nπ Comparing skills...")
    new_skills = []
    existing_matches = []
    similar_skills = []
    for skill in voltagent_skills:
        normalized = skill['normalized_name']
        if normalized in existing_skills:
            existing_matches.append({
                'voltagent': skill,
                'existing': existing_skills[normalized]
            })
        else:
            # Check for similar names
            similar = find_similar_skills(normalized, existing_skills)
            if similar:
                similar_skills.append({
                    'voltagent': skill,
                    'similar': similar
                })
            else:
                new_skills.append(skill)
    # Generate report
    report = {
        # NOTE(review): this records the script file's mtime, not the current
        # time — confirm that is intentional before relying on it.
        'analysis_date': str(Path(__file__).stat().st_mtime),
        'voltagent_readme_url': VOLTAGENT_README_URL,
        'summary': {
            'total_voltagent_skills': len(voltagent_skills),
            'total_existing_skills': len(existing_skills),
            'new_skills_found': len(new_skills),
            'existing_matches': len(existing_matches),
            'similar_skills': len(similar_skills)
        },
        'new_skills': new_skills,
        'existing_matches': existing_matches,
        'similar_skills': similar_skills
    }
    # Save report
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(report, f, indent=2, ensure_ascii=False)
    print(f"\nπ Analysis Summary:")
    print(f" Total VoltAgent skills: {len(voltagent_skills)}")
    print(f" Existing skills: {len(existing_skills)}")
    print(f" β¨ New skills found: {len(new_skills)}")
    print(f" ✅ Already present: {len(existing_matches)}")
    print(f" β οΈ Similar names: {len(similar_skills)}")
    print(f"\nπΎ Report saved to: {output_path}")
    if new_skills:
        print(f"\nπ New skills to evaluate:")
        for skill in new_skills[:20]:  # Show first 20
            print(f" β’ {skill['normalized_name']} ({skill['original_ref']})")
        if len(new_skills) > 20:
            print(f" ... and {len(new_skills) - 20} more")
    if similar_skills:
        print(f"\nβ οΈ Skills with similar names (may be duplicates):")
        for item in similar_skills[:10]:  # Show first 10
            skill = item['voltagent']
            print(f" β’ {skill['normalized_name']} (similar to: {', '.join(item['similar'][:3])})")


if __name__ == "__main__":
    main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "tools/scripts/analyze_voltagent_repo.py",
"license": "MIT License",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:tools/scripts/check_html_content.py | #!/usr/bin/env python3
"""Check for HTML content in skills and identify which need conversion."""
import json
import re
from pathlib import Path
def check_html_content(skill_path: Path) -> dict:
    """Scan a skill file for raw HTML outside of fenced code blocks."""
    try:
        text = skill_path.read_text(encoding='utf-8')
    except Exception as e:
        # Unreadable file: report the error instead of raising.
        return {'error': str(e), 'has_html': False}
    # HTML patterns (excluding code blocks)
    html_patterns = [
        r'<!DOCTYPE\s+html',
        r'<html\s',
        r'<head\s*>',
        r'<body\s*>',
        r'<script\s',
        r'<style\s',
        r'<link\s+rel=',
        r'<meta\s+charset=',
        r'github\.githubassets\.com',
        r'github-cloud\.s3\.amazonaws\.com'
    ]
    found = []
    inside_fence = False
    for lineno, raw in enumerate(text.split('\n'), 1):
        # Toggle fenced-code state on ``` markers; fences themselves are skipped.
        if raw.strip().startswith('```'):
            inside_fence = not inside_fence
            continue
        if inside_fence:
            continue
        for pat in html_patterns:
            if re.search(pat, raw, re.IGNORECASE):
                found.append({
                    'line': lineno,
                    'pattern': pat,
                    'preview': raw[:100].strip()
                })
    return {
        'has_html': len(found) > 5,  # Threshold
        'html_count': len(found),
        'matches': found[:10]  # First 10 matches
    }
def main():
    """Scan similar and recently implemented skills for embedded HTML content.

    Reads voltagent_analysis.json (and voltagent_validation.json when present),
    checks each referenced SKILL.md via check_html_content(), prints a report,
    and writes the findings to html_content_analysis.json.
    """
    # Load similar skills from analysis
    analysis_file = Path('voltagent_analysis.json')
    if not analysis_file.exists():
        print("β voltagent_analysis.json not found")
        return
    with open(analysis_file, 'r') as f:
        analysis = json.load(f)
    similar_skills = analysis.get('similar_skills', [])
    skills_dir = Path('skills')
    print(f"π Checking {len(similar_skills)} similar skills for HTML content...\n")
    skills_with_html = []
    skills_checked = 0
    for item in similar_skills:
        skill_name = item['voltagent']['normalized_name']
        skill_path = skills_dir / skill_name / 'SKILL.md'
        # Skills without a local SKILL.md are silently skipped (not counted).
        if not skill_path.exists():
            continue
        skills_checked += 1
        result = check_html_content(skill_path)
        if result.get('has_html'):
            skills_with_html.append({
                'name': skill_name,
                'url': item['voltagent']['url'],
                'description': item['voltagent']['description'],
                'html_count': result['html_count'],
                'matches': result.get('matches', [])
            })
    print(f"π Checked {skills_checked} skills")
    print(f"β οΈ Found {len(skills_with_html)} skills with HTML content\n")
    if skills_with_html:
        print("Skills needing HTML-to-Markdown conversion:")
        for skill in skills_with_html:
            print(f"\n β’ {skill['name']}")
            print(f" HTML patterns: {skill['html_count']}")
            print(f" URL: {skill['url']}")
            if skill['matches']:
                print(f" Sample match (line {skill['matches'][0]['line']}): {skill['matches'][0]['preview'][:80]}...")
    # Also check recently implemented skills
    print("\n\nπ Checking recently implemented skills...\n")
    validation_file = Path('voltagent_validation.json')
    if validation_file.exists():
        with open(validation_file, 'r') as f:
            validation = json.load(f)
        validated_skills = validation.get('validated', [])
        recent_with_html = []
        for item in validated_skills:
            skill_name = item['skill']['normalized_name']
            skill_path = skills_dir / skill_name / 'SKILL.md'
            if not skill_path.exists():
                continue
            result = check_html_content(skill_path)
            if result.get('has_html'):
                recent_with_html.append({
                    'name': skill_name,
                    'html_count': result['html_count']
                })
        if recent_with_html:
            print(f"β οΈ Found {len(recent_with_html)} recently implemented skills with HTML:")
            for skill in recent_with_html:
                print(f" β’ {skill['name']} ({skill['html_count']} HTML patterns)")
        else:
            print("β
 No HTML content found in recently implemented skills")
    # Save results
    # NOTE(review): only the similar-skills findings are persisted; the
    # recently-implemented results are print-only β confirm that is intended.
    output = {
        'similar_skills_with_html': skills_with_html,
        'total_checked': skills_checked,
        'total_with_html': len(skills_with_html)
    }
    output_file = Path('html_content_analysis.json')
    with open(output_file, 'w') as f:
        json.dump(output, f, indent=2)
    print(f"\nπΎ Results saved to: {output_file}")
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "tools/scripts/check_html_content.py",
"license": "MIT License",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:tools/scripts/convert_html_to_markdown.py | #!/usr/bin/env python3
"""
Convert skills with HTML content to clean markdown.
Attempts to download raw markdown files from GitHub, extracts content from HTML if needed,
or creates minimal markdown content as fallback.
"""
import json
import re
import sys
import urllib.request
import urllib.error
from pathlib import Path
from typing import Dict, Optional, Tuple
from urllib.parse import urlparse, urljoin
def parse_frontmatter(content: str) -> Optional[Dict]:
    """Extract the leading YAML frontmatter block as a flat dict.

    Returns None when the document does not open with a ``---`` fence.
    Values are stripped of surrounding double quotes, then single quotes.
    """
    match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
    if match is None:
        return None
    parsed = {}
    for raw_line in match.group(1).split('\n'):
        if ':' not in raw_line:
            continue
        key, _, value = raw_line.partition(':')
        parsed[key.strip()] = value.strip().strip('"').strip("'")
    return parsed
def has_html_content(content: str) -> bool:
    """Return True when *content* looks like a captured HTML page.

    Counts lines (outside fenced code blocks) that match HTML-document
    signatures; more than five such lines is treated as an HTML capture.
    """
    signatures = (
        r'<!DOCTYPE\s+html',
        r'<html\s',
        r'github\.githubassets\.com',
        r'github-cloud\.s3\.amazonaws\.com'
    )
    fenced = False
    suspicious_lines = 0
    for raw in content.split('\n'):
        if raw.strip().startswith('```'):
            fenced = not fenced
            continue
        if fenced:
            # HTML inside a code fence is legitimate example content.
            continue
        if any(re.search(sig, raw, re.IGNORECASE) for sig in signatures):
            suspicious_lines += 1
    return suspicious_lines > 5
def build_raw_github_url(source_url: str) -> Optional[str]:
    """Convert a GitHub tree/blob URL into a candidate raw SKILL.md URL.

    Returns None for empty or non-GitHub URLs. ``/tree/`` directory URLs get
    ``/raw/<ref>/<path>/SKILL.md``; ``/blob/`` file URLs are rewritten to
    their ``/raw/`` equivalent; any other GitHub URL gets ``/SKILL.md``
    appended. The URL is a guess only β accessibility is not checked here.

    Fixes over the previous version: the ``variations[0] if variations else
    None`` tail was dead code (the list was always non-empty), and URLs
    containing ``/tree/`` more than once fell through unhandled; splitting at
    the first occurrence handles them correctly.
    """
    if not source_url or 'github.com' not in source_url:
        return None
    # Directory URLs: https://github.com/org/repo/tree/<ref>/<path>
    if '/tree/' in source_url:
        base, _, path = source_url.partition('/tree/')
        return f"{base}/raw/{path}/SKILL.md"
    # File URLs: https://github.com/org/repo/blob/<ref>/<path>/SKILL.md
    if '/blob/' in source_url:
        return source_url.replace('/blob/', '/raw/')
    # Bare repo/directory URL: guess SKILL.md at its root.
    return f"{source_url.rstrip('/')}/SKILL.md"
def download_raw_markdown(url: str) -> Tuple[bool, Optional[str]]:
    """Attempt to download a raw markdown file.

    Returns (True, text) only when the response is HTTP 200 and the body does
    not itself look like an HTML capture (per has_html_content); otherwise
    (False, None). All network/decoding failures are treated as a miss β
    this is deliberately best-effort.
    """
    try:
        req = urllib.request.Request(url)
        req.add_header('User-Agent', 'Mozilla/5.0 (compatible; AntigravitySkillsConverter/1.0)')
        with urllib.request.urlopen(req, timeout=15) as response:
            if response.status == 200:
                content = response.read().decode('utf-8')
                # Validate it's markdown (not HTML)
                if not has_html_content(content):
                    return True, content
    except urllib.error.HTTPError as e:
        # 404 is the expected "file not here" case; other HTTP errors fall
        # through to the shared failure return below.
        if e.code == 404:
            return False, None
    except Exception:
        # Best-effort: any other network/decoding failure is a miss.
        pass
    return False, None
def extract_markdown_from_html(html_content: str) -> Optional[str]:
    """Pull readable markdown out of a captured GitHub HTML page.

    Probes the usual content containers (article/main/markdown/readme divs)
    in order, converts the first hit to markdown, and returns it when it is
    substantial (> 100 chars after stripping); otherwise None.
    """
    container_patterns = (
        r'<article[^>]*>(.*?)</article>',
        r'<main[^>]*>(.*?)</main>',
        r'<div[^>]*class="[^"]*markdown[^"]*"[^>]*>(.*?)</div>',
        r'<div[^>]*class="[^"]*readme[^"]*"[^>]*>(.*?)</div>',
    )
    for container in container_patterns:
        hit = re.search(container, html_content, re.DOTALL | re.IGNORECASE)
        if hit is None:
            continue
        # Basic HTML to markdown conversion of the captured body.
        candidate = convert_html_to_markdown(hit.group(1))
        if candidate and len(candidate.strip()) > 100:
            return candidate
    return None
def convert_html_to_markdown(html: str) -> str:
    """Crude HTML-to-markdown conversion via ordered regex rewrites.

    Strips scripts/styles, maps headings, code, links, lists and paragraphs
    to markdown, drops any remaining tags, then collapses runs of blank
    lines and trims the edges. Order matters: <pre><code> must be rewritten
    before bare <code>, and tag stripping must come last.
    """
    dotall_ci = re.DOTALL | re.IGNORECASE
    rewrites = (
        # Drop non-content elements first.
        (r'<script[^>]*>.*?</script>', '', dotall_ci),
        (r'<style[^>]*>.*?</style>', '', dotall_ci),
        # Headings
        (r'<h1[^>]*>(.*?)</h1>', r'# \1', dotall_ci),
        (r'<h2[^>]*>(.*?)</h2>', r'## \1', dotall_ci),
        (r'<h3[^>]*>(.*?)</h3>', r'### \1', dotall_ci),
        # Code blocks before inline code so <pre><code> wins.
        (r'<pre[^>]*><code[^>]*>(.*?)</code></pre>', '```\n\\1\n```', dotall_ci),
        (r'<code[^>]*>(.*?)</code>', r'`\1`', dotall_ci),
        # Links
        (r'<a[^>]*href="([^"]*)"[^>]*>(.*?)</a>', r'[\2](\1)', dotall_ci),
        # Lists
        (r'<li[^>]*>(.*?)</li>', r'- \1', dotall_ci),
        (r'<ul[^>]*>|</ul>|<ol[^>]*>|</ol>', '', re.IGNORECASE),
        # Paragraphs
        (r'<p[^>]*>(.*?)</p>', '\\1\n\n', dotall_ci),
        # Any tag still standing is discarded.
        (r'<[^>]+>', '', 0),
    )
    text = html
    for pattern, replacement, flags in rewrites:
        text = re.sub(pattern, replacement, text, flags=flags)
    # Clean up whitespace.
    text = re.sub(r'\n{3,}', '\n\n', text)
    return text.strip()
def create_minimal_markdown(metadata: Dict, source_url: str) -> str:
    """Build fallback SKILL.md markdown from frontmatter metadata alone."""
    skill_name = metadata.get('name', 'skill')
    description = metadata.get('description', '')
    # Derive a human-readable title from the kebab-case name.
    title = skill_name.replace('-', ' ').title()
    usage_line = f"Use this skill when you need to {description.lower()}."
    sections = [
        f"# {title}",
        "## Overview",
        description,
        "## When to Use This Skill",
        usage_line,
        "## Instructions",
        f"This skill provides guidance and patterns for {description.lower()}.",
        "## Resources",
        f"For more information, see the [source repository]({source_url}).",
    ]
    return "\n".join(sections) + "\n"
def convert_skill(skill_path: Path) -> Dict:
    """Convert a single skill from HTML to markdown, in place.

    Tries three strategies in order: (1) download the raw SKILL.md from the
    source repo, (2) extract markdown out of the captured HTML page,
    (3) synthesize minimal content from the frontmatter metadata. Returns a
    result dict with 'skill', 'method', 'success' and 'error'.
    """
    skill_name = skill_path.parent.name
    result = {
        'skill': skill_name,
        'method': None,      # which strategy succeeded
        'success': False,
        'error': None
    }
    try:
        content = skill_path.read_text(encoding='utf-8')
    except Exception as e:
        result['error'] = f"Failed to read file: {e}"
        return result
    # Parse frontmatter β required; conversion is keyed off its 'source' URL.
    metadata = parse_frontmatter(content)
    if not metadata:
        result['error'] = "No frontmatter found"
        return result
    source_url = metadata.get('source', '')
    # Extract frontmatter and "When to Use" section from the current file.
    frontmatter_match = re.search(r'^(---\s*\n.*?\n---)', content, re.DOTALL)
    frontmatter = frontmatter_match.group(1) if frontmatter_match else ''
    when_to_use_match = re.search(r'##\s+When to Use.*?\n(.*?)(?=\n<!DOCTYPE|\n##|\Z)', content, re.DOTALL | re.IGNORECASE)
    # NOTE(review): when_to_use_content is captured but never used below β
    # confirm whether it was meant to be re-inserted into the new content.
    when_to_use_content = when_to_use_match.group(1).strip() if when_to_use_match else None
    # Try method 1: Download raw markdown
    raw_url = build_raw_github_url(source_url)
    if raw_url:
        success, raw_content = download_raw_markdown(raw_url)
        if success and raw_content:
            # Preserve frontmatter from original
            raw_metadata = parse_frontmatter(raw_content)
            if raw_metadata:
                # Merge metadata (keep original source and risk rating)
                raw_metadata['source'] = source_url
                raw_metadata['risk'] = metadata.get('risk', 'safe')
                # Rebuild frontmatter; quote values containing spaces/colons.
                new_frontmatter = '---\n'
                for key, value in raw_metadata.items():
                    if isinstance(value, str) and (' ' in value or ':' in value):
                        new_frontmatter += f'{key}: "{value}"\n'
                    else:
                        new_frontmatter += f'{key}: {value}\n'
                new_frontmatter += '---\n'
                # Remove frontmatter from raw content before splicing.
                raw_content_no_fm = re.sub(r'^---\s*\n.*?\n---\s*\n', '', raw_content, flags=re.DOTALL)
                new_content = new_frontmatter + '\n' + raw_content_no_fm
                skill_path.write_text(new_content, encoding='utf-8')
                result['method'] = 'raw_download'
                result['success'] = True
                return result
    # Try method 2: Extract from HTML
    if has_html_content(content):
        markdown_content = extract_markdown_from_html(content)
        if markdown_content and len(markdown_content.strip()) > 100:
            # Rebuild with the original frontmatter on top.
            new_content = frontmatter + '\n\n' + markdown_content
            skill_path.write_text(new_content, encoding='utf-8')
            result['method'] = 'html_extraction'
            result['success'] = True
            return result
    # Method 3: Create minimal content (always succeeds as last resort).
    minimal_content = create_minimal_markdown(metadata, source_url)
    new_content = frontmatter + '\n\n' + minimal_content
    skill_path.write_text(new_content, encoding='utf-8')
    result['method'] = 'minimal_creation'
    result['success'] = True
    return result
def main():
    """Find skills whose SKILL.md is a captured HTML page, back them up, convert them.

    Writes backups under skills_backup_html/ and a JSON report to
    html_conversion_results.json.
    """
    # NOTE(review): parent.parent of this script resolves to tools/ β confirm
    # the skills/ directory actually lives there and not at the repo root.
    base_dir = Path(__file__).parent.parent
    skills_dir = base_dir / "skills"
    # Find skills with HTML content
    print("π Identifying skills with HTML content...")
    skills_with_html = []
    for skill_dir in skills_dir.iterdir():
        if not skill_dir.is_dir() or skill_dir.name.startswith('.'):
            continue
        skill_file = skill_dir / 'SKILL.md'
        if not skill_file.exists():
            continue
        try:
            content = skill_file.read_text(encoding='utf-8')
            if has_html_content(content):
                skills_with_html.append(skill_file)
        except Exception:
            # Unreadable files are simply skipped.
            continue
    print(f"β
 Found {len(skills_with_html)} skills with HTML content\n")
    if not skills_with_html:
        print("No skills with HTML content found.")
        return
    # Create backup directory and copy originals before touching anything.
    backup_dir = base_dir / "skills_backup_html"
    backup_dir.mkdir(exist_ok=True)
    print(f"π¦ Creating backups in: {backup_dir}")
    for skill_file in skills_with_html:
        backup_path = backup_dir / skill_file.parent.name / 'SKILL.md'
        backup_path.parent.mkdir(parents=True, exist_ok=True)
        backup_path.write_bytes(skill_file.read_bytes())
    print("β
 Backups created\n")
    # Convert each skill
    print(f"π Converting {len(skills_with_html)} skills...\n")
    results = []
    for i, skill_file in enumerate(skills_with_html, 1):
        skill_name = skill_file.parent.name
        print(f"[{i}/{len(skills_with_html)}] {skill_name}")
        result = convert_skill(skill_file)
        results.append(result)
        if result['success']:
            print(f" β
 Converted using method: {result['method']}")
        else:
            print(f" β Failed: {result.get('error', 'Unknown error')}")
        print()
    # Summary
    print("=" * 60)
    print("π Conversion Summary:")
    print(f" Total skills: {len(skills_with_html)}")
    print(f" β
 Successful: {sum(1 for r in results if r['success'])}")
    print(f" β Failed: {sum(1 for r in results if not r['success'])}")
    methods = {}
    for r in results:
        if r['success']:
            method = r['method']
            methods[method] = methods.get(method, 0) + 1
    print(f"\n Methods used:")
    for method, count in methods.items():
        print(f" β’ {method}: {count}")
    # Save report
    report = {
        'total_skills': len(skills_with_html),
        'successful': sum(1 for r in results if r['success']),
        'failed': sum(1 for r in results if not r['success']),
        'results': results,
        'backup_location': str(backup_dir)
    }
    report_file = base_dir / "html_conversion_results.json"
    with open(report_file, 'w', encoding='utf-8') as f:
        json.dump(report, f, indent=2, ensure_ascii=False)
    print(f"\nπΎ Report saved to: {report_file}")
    print(f"π¦ Backups saved to: {backup_dir}")
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "tools/scripts/convert_html_to_markdown.py",
"license": "MIT License",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:tools/scripts/implement_voltagent_skills.py | #!/usr/bin/env python3
"""
Implement validated VoltAgent skills into the local repository.
Downloads and adapts skills from GitHub repositories.
"""
import json
import os
import re
import sys
import urllib.request
import urllib.error
from pathlib import Path
from typing import Dict, Optional
from urllib.parse import urlparse, urljoin
def normalize_skill_name(name: str) -> str:
    """Normalize a skill reference to kebab-case, using the last path segment."""
    # Keep only the final segment of refs like "org/repo/skill".
    tail = name.rsplit('/', 1)[-1]
    # Lowercase, replace disallowed chars with '-', collapse runs, trim ends.
    kebab = re.sub(r'[^a-z0-9-]', '-', tail.lower())
    kebab = re.sub(r'-+', '-', kebab)
    return kebab.strip('-')
def download_file(url: str, output_path: Path) -> bool:
    """Download *url* to *output_path*, creating parent directories as needed.

    GitHub blob URLs are rewritten to their raw equivalents first. Returns
    True on success; prints the error and returns False on any failure.
    """
    try:
        # Convert blob URL to raw URL
        if '/blob/' in url:
            url = url.replace('/blob/', '/raw/')
        req = urllib.request.Request(url)
        req.add_header('User-Agent', 'Mozilla/5.0 (compatible; AntigravitySkillsDownloader/1.0)')
        with urllib.request.urlopen(req, timeout=15) as response:
            content = response.read()
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_bytes(content)
        return True
    except Exception as e:
        # Best-effort helper: report and signal failure to the caller.
        print(f" β Error downloading {url}: {e}")
        return False
def find_skill_file_url(base_url: str) -> Optional[str]:
    """Probe likely locations of a skill markdown file under *base_url*.

    Tries SKILL.md / skill.md / README.md / index.md against the given URL
    and, for tree/blob style links, the raw-GitHub equivalent. Returns the
    first URL answering HTTP 200, or None when nothing is reachable.

    Fixes over the previous version: the candidate list contained a
    duplicate SKILL.md entry (and re-probed identical URLs when the base had
    no /tree/ or /blob/ segment), and the bare ``except:`` also swallowed
    KeyboardInterrupt; failures are now limited to ``Exception``.
    """
    candidates = [
        f"{base_url}/SKILL.md",
        f"{base_url}/skill.md",
        f"{base_url}/README.md",
        f"{base_url}/index.md",
    ]
    # Also try raw GitHub URLs when the link points into a tree/blob view.
    raw_base = None
    if '/tree/' in base_url:
        raw_base = base_url.replace('/tree/', '/raw/')
    elif '/blob/' in base_url:
        raw_base = base_url.replace('/blob/', '/raw/')
    if raw_base:
        candidates.extend([
            f"{raw_base}/SKILL.md",
            f"{raw_base}/skill.md",
            f"{raw_base}/README.md",
        ])
    # Probe in order, skipping duplicates; first HTTP 200 wins.
    seen = set()
    for url in candidates:
        if url in seen:
            continue
        seen.add(url)
        try:
            req = urllib.request.Request(url)
            req.add_header('User-Agent', 'Mozilla/5.0')
            with urllib.request.urlopen(req, timeout=5) as response:
                if response.status == 200:
                    return url
        except Exception:
            # Any fetch failure just means "try the next candidate".
            continue
    return None
def parse_frontmatter(content: str) -> Optional[Dict]:
    """Extract the leading YAML frontmatter block as a flat dict.

    Returns None when the document does not open with a ``---`` fence.
    Values are stripped of surrounding double quotes, then single quotes.
    """
    match = re.search(r'^---\s*\n(.*?)\n---', content, re.DOTALL)
    if match is None:
        return None
    parsed = {}
    for raw_line in match.group(1).split('\n'):
        if ':' not in raw_line:
            continue
        key, _, value = raw_line.partition(':')
        parsed[key.strip()] = value.strip().strip('"').strip("'")
    return parsed
def ensure_frontmatter_compliance(content: str, skill_name: str, source_url: str, description: str) -> str:
    """Guarantee SKILL.md frontmatter exists with name/description/source/risk.

    Existing values win except for ``name``, which is always overwritten with
    *skill_name*. Values containing spaces or colons are double-quoted when
    the frontmatter is rebuilt.
    """
    # Parse existing frontmatter
    metadata = parse_frontmatter(content)
    if not metadata:
        # No frontmatter, add it
        frontmatter = f"""---
name: {skill_name}
description: "{description}"
source: "{source_url}"
risk: safe
---
"""
        # Add after first line if it's a title, otherwise at the beginning
        # NOTE(review): frontmatter placed *after* a title line is not valid
        # frontmatter for most parsers β confirm downstream tooling accepts it.
        lines = content.split('\n')
        if lines[0].startswith('#'):
            return '\n'.join([lines[0], '', frontmatter] + lines[1:])
        else:
            return frontmatter + '\n' + content
    # Update existing frontmatter
    metadata['name'] = skill_name
    if 'description' not in metadata or not metadata['description']:
        metadata['description'] = description
    if 'source' not in metadata:
        metadata['source'] = source_url
    if 'risk' not in metadata:
        metadata['risk'] = 'safe'
    # Rebuild frontmatter; quote values containing spaces or colons.
    frontmatter_lines = ['---']
    for key, value in metadata.items():
        if isinstance(value, str) and (' ' in value or ':' in value):
            frontmatter_lines.append(f'{key}: "{value}"')
        else:
            frontmatter_lines.append(f'{key}: {value}')
    frontmatter_lines.append('---')
    # Replace frontmatter in content
    content_without_fm = re.sub(r'^---\s*\n.*?\n---\s*\n', '', content, flags=re.DOTALL)
    return '\n'.join(frontmatter_lines) + '\n\n' + content_without_fm
def ensure_when_to_use_section(content: str, description: str) -> str:
    """Insert a 'When to Use This Skill' section right after the frontmatter.

    Returns *content* unchanged when a '## When to Use' heading already
    exists (case-insensitive). Without frontmatter, the section is prepended.
    """
    already_present = re.search(r'^##\s+When to Use', content, re.MULTILINE | re.IGNORECASE)
    if already_present:
        return content  # Already has it
    section = (
        "\n## When to Use This Skill\n"
        f"{description}\n"
        f"Use this skill when you need to work with {description.lower()}.\n"
    )
    fm_match = re.search(r'^---\s*\n.*?\n---', content, re.DOTALL)
    if fm_match is None:
        # No frontmatter: the new section leads the document.
        return section + '\n\n' + content
    body = re.sub(r'^---\s*\n.*?\n---\s*\n', '', content, flags=re.DOTALL)
    return fm_match.group(0) + '\n' + section + '\n' + body
def implement_skill(skill_data: Dict, skills_dir: Path) -> tuple[bool, str]:
    """Create skills_dir/<name>/SKILL.md for one validated skill.

    Downloads the upstream SKILL.md when it can be located and normalizes
    its frontmatter; otherwise writes a minimal placeholder. Returns
    (success, message); an already-existing skill directory is a failure.
    """
    skill_name = skill_data['skill']['normalized_name']
    skill_url = skill_data['skill_file_url'] or skill_data['skill']['url']
    description = skill_data['skill']['description']
    source_url = skill_data['skill']['url']
    skill_dir = skills_dir / skill_name
    # Check if already exists β never overwrite an implemented skill.
    if skill_dir.exists():
        return False, f"Skill directory already exists: {skill_name}"
    print(f" π¦ Implementing {skill_name}...")
    # Create directory
    skill_dir.mkdir(parents=True, exist_ok=True)
    # Locate the upstream SKILL.md (live probe), falling back to a guess.
    skill_file_url = find_skill_file_url(skill_url)
    if not skill_file_url:
        # Try to construct from base URL
        if '/tree/' in skill_url:
            base_path = skill_url.split('/tree/')[1]
            repo_base = skill_url.split('/tree/')[0]
            skill_file_url = f"{repo_base}/raw/{base_path}/SKILL.md"
        else:
            skill_file_url = skill_url.rstrip('/') + '/SKILL.md'
    skill_md_path = skill_dir / "SKILL.md"
    if download_file(skill_file_url, skill_md_path):
        # Read and fix content so it meets repo frontmatter/section rules.
        content = skill_md_path.read_text(encoding='utf-8')
        content = ensure_frontmatter_compliance(content, skill_name, source_url, description)
        content = ensure_when_to_use_section(content, description)
        skill_md_path.write_text(content, encoding='utf-8')
        print(f" β
 SKILL.md created")
    else:
        # Download failed: create a minimal SKILL.md from metadata alone.
        minimal_skill = f"""---
name: {skill_name}
description: "{description}"
source: "{source_url}"
risk: safe
---
# {skill_name.replace('-', ' ').title()}
## Overview
{description}
## When to Use This Skill
Use this skill when you need to work with {description.lower()}.
## Instructions
This skill provides guidance and patterns for {description.lower()}.
For more information, see the [source repository]({source_url}).
"""
        skill_md_path.write_text(minimal_skill, encoding='utf-8')
        print(f" β οΈ Created minimal SKILL.md (source file not found)")
    return True, f"Successfully implemented {skill_name}"
def main():
    """Implement every skill listed in voltagent_validation.json into skills/.

    Exits with status 1 when the validation file is missing or empty;
    otherwise prints a per-skill progress log and a summary.
    """
    # NOTE(review): parent.parent of this script resolves to tools/ β confirm
    # voltagent_validation.json and skills/ actually live there.
    base_dir = Path(__file__).parent.parent
    validation_file = base_dir / "voltagent_validation.json"
    skills_dir = base_dir / "skills"
    if not validation_file.exists():
        print(f"β Validation file not found: {validation_file}")
        print(" Run validate_voltagent_sources.py first")
        sys.exit(1)
    with open(validation_file, 'r', encoding='utf-8') as f:
        validation = json.load(f)
    validated_skills = validation.get('validated', [])
    if not validated_skills:
        print("β No validated skills to implement")
        sys.exit(1)
    print(f"π Implementing {len(validated_skills)} validated skills...")
    print()
    implemented = []
    failed = []
    for i, skill_data in enumerate(validated_skills, 1):
        skill_name = skill_data['skill']['normalized_name']
        print(f"[{i}/{len(validated_skills)}] {skill_name}")
        success, message = implement_skill(skill_data, skills_dir)
        if success:
            implemented.append(skill_name)
            print(f" β
 {message}")
        else:
            failed.append({'name': skill_name, 'error': message})
            print(f" β {message}")
        print()
    # Summary
    print("=" * 60)
    print("π Implementation Summary:")
    print(f" β
 Implemented: {len(implemented)}")
    print(f" β Failed: {len(failed)}")
    if implemented:
        print(f"\nβ
 Successfully implemented skills:")
        for name in implemented:
            print(f" β’ {name}")
    if failed:
        print(f"\nβ Failed implementations:")
        for item in failed:
            print(f" β’ {item['name']}: {item['error']}")
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "tools/scripts/implement_voltagent_skills.py",
"license": "MIT License",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:tools/scripts/sync_repo_metadata.py | #!/usr/bin/env python3
import argparse
import json
import os
import re
import sys
from update_readme import configure_utf8_output, find_repo_root, load_metadata, update_readme
ABOUT_DESCRIPTION_RE = re.compile(r'"description"\s*:\s*"([^"]*)"')
def update_package_description(base_dir: str, metadata: dict, dry_run: bool) -> bool:
    """Rewrite the package.json "description" field from repo metadata.

    Returns True when the content changed (or would change under dry-run),
    False when the description is already up to date.
    """
    package_path = os.path.join(base_dir, "package.json")
    with open(package_path, "r", encoding="utf-8") as file:
        content = file.read()
    new_description = (
        f"{metadata['total_skills_label']} agentic skills for Claude Code, Gemini CLI, "
        "Cursor, Antigravity & more. Installer CLI."
    )
    # Regex edit of only the first "description" key keeps the rest of the
    # file's formatting intact, unlike a json.load/json.dump round-trip.
    updated_content = ABOUT_DESCRIPTION_RE.sub(
        f'"description": "{new_description}"', content, count=1
    )
    if updated_content == content:
        return False
    if dry_run:
        print(f"[dry-run] Would update package description in {package_path}")
        return True
    # newline="\n" keeps LF endings regardless of platform.
    with open(package_path, "w", encoding="utf-8", newline="\n") as file:
        file.write(updated_content)
    print(f"β
 Updated package description in {package_path}")
    return True
def print_manual_github_about(metadata: dict) -> None:
    """Print the GitHub 'About' text that must be applied by hand in repo settings."""
    about = (
        f"{metadata['total_skills_label']} curated SKILL.md files for Claude Code, "
        "Cursor, Gemini CLI, Codex, Copilot, and Antigravity."
    )
    for line in (
        "\nManual GitHub repo settings update:",
        f"- About description: {about}",
        "- Suggested topics: claude-code, cursor, gemini-cli, codex-cli, github-copilot, antigravity",
    ):
        print(line)
def parse_args() -> argparse.Namespace:
    """Parse the --dry-run CLI flag for the metadata sync script."""
    cli = argparse.ArgumentParser(
        description="Synchronize repository metadata across README and package.json."
    )
    cli.add_argument("--dry-run", action="store_true", help="Preview updates without writing files.")
    return cli.parse_args()
def main() -> int:
    """Sync README and package.json metadata; returns the process exit code (always 0)."""
    args = parse_args()
    base_dir = find_repo_root(os.path.dirname(__file__))
    metadata = load_metadata(base_dir)
    print("Repository metadata")
    print(json.dumps(metadata, indent=2))
    # update_readme() re-loads metadata itself and returns what it applied.
    readme_metadata = update_readme(dry_run=args.dry_run)
    package_updated = update_package_description(base_dir, metadata, args.dry_run)
    # The GitHub "About" box cannot be scripted from here; print instructions.
    print_manual_github_about(readme_metadata)
    if args.dry_run and not package_updated:
        print("\n[dry-run] No package.json description changes required.")
        return 0
    return 0
# Script entry point: fix console encoding first, then propagate the exit code.
if __name__ == "__main__":
    configure_utf8_output()
    sys.exit(main())
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "tools/scripts/sync_repo_metadata.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
sickn33/antigravity-awesome-skills:tools/scripts/update_readme.py | #!/usr/bin/env python3
import argparse
import io
import json
import os
import re
import sys
import urllib.error
import urllib.request
from datetime import datetime, timezone
GITHUB_REPO = "sickn33/antigravity-awesome-skills"
SYNC_COMMENT_RE = re.compile(r"<!-- registry-sync: .*? -->")
def configure_utf8_output() -> None:
    """Best-effort UTF-8 stdout/stderr on Windows without dropping diagnostics."""
    # Non-Windows consoles are assumed to handle UTF-8 already.
    if sys.platform != "win32":
        return
    for stream_name in ("stdout", "stderr"):
        stream = getattr(sys, stream_name)
        try:
            # Python 3.7+: reconfigure in place when the stream supports it.
            stream.reconfigure(encoding="utf-8", errors="backslashreplace")
            continue
        except Exception:
            pass
        # Fallback: wrap the underlying binary buffer (e.g. when the stream
        # was replaced by something without reconfigure()).
        buffer = getattr(stream, "buffer", None)
        if buffer is not None:
            setattr(
                sys,
                stream_name,
                io.TextIOWrapper(buffer, encoding="utf-8", errors="backslashreplace"),
            )
def find_repo_root(start_path: str) -> str:
    """Walk upward from *start_path* to the directory holding both package.json and README.md.

    Raises FileNotFoundError when the filesystem root is reached first.
    """
    here = os.path.abspath(start_path)
    while True:
        has_markers = os.path.isfile(os.path.join(here, "package.json")) and os.path.isfile(
            os.path.join(here, "README.md")
        )
        if has_markers:
            return here
        up = os.path.dirname(here)
        if up == here:  # reached the filesystem root
            raise FileNotFoundError("Could not locate repository root from script path.")
        here = up
def format_skill_count(total_skills: int) -> str:
    """Render a skill total as a comma-grouped '<n>+' label, e.g. 1234 -> '1,234+'."""
    return "{:,}+".format(total_skills)
def format_star_badge_count(stars: int) -> str:
    """URL-encoded star label for the shields.io badge ('%2C' = ',', '%2B' = '+')."""
    if stars < 1000:
        return f"{stars}%2B"
    # Thousands are rounded to the nearest whole thousand.
    thousands = int(round(stars / 1000.0))
    return f"{thousands}%2C000%2B"
def format_star_milestone(stars: int) -> str:
    """Human-readable milestone label, e.g. 2600 -> '3,000+'."""
    if stars < 1000:
        return f"{stars}+"
    return f"{int(round(stars / 1000.0))},000+"
def format_star_celebration(stars: int) -> str:
    """Compact celebration label: thousands become '<n>k', otherwise the raw count."""
    if stars >= 1000:
        return f"{int(round(stars / 1000.0))}k"
    return str(stars)
def fetch_star_count(repo: str) -> int | None:
    """Fetch the live stargazer count for *repo* from the GitHub API.

    Returns None on any network/JSON failure so callers can fall back to the
    star count already embedded in the README.
    """
    url = f"https://api.github.com/repos/{repo}"
    request = urllib.request.Request(
        url,
        headers={
            "Accept": "application/vnd.github+json",
            "User-Agent": "antigravity-awesome-skills-readme-sync",
        },
    )
    try:
        with urllib.request.urlopen(request, timeout=10) as response:
            payload = json.load(response)
    except (urllib.error.URLError, TimeoutError, json.JSONDecodeError):
        return None
    stars = payload.get("stargazers_count")
    # Guard against unexpected payload shapes (e.g. API error objects).
    return int(stars) if isinstance(stars, int) else None
def load_metadata(base_dir: str, repo: str = GITHUB_REPO) -> dict:
    """Assemble the metadata dict used to rewrite README/package.json.

    Combines the skill index size, package.json version, and the star count
    (live from the GitHub API when reachable, otherwise the value currently
    encoded in the README badge, otherwise 0).
    """
    readme_path = os.path.join(base_dir, "README.md")
    package_path = os.path.join(base_dir, "package.json")
    index_path = os.path.join(base_dir, "skills_index.json")
    with open(index_path, "r", encoding="utf-8") as file:
        skills = json.load(file)
    with open(package_path, "r", encoding="utf-8") as file:
        package = json.load(file)
    with open(readme_path, "r", encoding="utf-8") as file:
        current_readme = file.read()
    # Recover the star count from the existing badge URL, where ',' and '+'
    # appear URL-encoded as %2C and %2B.
    current_star_match = re.search(r"β%20([\d%2C\+]+)%20Stars", current_readme)
    current_stars = None
    if current_star_match:
        compact = current_star_match.group(1).replace("%2C", "").replace("%2B", "")
        compact = compact.rstrip("+")
        if compact.isdigit():
            current_stars = int(compact)
    live_stars = fetch_star_count(repo)
    # Prefer the live count; fall back to the README value, then 0.
    total_stars = live_stars if live_stars is not None else current_stars or 0
    return {
        "repo": repo,
        "version": str(package.get("version", "0.0.0")),
        "total_skills": len(skills),
        "total_skills_label": format_skill_count(len(skills)),
        "stars": total_stars,
        "star_badge_count": format_star_badge_count(total_stars),
        "star_milestone": format_star_milestone(total_stars),
        "star_celebration": format_star_celebration(total_stars),
        "updated_at": datetime.now(timezone.utc).replace(microsecond=0).isoformat(),
        "used_live_star_count": live_stars is not None,
    }
def apply_metadata(content: str, metadata: dict) -> str:
    """Splice current skill/version/star numbers into the README text.

    Every substitution targets one canonical sentence or badge and uses
    count=1, so stray occurrences elsewhere in the README are untouched.
    The returned text starts with a fresh registry-sync marker comment
    (any previous marker is removed first).
    """
    total_skills = metadata["total_skills"]
    total_skills_label = metadata["total_skills_label"]
    version = metadata["version"]
    star_badge_count = metadata["star_badge_count"]
    star_milestone = metadata["star_milestone"]
    star_celebration = metadata["star_celebration"]
    # Machine-readable record of what this sync applied.
    sync_comment = (
        f"<!-- registry-sync: version={version}; skills={total_skills}; "
        f"stars={metadata['stars']}; updated_at={metadata['updated_at']} -->"
    )
    # H1 title.
    content = re.sub(
        r"^# π Antigravity Awesome Skills: .*?$",
        (
            f"# π Antigravity Awesome Skills: {total_skills_label} "
            "Agentic Skills for Claude Code, Gemini CLI, Cursor, Copilot & More"
        ),
        content,
        count=1,
        flags=re.MULTILINE,
    )
    # Tagline blockquote.
    content = re.sub(
        r"^> \*\*The Ultimate Collection of .*?\*\*$",
        (
            f"> **The Ultimate Collection of {total_skills_label} Universal Agentic "
            "Skills for AI Coding Assistants β Claude Code, Gemini CLI, Codex CLI, "
            "Antigravity IDE, GitHub Copilot, Cursor, OpenCode, AdaL**"
        ),
        content,
        count=1,
        flags=re.MULTILINE,
    )
    # Stars badge (shields.io URL; ',' and '+' are URL-encoded).
    content = re.sub(
        r"https://img\.shields\.io/badge/β%20[\d%2C\+]+%20Stars-gold\?style=for-the-badge",
        f"https://img.shields.io/badge/β%20{star_badge_count}%20Stars-gold?style=for-the-badge",
        content,
        count=1,
    )
    # Intro paragraph skill count.
    content = re.sub(
        r"^\*\*Antigravity Awesome Skills\*\* is a curated, battle-tested library of \*\*.*?\*\* designed",
        (
            f"**Antigravity Awesome Skills** is a curated, battle-tested library of "
            f"**{total_skills_label} high-performance agentic skills** designed"
        ),
        content,
        count=1,
        flags=re.MULTILINE,
    )
    # Table-of-contents link (the anchor embeds the numeric count).
    content = re.sub(
        r"\[π Browse \d[\d,]*\+ Skills\]\(#browse-[^)]+\)",
        f"[π Browse {total_skills_label} Skills](#browse-{total_skills}-skills)",
        content,
        count=1,
    )
    # Release celebration banner.
    content = re.sub(
        r"\*\*Welcome to the V[\d.]+ .*? Stars Celebration Release!\*\*",
        f"**Welcome to the V{version} {star_celebration} Stars Celebration Release!**",
        content,
        count=1,
    )
    # Milestone callout.
    content = re.sub(
        r"> \*\*π .*? GitHub Stars Milestone!\*\*",
        f"> **π {star_milestone} GitHub Stars Milestone!**",
        content,
        count=1,
    )
    # Release description paragraph.
    content = re.sub(
        r"\*\*Antigravity Awesome Skills\*\* \(Release [\d.]+\) is a massive upgrade to your AI's capabilities, now featuring \*\*.*?\*\* skills",
        (
            f"**Antigravity Awesome Skills** (Release {version}) is a massive upgrade "
            f"to your AI's capabilities, now featuring **{total_skills_label} skills**"
        ),
        content,
        count=1,
    )
    # Browse section heading.
    content = re.sub(
        r"## Browse \d[\d,]*\+ Skills",
        f"## Browse {total_skills_label} Skills",
        content,
        count=1,
    )
    # Drop the stale sync marker before prepending the fresh one.
    content = re.sub(
        r"<!-- registry-sync: .*? -->\n?",
        "",
        content,
        count=1,
    )
    return f"{sync_comment}\n{content.lstrip()}"
def update_readme(dry_run: bool = False) -> dict:
    """Rewrite README.md with current skill/version/star metadata.

    Returns the metadata dict that was (or, under dry_run, would be) applied.
    """
    base_dir = find_repo_root(os.path.dirname(__file__))
    readme_path = os.path.join(base_dir, "README.md")
    metadata = load_metadata(base_dir)
    print(f"π Reading README from: {readme_path}")
    print(f"π’ Total skills found: {metadata['total_skills']}")
    print(f"π·οΈ Version found: {metadata['version']}")
    if metadata["used_live_star_count"]:
        print(f"β Live GitHub stars found: {metadata['stars']}")
    else:
        print(f"β Using existing README star count: {metadata['stars']}")
    with open(readme_path, "r", encoding="utf-8") as file:
        content = file.read()
    updated_content = apply_metadata(content, metadata)
    if dry_run:
        print("π§ͺ Dry run enabled; README.md not written.")
        return metadata
    # newline="\n" keeps LF endings regardless of platform.
    with open(readme_path, "w", encoding="utf-8", newline="\n") as file:
        file.write(updated_content)
    print("β
 README.md updated successfully.")
    return metadata
def parse_args() -> argparse.Namespace:
    """Parse the --dry-run CLI flag for the README sync script."""
    cli = argparse.ArgumentParser(description="Sync generated metadata into README.md.")
    cli.add_argument("--dry-run", action="store_true", help="Compute metadata without writing files.")
    return cli.parse_args()
# Script entry point: fix console encoding before any emoji-laden output.
if __name__ == "__main__":
    configure_utf8_output()
    args = parse_args()
    update_readme(dry_run=args.dry_run)
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "tools/scripts/update_readme.py",
"license": "MIT License",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:tools/scripts/validate_voltagent_sources.py | #!/usr/bin/env python3
"""
Validate GitHub sources for VoltAgent skills.
Checks:
- URL accessibility
- Repository existence
- SKILL.md presence
- License compatibility
"""
import json
import sys
import urllib.request
import urllib.error
from pathlib import Path
from typing import Dict, List, Optional
from urllib.parse import urlparse, urljoin
def check_url_accessible(url: str, timeout: int = 10) -> tuple[bool, Optional[str]]:
    """Probe *url* with an HTTP request.

    Returns:
        (True, None) on success, otherwise (False, reason string).
    """
    try:
        request = urllib.request.Request(url)
        request.add_header('User-Agent', 'Mozilla/5.0 (compatible; AntigravitySkillsValidator/1.0)')
        with urllib.request.urlopen(request, timeout=timeout):
            return True, None
    except urllib.error.HTTPError as err:
        return False, f"HTTP {err.code}: {err.reason}"
    except urllib.error.URLError as err:
        return False, f"URL Error: {str(err)}"
    except Exception as err:
        return False, f"Unexpected error: {str(err)}"
def get_repo_base_url(github_url: str) -> Optional[str]:
    """Return the "https://github.com/<org>/<repo>" prefix of a GitHub URL.

    Accepts tree/blob deep links such as
    https://github.com/org/repo/tree/main/path/to/skill. Returns None for
    non-GitHub hosts or URLs missing either the org or repo segment.
    """
    parsed = urlparse(github_url)
    if parsed.netloc != 'github.com':
        return None
    segments = parsed.path.strip('/').split('/')
    if len(segments) < 2:
        return None
    org, repo = segments[0], segments[1]
    return f"https://github.com/{org}/{repo}"
def check_skill_file_exists(url: str) -> tuple[bool, Optional[str]]:
    """Locate a skill document at *url* or a sibling path.

    Tries the URL as given, then SKILL.md / skill.md / README.md /
    index.md next to it. Returns (found, resolved_url_or_None).
    """
    ok, _error = check_url_accessible(url)
    if ok:
        return True, url
    # Probe common filenames in the same directory as the original URL.
    parent = url.rsplit('/', 1)[0] if '/' in url else url
    for candidate in (
        f"{parent}/SKILL.md",
        f"{parent}/skill.md",
        f"{parent}/README.md",
        f"{parent}/index.md",
    ):
        ok, _ = check_url_accessible(candidate)
        if ok:
            return True, candidate
    return False, None
def check_license_compatibility(repo_url: str) -> tuple[bool, Optional[str]]:
    """Best-effort license check for the repository behind *repo_url*.

    Fetches a LICENSE file from the repo root (main/master branch) and
    sniffs its first 500 bytes for well-known license names.

    Returns:
        (compatible, info) where info is "compatible", "unknown",
        "no_license_found" or a human-readable incompatibility reason.
    """
    repo_base = get_repo_base_url(repo_url)
    if not repo_base:
        return True, "unknown"  # Assume compatible if can't check
    license_urls = [
        f"{repo_base}/blob/main/LICENSE",
        f"{repo_base}/blob/master/LICENSE",
        f"{repo_base}/blob/main/LICENSE.md",
        f"{repo_base}/blob/master/LICENSE.md"
    ]
    for license_url in license_urls:
        accessible, _ = check_url_accessible(license_url)
        if accessible:
            # Try to read first few lines to detect license type
            try:
                req = urllib.request.Request(license_url.replace('/blob/', '/raw/'))
                req.add_header('User-Agent', 'Mozilla/5.0')
                with urllib.request.urlopen(req, timeout=5) as response:
                    content = response.read(500).decode('utf-8', errors='ignore').lower()
                # Check copyleft markers FIRST: GPL license text contains the
                # word "permitted", which a naive 'mit' substring test would
                # misclassify as the MIT license.
                if 'gpl' in content or 'general public license' in content:
                    return False, "GPL (may be incompatible)"
                if 'mit license' in content or 'apache' in content or 'bsd' in content:
                    return True, "compatible"
            except Exception:
                # Network/decode hiccups are non-fatal; try the next URL.
                pass
    # If no LICENSE found, assume compatible (many repos don't have explicit LICENSE)
    return True, "no_license_found"
def validate_sources(analysis_file: str) -> Dict:
    """Validate all sources from VoltAgent analysis.

    Reads the analysis JSON, checks each new skill's URL, skill file and
    license, and classifies each skill as valid or failed.

    Args:
        analysis_file: Path to the voltagent analysis JSON file.

    Returns:
        Dict with 'validated' and 'failed' lists plus a 'summary' block.
    """
    with open(analysis_file, 'r', encoding='utf-8') as f:
        analysis = json.load(f)
    new_skills = analysis.get('new_skills', [])
    print(f"π Validating {len(new_skills)} new skills...")
    print()
    validated = []
    failed = []
    for i, skill in enumerate(new_skills, 1):
        name = skill['normalized_name']
        url = skill['url']
        org = skill.get('org', 'unknown')
        print(f"[{i}/{len(new_skills)}] {name} ({org})")
        # Per-skill result record, filled in as each check runs.
        validation_result = {
            'skill': skill,
            'url_accessible': False,
            'skill_file_found': False,
            'skill_file_url': None,
            'license_compatible': True,
            'license_info': 'unknown',
            'valid': False,
            'errors': []
        }
        # Check URL accessibility
        accessible, error = check_url_accessible(url)
        validation_result['url_accessible'] = accessible
        if not accessible:
            validation_result['errors'].append(f"URL not accessible: {error}")
            print(f" β URL not accessible: {error}")
            failed.append(validation_result)
            continue
        print(f" β URL accessible")
        # Check for SKILL.md
        skill_found, skill_url = check_skill_file_exists(url)
        validation_result['skill_file_found'] = skill_found
        validation_result['skill_file_url'] = skill_url
        if skill_found:
            print(f" β Skill file found: {skill_url}")
        else:
            # Missing SKILL.md is a warning, not an automatic failure.
            validation_result['errors'].append("SKILL.md not found")
            print(f" β οΈ SKILL.md not found (may need manual creation)")
        # Check license
        license_ok, license_info = check_license_compatibility(url)
        validation_result['license_compatible'] = license_ok
        validation_result['license_info'] = license_info
        if license_ok:
            print(f" β License: {license_info}")
        else:
            validation_result['errors'].append(f"License issue: {license_info}")
            print(f" β οΈ License: {license_info}")
        # Determine if valid
        # Valid if URL accessible and (skill file found OR from official org)
        official_orgs = ['vercel-labs', 'cloudflare', 'huggingface', 'trailofbits',
                         'expo', 'getsentry', 'neondatabase', 'fal-ai-community',
                         'google-labs-code', 'better-auth', 'tinybirdco', 'remotion-dev']
        is_official = org in official_orgs
        validation_result['is_official'] = is_official
        if accessible and (skill_found or is_official):
            validation_result['valid'] = True
            validated.append(validation_result)
            print(f" β VALID")
        else:
            failed.append(validation_result)
            print(f" β INVALID")
        print()
    return {
        'validated': validated,
        'failed': failed,
        'summary': {
            'total': len(new_skills),
            'valid': len(validated),
            'failed': len(failed)
        }
    }
def main():
    """CLI entry point: validate sources and write results to JSON."""
    base_dir = Path(__file__).parent.parent
    analysis_file = base_dir / "voltagent_analysis.json"
    output_file = base_dir / "voltagent_validation.json"
    if not analysis_file.exists():
        print(f"β Analysis file not found: {analysis_file}")
        print(" Run analyze_voltagent_repo.py first")
        sys.exit(1)
    print("π Validating VoltAgent skill sources...")
    print()
    results = validate_sources(str(analysis_file))
    # Save results
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)
    print("=" * 60)
    print("π Validation Summary:")
    print(f" Total skills: {results['summary']['total']}")
    print(f" β Valid: {results['summary']['valid']}")
    print(f" β Failed: {results['summary']['failed']}")
    print()
    print(f"πΎ Results saved to: {output_file}")
    if results['validated']:
        print(f"\nβ Valid skills ready for implementation:")
        for item in results['validated']:
            skill = item['skill']
            print(f" β’ {skill['normalized_name']} ({skill.get('org', 'unknown')})")
    if results['failed']:
        print(f"\nβ Failed validations:")
        for item in results['failed']:
            skill = item['skill']
            errors = ', '.join(item['errors'])
            print(f" β’ {skill['normalized_name']}: {errors}")

if __name__ == "__main__":
    main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "tools/scripts/validate_voltagent_sources.py",
"license": "MIT License",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/videodb/scripts/ws_listener.py | #!/usr/bin/env python3
"""
WebSocket event listener for VideoDB with auto-reconnect and graceful shutdown.
Usage:
python scripts/ws_listener.py [OPTIONS] [output_dir]
Arguments:
output_dir Directory for output files (default: /tmp or VIDEODB_EVENTS_DIR env var)
Options:
--clear Clear the events file before starting (use when starting a new session)
Output files:
<output_dir>/videodb_events.jsonl - All WebSocket events (JSONL format)
<output_dir>/videodb_ws_id - WebSocket connection ID
<output_dir>/videodb_ws_pid - Process ID for easy termination
Output (first line, for parsing):
WS_ID=<connection_id>
Examples:
python scripts/ws_listener.py & # Run in background
python scripts/ws_listener.py --clear # Clear events and start fresh
python scripts/ws_listener.py --clear /tmp/mydir # Custom dir with clear
kill $(cat /tmp/videodb_ws_pid) # Stop the listener
"""
import os
import sys
import json
import signal
import asyncio
from datetime import datetime, timezone
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
import videodb
# Retry config
MAX_RETRIES = 10
INITIAL_BACKOFF = 1 # seconds
MAX_BACKOFF = 60 # seconds
# Parse arguments
def parse_args():
    """Parse CLI arguments for the listener.

    Recognises a --clear flag and an optional positional output directory;
    falls back to $VIDEODB_EVENTS_DIR, then /tmp.

    Returns:
        (clear_events, output_dir) as (bool, Path).
    """
    clear_requested = False
    chosen_dir = None
    for token in sys.argv[1:]:
        if token == "--clear":
            clear_requested = True
        elif not token.startswith("-"):
            chosen_dir = token
    if chosen_dir is None:
        chosen_dir = os.environ.get("VIDEODB_EVENTS_DIR", "/tmp")
    return clear_requested, Path(chosen_dir)
# Resolve configuration once at import time.
CLEAR_EVENTS, OUTPUT_DIR = parse_args()
EVENTS_FILE = OUTPUT_DIR / "videodb_events.jsonl"  # all events, JSONL format
WS_ID_FILE = OUTPUT_DIR / "videodb_ws_id"  # websocket connection id
PID_FILE = OUTPUT_DIR / "videodb_ws_pid"  # listener PID, for easy kill
# Track if this is the first connection (for clearing events)
_first_connection = True
def log(msg: str):
    """Print *msg* prefixed with an HH:MM:SS timestamp, flushing stdout."""
    print(f"[{datetime.now().strftime('%H:%M:%S')}] {msg}", flush=True)
def append_event(event: dict):
    """Stamp *event* with UTC timestamps and append it to the JSONL log.

    Note: mutates the passed-in dict (adds "ts" and "unix_ts").
    """
    event["ts"] = datetime.now(timezone.utc).isoformat()
    event["unix_ts"] = datetime.now(timezone.utc).timestamp()
    with open(EVENTS_FILE, "a") as sink:
        sink.write(json.dumps(event) + "\n")
def write_pid():
    """Write PID file for easy process management."""
    # Ensure the target directory exists before writing.
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    PID_FILE.write_text(str(os.getpid()))

def cleanup_pid():
    """Remove PID file on exit."""
    try:
        PID_FILE.unlink(missing_ok=True)
    except Exception:
        # Best-effort cleanup; never let shutdown fail on this.
        pass
async def listen_with_retry():
    """Main listen loop with auto-reconnect and exponential backoff.

    Connects to the VideoDB websocket, appends every received message to
    the events file, and reconnects on failure up to MAX_RETRIES times
    with exponential backoff capped at MAX_BACKOFF seconds.
    """
    global _first_connection
    retry_count = 0
    backoff = INITIAL_BACKOFF
    while retry_count < MAX_RETRIES:
        try:
            conn = videodb.connect()
            ws_wrapper = conn.connect_websocket()
            ws = await ws_wrapper.connect()
            ws_id = ws.connection_id
            # Ensure output directory exists
            OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
            # Clear events file only on first connection if --clear flag is set
            if _first_connection and CLEAR_EVENTS:
                EVENTS_FILE.unlink(missing_ok=True)
                log("Cleared events file")
            _first_connection = False
            # Write ws_id to file for easy retrieval
            WS_ID_FILE.write_text(ws_id)
            # Print ws_id (parseable format for LLM)
            if retry_count == 0:
                print(f"WS_ID={ws_id}", flush=True)
            log(f"Connected (ws_id={ws_id})")
            # Reset retry state on successful connection
            retry_count = 0
            backoff = INITIAL_BACKOFF
            # Listen for messages
            async for msg in ws.receive():
                append_event(msg)
                channel = msg.get("channel", msg.get("event", "unknown"))
                text = msg.get("data", {}).get("text", "")
                if text:
                    print(f"[{channel}] {text[:80]}", flush=True)
            # If we exit the loop normally, connection was closed
            # NOTE(review): a clean server close loops straight back into
            # reconnect with retry_count already reset to 0 — confirm intended.
            log("Connection closed by server")
        except asyncio.CancelledError:
            # Propagate cancellation so shutdown is not treated as an error.
            log("Shutdown requested")
            raise
        except Exception as e:
            retry_count += 1
            log(f"Connection error: {e}")
            if retry_count >= MAX_RETRIES:
                log(f"Max retries ({MAX_RETRIES}) exceeded, exiting")
                break
            log(f"Reconnecting in {backoff}s (attempt {retry_count}/{MAX_RETRIES})...")
            await asyncio.sleep(backoff)
            backoff = min(backoff * 2, MAX_BACKOFF)
async def main_async():
    """Async main with signal handling.

    Runs the listener alongside a shutdown-event waiter and cancels
    whichever task is still pending once either completes.
    """
    loop = asyncio.get_running_loop()
    shutdown_event = asyncio.Event()
    def handle_signal():
        log("Received shutdown signal")
        shutdown_event.set()
    # Register signal handlers
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, handle_signal)
    # Run listener with cancellation support
    listen_task = asyncio.create_task(listen_with_retry())
    shutdown_task = asyncio.create_task(shutdown_event.wait())
    done, pending = await asyncio.wait(
        [listen_task, shutdown_task],
        return_when=asyncio.FIRST_COMPLETED,
    )
    # Cancel remaining tasks
    for task in pending:
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
    log("Shutdown complete")
def main():
    """Entry point: record PID, run the async loop, always clean up PID."""
    write_pid()
    try:
        asyncio.run(main_async())
    finally:
        cleanup_pid()

if __name__ == "__main__":
    main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/videodb/scripts/ws_listener.py",
"license": "MIT License",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/audio-transcriber/scripts/transcribe.py | #!/usr/bin/env python3
"""
Audio Transcriber v1.1.0
Transcreve Γ‘udio para texto e gera atas/resumos usando LLM.
"""
import os
import sys
import json
import subprocess
import shutil
from datetime import datetime
from pathlib import Path
# Rich for beautiful terminal output
try:
    from rich.console import Console
    from rich.prompt import Prompt
    from rich.panel import Panel
    from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn
    from rich import print as rprint
    RICH_AVAILABLE = True
except ImportError:
    # Self-healing dependency: install rich on first run, then retry import.
    RICH_AVAILABLE = False
    print("β οΈ Installing rich for better UI...")
    subprocess.run([sys.executable, "-m", "pip", "install", "--user", "rich"], check=False)
    from rich.console import Console
    from rich.prompt import Prompt
    from rich.panel import Panel
    from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn
    from rich import print as rprint
# tqdm for progress bars
try:
    from tqdm import tqdm
except ImportError:
    print("β οΈ Installing tqdm for progress bars...")
    subprocess.run([sys.executable, "-m", "pip", "install", "--user", "tqdm"], check=False)
    from tqdm import tqdm
# Whisper engines
# Prefer faster-whisper; fall back to openai-whisper; abort if neither exists.
try:
    from faster_whisper import WhisperModel
    TRANSCRIBER = "faster-whisper"
except ImportError:
    try:
        import whisper
        TRANSCRIBER = "whisper"
    except ImportError:
        print("β Nenhum engine de transcriΓ§Γ£o encontrado!")
        print(" Instale: pip install faster-whisper")
        sys.exit(1)
# Shared Rich console used by every function below.
console = Console()
# Default RISEN-framework template used as fallback whenever prompt-engineer
# is unavailable, times out, or the user rejects a generated prompt.
DEFAULT_MEETING_PROMPT = """
Role: VocΓͺ Γ© um transcritor profissional especializado em documentaΓ§Γ£o.
Instructions: Transforme a transcriΓ§Γ£o fornecida em um documento estruturado e profissional.
Steps:
1. Identifique o tipo de conteΓΊdo (reuniΓ£o, palestra, entrevista, etc.)
2. Extraia os principais tΓ³picos e pontos-chave
3. Identifique participantes/speakers (se aplicΓ‘vel)
4. Extraia decisΓ΅es tomadas e aΓ§Γ΅es definidas (se reuniΓ£o)
5. Organize em formato apropriado com seΓ§Γ΅es claras
6. Use Markdown para formataΓ§Γ£o profissional
End Goal: Documento final bem estruturado, legΓvel e pronto para distribuiΓ§Γ£o.
Narrowing:
- Mantenha objetividade e clareza
- Preserve contexto importante
- Use formataΓ§Γ£o Markdown adequada
- Inclua timestamps relevantes quando aplicΓ‘vel
"""
def detect_cli_tool():
    """Detect which LLM CLI is available (claude takes priority over gh copilot)."""
    if shutil.which('claude'):
        return 'claude'
    if shutil.which('gh'):
        probe = subprocess.run(
            ['gh', 'copilot', '--version'],
            capture_output=True,
            text=True,
        )
        if probe.returncode == 0:
            return 'gh-copilot'
    return None
def invoke_prompt_engineer(raw_prompt, timeout=90):
    """Invoke the prompt-engineer skill via CLI to improve/generate prompts.

    Args:
        raw_prompt: Prompt to be improved, or a meta-prompt.
        timeout: Timeout in seconds.

    Returns:
        str: Improved prompt, or DEFAULT_MEETING_PROMPT on any failure.
    """
    try:
        # Delegate to gh copilot; any failure falls back to the default template.
        console.print("[dim] Invocando prompt-engineer...[/dim]")
        result = subprocess.run(
            ['gh', 'copilot', 'suggest', '-t', 'shell', raw_prompt],
            capture_output=True,
            text=True,
            timeout=timeout
        )
        if result.returncode == 0 and result.stdout.strip():
            return result.stdout.strip()
        else:
            console.print("[yellow]β οΈ prompt-engineer nΓ£o respondeu, usando template padrΓ£o[/yellow]")
            return DEFAULT_MEETING_PROMPT
    except subprocess.TimeoutExpired:
        console.print(f"[red]β οΈ Timeout apΓ³s {timeout}s, usando template padrΓ£o[/red]")
        return DEFAULT_MEETING_PROMPT
    except Exception as e:
        console.print(f"[red]β οΈ Erro ao invocar prompt-engineer: {e}[/red]")
        return DEFAULT_MEETING_PROMPT
def handle_prompt_workflow(user_prompt, transcript):
    """Manage the full prompt workflow with the prompt-engineer skill.

    Scenario A: user supplied a prompt -> improve AUTOMATICALLY -> confirm.
    Scenario B: no prompt -> suggest type -> confirm -> generate -> confirm.

    Returns:
        str: Final prompt to use, or None if the user declined processing.
    """
    prompt_engineer_available = os.path.exists(
        os.path.expanduser('~/.copilot/skills/prompt-engineer/SKILL.md')
    )
    # ========== SCENARIO A: USER PROVIDED A PROMPT ==========
    if user_prompt:
        console.print("\n[cyan]π Prompt fornecido pelo usuΓ‘rio[/cyan]")
        console.print(Panel(user_prompt[:300] + ("..." if len(user_prompt) > 300 else ""),
                            title="Prompt original", border_style="dim"))
        if prompt_engineer_available:
            # Improve AUTOMATICALLY (without asking first).
            console.print("\n[cyan]π§ Melhorando prompt com prompt-engineer...[/cyan]")
            improved_prompt = invoke_prompt_engineer(
                f"melhore este prompt:\n\n{user_prompt}"
            )
            # Show BOTH versions.
            console.print("\n[green]β¨ VersΓ£o melhorada:[/green]")
            console.print(Panel(improved_prompt[:500] + ("..." if len(improved_prompt) > 500 else ""),
                                title="Prompt otimizado", border_style="green"))
            console.print("\n[dim]π VersΓ£o original:[/dim]")
            console.print(Panel(user_prompt[:300] + ("..." if len(user_prompt) > 300 else ""),
                                title="Seu prompt", border_style="dim"))
            # Ask which version to use.
            confirm = Prompt.ask(
                "\nπ‘ Usar versΓ£o melhorada?",
                choices=["s", "n"],
                default="s"
            )
            return improved_prompt if confirm == "s" else user_prompt
        else:
            # prompt-engineer not available; keep the user's prompt as-is.
            console.print("[yellow]β οΈ prompt-engineer skill nΓ£o disponΓvel[/yellow]")
            console.print("[dim]β Usando seu prompt original[/dim]")
            return user_prompt
    # ========== SCENARIO B: NO PROMPT - AUTO-GENERATION ==========
    else:
        console.print("\n[yellow]β οΈ Nenhum prompt fornecido.[/yellow]")
        if not prompt_engineer_available:
            console.print("[yellow]β οΈ prompt-engineer skill nΓ£o encontrado[/yellow]")
            console.print("[dim]Usando template padrΓ£o...[/dim]")
            return DEFAULT_MEETING_PROMPT
        # STEP 1: ask whether to auto-generate a prompt at all.
        console.print("Posso analisar o transcript e sugerir um formato de resumo/ata?")
        generate = Prompt.ask(
            "\nπ‘ Gerar prompt automaticamente?",
            choices=["s", "n"],
            default="s"
        )
        if generate == "n":
            console.print("[dim]β Ok, gerando apenas transcript.md (sem ata)[/dim]")
            return None  # Signal: skip LLM processing entirely.
        # STEP 2: analyze the transcript and SUGGEST a content type.
        console.print("\n[cyan]π Analisando transcript...[/cyan]")
        suggestion_meta_prompt = f"""
Analise este transcript ({len(transcript)} caracteres) e sugira:
1. Tipo de conteΓΊdo (reuniΓ£o, palestra, entrevista, etc.)
2. Formato de saΓda recomendado (ata formal, resumo executivo, notas estruturadas)
3. Framework ideal (RISEN, RODES, STAR, etc.)
Primeiras 1000 palavras do transcript:
{transcript[:4000]}
Responda em 2-3 linhas concisas.
"""
        suggested_type = invoke_prompt_engineer(suggestion_meta_prompt)
        # STEP 3: show the suggestion and CONFIRM.
        console.print("\n[green]π‘ SugestΓ£o de formato:[/green]")
        console.print(Panel(suggested_type, title="AnΓ‘lise do transcript", border_style="green"))
        confirm_type = Prompt.ask(
            "\nπ‘ Usar este formato?",
            choices=["s", "n"],
            default="s"
        )
        if confirm_type == "n":
            console.print("[dim]Usando template padrΓ£o...[/dim]")
            return DEFAULT_MEETING_PROMPT
        # STEP 4: generate the full prompt from the confirmed suggestion.
        console.print("\n[cyan]β¨ Gerando prompt estruturado...[/cyan]")
        final_meta_prompt = f"""
Crie um prompt completo e estruturado (usando framework apropriado) para:
{suggested_type}
O prompt deve instruir uma IA a transformar o transcript em um documento
profissional e bem formatado em Markdown.
"""
        generated_prompt = invoke_prompt_engineer(final_meta_prompt)
        # STEP 5: show the generated prompt and CONFIRM.
        console.print("\n[green]β Prompt gerado:[/green]")
        console.print(Panel(generated_prompt[:600] + ("..." if len(generated_prompt) > 600 else ""),
                            title="Preview", border_style="green"))
        confirm_final = Prompt.ask(
            "\nπ‘ Usar este prompt?",
            choices=["s", "n"],
            default="s"
        )
        if confirm_final == "s":
            return generated_prompt
        else:
            console.print("[dim]Usando template padrΓ£o...[/dim]")
            return DEFAULT_MEETING_PROMPT
def process_with_llm(transcript, prompt, cli_tool='claude', timeout=300):
    """Process the transcript with an LLM using the given prompt.

    Args:
        transcript: Transcribed text.
        prompt: Prompt instructing how to process it.
        cli_tool: 'claude' or 'gh-copilot'.
        timeout: Timeout in seconds.

    Returns:
        str: Processed minutes/summary, or None on failure.
    """
    full_prompt = f"{prompt}\n\n---\n\nTranscriΓ§Γ£o:\n\n{transcript}"
    try:
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            transient=True
        ) as progress:
            progress.add_task(description=f"π€ Processando com {cli_tool}...", total=None)
            if cli_tool == 'claude':
                # Claude CLI reads the prompt from stdin ('-').
                result = subprocess.run(
                    ['claude', '-'],
                    input=full_prompt,
                    capture_output=True,
                    text=True,
                    timeout=timeout
                )
            elif cli_tool == 'gh-copilot':
                result = subprocess.run(
                    ['gh', 'copilot', 'suggest', '-t', 'shell', full_prompt],
                    capture_output=True,
                    text=True,
                    timeout=timeout
                )
            else:
                # NOTE(review): this ValueError is caught by the generic
                # handler below and surfaces as None — confirm intended.
                raise ValueError(f"CLI tool desconhecido: {cli_tool}")
            if result.returncode == 0:
                return result.stdout.strip()
            else:
                console.print(f"[red]β Erro ao processar com {cli_tool}[/red]")
                console.print(f"[dim]{result.stderr[:200]}[/dim]")
                return None
    except subprocess.TimeoutExpired:
        console.print(f"[red]β Timeout apΓ³s {timeout}s[/red]")
        return None
    except Exception as e:
        console.print(f"[red]β Erro: {e}[/red]")
        return None
def transcribe_audio(audio_file, model="base"):
    """Transcribe audio using Whisper, with a progress bar.

    Args:
        audio_file: Path to the audio file.
        model: Whisper model size (tiny/base/small/medium/large).

    Returns:
        dict: {language, duration, segments: [{start, end, text}]}
        Exits the process with status 1 on transcription failure.
    """
    console.print(f"\n[cyan]ποΈ Transcrevendo Γ‘udio com {TRANSCRIBER}...[/cyan]")
    try:
        if TRANSCRIBER == "faster-whisper":
            # CPU + int8 keeps memory low and works without a GPU.
            model_obj = WhisperModel(model, device="cpu", compute_type="int8")
            segments, info = model_obj.transcribe(
                audio_file,
                language=None,
                vad_filter=True,
                word_timestamps=True
            )
            data = {
                "language": info.language,
                "language_probability": round(info.language_probability, 2),
                "duration": info.duration,
                "segments": []
            }
            # Convert the segments generator into a list, showing progress.
            console.print("[dim]Processando segmentos...[/dim]")
            for segment in tqdm(segments, desc="Segmentos", unit="seg"):
                data["segments"].append({
                    "start": round(segment.start, 2),
                    "end": round(segment.end, 2),
                    "text": segment.text.strip()
                })
        else:  # original whisper engine
            import whisper
            model_obj = whisper.load_model(model)
            result = model_obj.transcribe(audio_file, word_timestamps=True)
            data = {
                "language": result["language"],
                "duration": result["segments"][-1]["end"] if result["segments"] else 0,
                "segments": result["segments"]
            }
        console.print(f"[green]β TranscriΓ§Γ£o completa! Idioma: {data['language'].upper()}[/green]")
        console.print(f"[dim] {len(data['segments'])} segmentos processados[/dim]")
        return data
    except Exception as e:
        console.print(f"[red]β Erro na transcriΓ§Γ£o: {e}[/red]")
        sys.exit(1)
def save_outputs(transcript_text, ata_text, audio_file, output_dir="."):
    """Save transcript and minutes to timestamped .md files.

    Args:
        transcript_text: Full transcript markdown (always saved).
        ata_text: Minutes/summary markdown, or None to skip.
        audio_file: Original audio path (used only for its stem).
        output_dir: Destination directory.

    Returns:
        tuple: (transcript_path, ata_path or None)
    """
    timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    base_name = Path(audio_file).stem  # NOTE(review): computed but never used — confirm.
    # Always save the transcript.
    transcript_filename = f"transcript-{timestamp}.md"
    transcript_path = Path(output_dir) / transcript_filename
    with open(transcript_path, 'w', encoding='utf-8') as f:
        f.write(transcript_text)
    console.print(f"[green]β Transcript salvo:[/green] {transcript_filename}")
    # Save the minutes only when provided.
    ata_path = None
    if ata_text:
        ata_filename = f"ata-{timestamp}.md"
        ata_path = Path(output_dir) / ata_filename
        with open(ata_path, 'w', encoding='utf-8') as f:
            f.write(ata_text)
        console.print(f"[green]β Ata salva:[/green] {ata_filename}")
    return str(transcript_path), str(ata_path) if ata_path else None
def main():
    """Main entry point: transcribe, optionally summarize, save outputs."""
    import argparse
    parser = argparse.ArgumentParser(description="Audio Transcriber v1.1.0")
    parser.add_argument("audio_file", help="Arquivo de Γ‘udio para transcrever")
    parser.add_argument("--prompt", help="Prompt customizado para processar transcript")
    parser.add_argument("--model", default="base", help="Modelo Whisper (tiny/base/small/medium/large)")
    parser.add_argument("--output-dir", default=".", help="DiretΓ³rio de saΓda")
    args = parser.parse_args()
    # Verify the input file exists before doing any work.
    if not os.path.exists(args.audio_file):
        console.print(f"[red]β Arquivo nΓ£o encontrado: {args.audio_file}[/red]")
        sys.exit(1)
    console.print("[bold cyan]π΅ Audio Transcriber v1.1.0[/bold cyan]\n")
    # Step 1: transcribe.
    transcription_data = transcribe_audio(args.audio_file, model=args.model)
    # Build the transcript markdown with per-segment timestamps.
    transcript_text = f"# TranscriΓ§Γ£o de Γudio\n\n"
    transcript_text += f"**Arquivo:** {Path(args.audio_file).name}\n"
    transcript_text += f"**Idioma:** {transcription_data['language'].upper()}\n"
    transcript_text += f"**Data:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
    transcript_text += "---\n\n## TranscriΓ§Γ£o Completa\n\n"
    for seg in transcription_data["segments"]:
        start_min = int(seg["start"] // 60)
        start_sec = int(seg["start"] % 60)
        end_min = int(seg["end"] // 60)
        end_sec = int(seg["end"] % 60)
        transcript_text += f"**[{start_min:02d}:{start_sec:02d} β {end_min:02d}:{end_sec:02d}]** \n{seg['text']}\n\n"
    # Step 2: detect an available LLM CLI.
    cli_tool = detect_cli_tool()
    if not cli_tool:
        # No LLM available: save the raw transcript only.
        console.print("\n[yellow]β οΈ Nenhuma CLI de IA detectada (Claude ou GitHub Copilot)[/yellow]")
        console.print("[dim]βΉοΈ Salvando apenas transcript.md...[/dim]")
        save_outputs(transcript_text, None, args.audio_file, args.output_dir)
        console.print("\n[cyan]π‘ Para gerar ata/resumo:[/cyan]")
        console.print(" - Instale Claude CLI: pip install claude-cli")
        console.print(" - Ou GitHub Copilot CLI jΓ‘ estΓ‘ instalado (gh copilot)")
        return
    console.print(f"\n[green]β CLI detectada: {cli_tool}[/green]")
    # Step 3: prompt workflow (improve / auto-generate / decline).
    final_prompt = handle_prompt_workflow(args.prompt, transcript_text)
    if final_prompt is None:
        # User declined LLM processing.
        save_outputs(transcript_text, None, args.audio_file, args.output_dir)
        return
    # Step 4: process with the LLM.
    ata_text = process_with_llm(transcript_text, final_prompt, cli_tool)
    if ata_text:
        console.print("[green]β Ata gerada com sucesso![/green]")
    else:
        console.print("[yellow]β οΈ Falha ao gerar ata, salvando apenas transcript[/yellow]")
    # Step 5: save files.
    console.print("\n[cyan]πΎ Salvando arquivos...[/cyan]")
    save_outputs(transcript_text, ata_text, args.audio_file, args.output_dir)
    console.print("\n[bold green]β ConcluΓdo![/bold green]")

if __name__ == "__main__":
    main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/audio-transcriber/scripts/transcribe.py",
"license": "MIT License",
"lines": 383,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/youtube-summarizer/scripts/extract-transcript.py | #!/usr/bin/env python3
"""
Extract YouTube video transcript
Usage: ./extract-transcript.py VIDEO_ID [LANGUAGE_CODE]
"""
import sys
from youtube_transcript_api import YouTubeTranscriptApi, TranscriptsDisabled, NoTranscriptFound
def extract_transcript(video_id, language='en'):
    """Fetch a video's transcript and return it as one space-joined string.

    Tries *language* first with English as fallback; on any failure prints
    the reason to stderr and exits with status 1.
    """
    try:
        segments = YouTubeTranscriptApi.get_transcript(
            video_id,
            languages=[language, 'en']
        )
        # Flatten the segment dicts into a single text blob.
        return " ".join(entry['text'] for entry in segments)
    except TranscriptsDisabled:
        print(f"β Transcripts are disabled for video {video_id}", file=sys.stderr)
        sys.exit(1)
    except NoTranscriptFound:
        print(f"β No transcript found for video {video_id}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"β Error: {e}", file=sys.stderr)
        sys.exit(1)
def list_available_transcripts(video_id):
    """Print every transcript available for *video_id*.

    Returns:
        bool: True on success, False if listing failed.
    """
    try:
        transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
        print(f"β Available transcripts for {video_id}:")
        for transcript in transcript_list:
            generated = "[Auto-generated]" if transcript.is_generated else "[Manual]"
            translatable = "(translatable)" if transcript.is_translatable else ""
            print(f" - {transcript.language} ({transcript.language_code}) {generated} {translatable}")
        return True
    except Exception as e:
        print(f"β Error listing transcripts: {e}", file=sys.stderr)
        return False
if __name__ == "__main__":
    # CLI: VIDEO_ID plus either --list or an optional language code.
    if len(sys.argv) < 2:
        print("Usage: ./extract-transcript.py VIDEO_ID [LANGUAGE_CODE]")
        print(" ./extract-transcript.py VIDEO_ID --list (list available transcripts)")
        sys.exit(1)
    video_id = sys.argv[1]
    # Check if user wants to list available transcripts
    if len(sys.argv) > 2 and sys.argv[2] == "--list":
        success = list_available_transcripts(video_id)
        sys.exit(0 if success else 1)
    # Extract transcript
    language = sys.argv[2] if len(sys.argv) > 2 else 'en'
    transcript = extract_transcript(video_id, language)
    print(transcript)
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/youtube-summarizer/scripts/extract-transcript.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/oss-hunter/bin/hunter.py | import os
import json
import subprocess
import sys
def run_gh_command(args):
    """Run the GitHub CLI with *args* and return its stdout.

    Args:
        args: List of arguments passed to `gh`.

    Returns:
        str stdout on success, or None when the command fails or the
        `gh` binary is not installed.
    """
    try:
        result = subprocess.run(['gh'] + args, capture_output=True, text=True, check=True)
        return result.stdout
    except FileNotFoundError:
        # `gh` not on PATH — report it instead of crashing with a traceback.
        print("Error: GitHub CLI (gh) not found on PATH", file=sys.stderr)
        return None
    except subprocess.CalledProcessError as e:
        print(f"Error running gh command: {e.stderr}", file=sys.stderr)
        return None
def hunt():
    """Find help-wanted issues in recently-pushed high-star GitHub repos."""
    print("π― Hunting for high-impact OSS issues...")
    # 1. Find trending repos (stars > 1000 created/updated recently)
    # NOTE(review): the pushed:> date is hard-coded and will go stale — confirm.
    repos_json = run_gh_command(['api', 'search/repositories?q=stars:>1000+pushed:>2026-02-01&sort=stars&order=desc', '--jq', '.items[] | {full_name: .full_name, stars: .stargazers_count, description: .description}'])
    if not repos_json:
        print("No trending repositories found.")
        return
    # --jq emits one JSON object per line; keep only the top 10 repos.
    repos = [json.loads(line) for line in repos_json.strip().split('\n')[:10]]
    dossier = []
    for repo in repos:
        name = repo['full_name']
        print(f"Checking {name}...")
        # 2. Search for help-wanted issues
        issues_json = run_gh_command(['issue', 'list', '--repo', name, '--label', 'help wanted', '--json', 'number,title,url', '--limit', '3'])
        if issues_json:
            try:
                issues = json.loads(issues_json)
                for issue in issues:
                    dossier.append({
                        'repo': name,
                        'stars': repo['stars'],
                        'number': issue['number'],
                        'title': issue['title'],
                        'url': issue['url']
                    })
            except json.JSONDecodeError:
                # Skip repos whose issue listing produced invalid JSON.
                pass
    print("\n--- π OSS CONTRIBUTION DOSSIER ---")
    for item in dossier:
        print(f"\n[{item['repo']} β {item['stars']}]")
        print(f"Issue #{item['number']}: {item['title']}")
        print(f"Link: {item['url']}")

if __name__ == "__main__":
    hunt()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/oss-hunter/bin/hunter.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/api-design-principles/assets/rest-api-template.py | """
Production-ready REST API template using FastAPI.
Includes pagination, filtering, error handling, and best practices.
"""
from fastapi import FastAPI, HTTPException, Query, Path, Depends, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field, EmailStr, ConfigDict
from typing import Optional, List, Any
from datetime import datetime
from enum import Enum
# FastAPI application instance; interactive docs served at /api/docs.
app = FastAPI(
    title="API Template",
    version="1.0.0",
    docs_url="/api/docs"
)
# Security Middleware
# Trusted Host: Prevents HTTP Host Header attacks
app.add_middleware(
    TrustedHostMiddleware,
    allowed_hosts=["*"]  # TODO: Configure this in production, e.g. ["api.example.com"]
)
# CORS: Configures Cross-Origin Resource Sharing
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # TODO: Update this with specific origins in production
    allow_credentials=False,  # TODO: Set to True if you need cookies/auth headers, but restrict origins
    allow_methods=["*"],
    allow_headers=["*"],
)
# Models
class UserStatus(str, Enum):
    """Lifecycle states of a user account (serialized as plain strings)."""
    ACTIVE = "active"
    INACTIVE = "inactive"
    SUSPENDED = "suspended"
class UserBase(BaseModel):
    """Fields shared by every user representation."""
    email: EmailStr
    name: str = Field(..., min_length=1, max_length=100)  # non-empty, capped at 100 chars
    status: UserStatus = UserStatus.ACTIVE  # new users default to active
class UserCreate(UserBase):
    """Payload for POST /api/users; adds the write-only password."""
    password: str = Field(..., min_length=8)  # accepted on input, never returned
class UserUpdate(BaseModel):
    """Partial-update payload for PATCH; every field is optional."""
    email: Optional[EmailStr] = None
    name: Optional[str] = Field(None, min_length=1, max_length=100)
    status: Optional[UserStatus] = None
class User(UserBase):
    """Full user representation returned by the API."""
    id: str
    created_at: datetime
    updated_at: datetime
    # Allow construction from attribute-based objects, e.g. ORM rows (pydantic v2).
    model_config = ConfigDict(from_attributes=True)
# Pagination
class PaginationParams(BaseModel):
    """Standard pagination query parameters.

    NOTE(review): declared but unused below — the endpoints take
    page/page_size as individual Query parameters instead.
    """
    page: int = Field(1, ge=1)
    page_size: int = Field(20, ge=1, le=100)  # hard cap of 100 items per page
class PaginatedResponse(BaseModel):
    """Envelope for paginated list endpoints."""
    items: List[Any]  # one page worth of serialized records
    total: int        # total matching records across all pages
    page: int
    page_size: int
    pages: int        # ceil(total / page_size)
# Error handling
class ErrorDetail(BaseModel):
    """One field-level problem entry inside an error response."""
    field: Optional[str] = None  # None for non-field-specific problems
    message: str
    code: str
class ErrorResponse(BaseModel):
    """Uniform error envelope produced by the HTTPException handler."""
    error: str    # exception class name
    message: str
    details: Optional[List[ErrorDetail]] = None
@app.exception_handler(HTTPException)
async def http_exception_handler(request, exc):
    """Convert an HTTPException into the uniform ErrorResponse envelope.

    ``exc.detail`` may be a plain string, or a dict carrying "message"
    and optionally "details" keys.
    """
    if isinstance(exc.detail, dict):
        message = exc.detail.get("message", "Error")
        details = exc.detail.get("details")
        # ErrorResponse.details must be a list of ErrorDetail-shaped items.
        # Some callers pass a bare dict (e.g. {"id": ...}), which would make
        # model validation fail and turn the intended error into a 500.
        if not isinstance(details, list):
            details = None
    else:
        message = exc.detail
        details = None
    return JSONResponse(
        status_code=exc.status_code,
        content=ErrorResponse(
            error=exc.__class__.__name__,
            message=message,
            details=details,
        ).model_dump()
    )
# Endpoints
@app.get("/api/users", response_model=PaginatedResponse, tags=["Users"])
async def list_users(
    page: int = Query(1, ge=1),
    page_size: int = Query(20, ge=1, le=100),
    status: Optional[UserStatus] = Query(None),
    search: Optional[str] = Query(None)
):
    """List users with pagination and filtering."""
    # Mock implementation: fabricate one User per index in the current page.
    total = 100
    start_index = (page - 1) * page_size
    end_index = min(page * page_size, total)
    items = []
    for idx in range(start_index, end_index):
        items.append(
            User(
                id=str(idx),
                email=f"user{idx}@example.com",
                name=f"User {idx}",
                status=UserStatus.ACTIVE,
                created_at=datetime.now(),
                updated_at=datetime.now(),
            ).model_dump()
        )
    return PaginatedResponse(
        items=items,
        total=total,
        page=page,
        page_size=page_size,
        pages=(total + page_size - 1) // page_size,  # ceiling division
    )
@app.post("/api/users", response_model=User, status_code=status.HTTP_201_CREATED, tags=["Users"])
async def create_user(user: UserCreate):
    """Create a new user (mock implementation: nothing is persisted)."""
    # Echo the validated payload back as a stored User with a fixed id.
    new_user = User(
        id="123",
        email=user.email,
        name=user.name,
        status=user.status,
        created_at=datetime.now(),
        updated_at=datetime.now(),
    )
    return new_user
@app.get("/api/users/{user_id}", response_model=User, tags=["Users"])
async def get_user(user_id: str = Path(..., description="User ID")):
    """Get user by ID.

    Raises:
        HTTPException: 404 when the user does not exist (mock: id "999").
    """
    # Mock: Check if exists
    if user_id == "999":
        # "details" must be a list of ErrorDetail-shaped entries so the
        # ErrorResponse built by the exception handler validates; the previous
        # bare dict ({"id": ...}) failed that validation.
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail={
                "message": "User not found",
                "details": [
                    {"field": "id", "message": f"No user with id {user_id}", "code": "not_found"}
                ],
            },
        )
    return User(
        id=user_id,
        email="user@example.com",
        name="User Name",
        status=UserStatus.ACTIVE,
        created_at=datetime.now(),
        updated_at=datetime.now(),
    )
@app.patch("/api/users/{user_id}", response_model=User, tags=["Users"])
async def update_user(user_id: str, update: UserUpdate):
    """Partially update user."""
    # 404s via get_user when the user does not exist.
    existing = await get_user(user_id)
    # Copy only the fields the client actually sent onto the model.
    changes = update.model_dump(exclude_unset=True)
    for attr_name, attr_value in changes.items():
        setattr(existing, attr_name, attr_value)
    existing.updated_at = datetime.now()
    return existing
@app.delete("/api/users/{user_id}", status_code=status.HTTP_204_NO_CONTENT, tags=["Users"])
async def delete_user(user_id: str):
    """Delete user (mock). Responds 204 with an empty body."""
    # Raises 404 through get_user when the user is missing.
    await get_user(user_id)
    return None
# Run a local development server when executed directly.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/api-design-principles/assets/rest-api-template.py",
"license": "MIT License",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
sickn33/antigravity-awesome-skills:skills/prompt-engineering-patterns/scripts/optimize-prompt.py | #!/usr/bin/env python3
"""
Prompt Optimization Script
Automatically test and optimize prompts using A/B testing and metrics tracking.
"""
import json
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import numpy as np
@dataclass
class TestCase:
    """One prompt-evaluation case: template inputs plus the expected answer."""
    input: Dict[str, Any]      # values substituted into the prompt template
    expected_output: str       # reference answer used for accuracy scoring
    # Fixed: the field is optional, so annotate it as Optional instead of a
    # misleading bare Dict (the default stays None, callers are unaffected).
    metadata: Optional[Dict[str, Any]] = None
class PromptOptimizer:
    """Iteratively evaluates and refines prompt templates against a test suite.

    Test cases are executed in parallel on a thread pool and aggregated
    with numpy into accuracy/latency/token metrics.
    """

    def __init__(self, llm_client, test_suite: List[TestCase]):
        # llm_client must expose complete(prompt) -> str.
        self.client = llm_client
        self.test_suite = test_suite
        self.results_history = []  # one entry per optimize() iteration
        # Default worker count; adequate for I/O-bound LLM calls.
        self.executor = ThreadPoolExecutor()

    def shutdown(self):
        """Shutdown the thread pool executor."""
        self.executor.shutdown(wait=True)

    def evaluate_prompt(self, prompt_template: str, test_cases: List[TestCase] = None) -> Dict[str, float]:
        """Evaluate a prompt template against test cases in parallel.

        Returns aggregate metrics: avg/p95 latency, average accuracy,
        average token count and success rate.
        """
        if test_cases is None:
            test_cases = self.test_suite
        metrics = {
            'accuracy': [],
            'latency': [],
            'token_count': [],
            'success_rate': []
        }

        def process_test_case(test_case):
            # Runs on a worker thread: one full render -> complete -> score pass.
            start_time = time.time()
            # Render prompt with test case inputs
            prompt = prompt_template.format(**test_case.input)
            # Get LLM response
            response = self.client.complete(prompt)
            # Measure latency (formatting + API round trip)
            latency = time.time() - start_time
            # Calculate individual metrics; token count is a rough
            # whitespace-word proxy, not a tokenizer count.
            token_count = len(prompt.split()) + len(response.split())
            success = 1 if response else 0
            accuracy = self.calculate_accuracy(response, test_case.expected_output)
            return {
                'latency': latency,
                'token_count': token_count,
                'success_rate': success,
                'accuracy': accuracy
            }

        # Run test cases in parallel (results come back in input order)
        results = list(self.executor.map(process_test_case, test_cases))
        # Aggregate metrics
        for result in results:
            metrics['latency'].append(result['latency'])
            metrics['token_count'].append(result['token_count'])
            metrics['success_rate'].append(result['success_rate'])
            metrics['accuracy'].append(result['accuracy'])
        return {
            'avg_accuracy': np.mean(metrics['accuracy']),
            'avg_latency': np.mean(metrics['latency']),
            'p95_latency': np.percentile(metrics['latency'], 95),
            'avg_tokens': np.mean(metrics['token_count']),
            'success_rate': np.mean(metrics['success_rate'])
        }

    def calculate_accuracy(self, response: str, expected: str) -> float:
        """Calculate accuracy score between response and expected output.

        1.0 for a case-insensitive exact match, otherwise the fraction of
        expected words found in the response (word-overlap recall).
        """
        # Simple exact match
        if response.strip().lower() == expected.strip().lower():
            return 1.0
        # Partial match using word overlap
        response_words = set(response.lower().split())
        expected_words = set(expected.lower().split())
        if not expected_words:
            return 0.0
        overlap = len(response_words & expected_words)
        return overlap / len(expected_words)

    def optimize(self, base_prompt: str, max_iterations: int = 5) -> Dict[str, Any]:
        """Iteratively optimize a prompt.

        Each iteration evaluates the current prompt, records it, then tests
        generated variations and promotes the best scorer. Stops early once
        average accuracy exceeds 0.95.
        """
        current_prompt = base_prompt
        best_prompt = base_prompt
        best_score = 0
        current_metrics = None  # metrics carried over from the previous round
        for iteration in range(max_iterations):
            print(f"\nIteration {iteration + 1}/{max_iterations}")
            # Evaluate current prompt
            # Bolt Optimization: Avoid re-evaluating if we already have metrics from previous iteration
            # (current_metrics holds the winning variation's metrics).
            if current_metrics:
                metrics = current_metrics
            else:
                metrics = self.evaluate_prompt(current_prompt)
            print(f"Accuracy: {metrics['avg_accuracy']:.2f}, Latency: {metrics['avg_latency']:.2f}s")
            # Track results
            self.results_history.append({
                'iteration': iteration,
                'prompt': current_prompt,
                'metrics': metrics
            })
            # Update best if improved
            if metrics['avg_accuracy'] > best_score:
                best_score = metrics['avg_accuracy']
                best_prompt = current_prompt
            # Stop if good enough
            if metrics['avg_accuracy'] > 0.95:
                print("Achieved target accuracy!")
                break
            # Generate variations for next iteration
            variations = self.generate_variations(current_prompt, metrics)
            # Test variations and pick best
            best_variation = current_prompt
            best_variation_score = metrics['avg_accuracy']
            best_variation_metrics = metrics
            for variation in variations:
                var_metrics = self.evaluate_prompt(variation)
                if var_metrics['avg_accuracy'] > best_variation_score:
                    best_variation_score = var_metrics['avg_accuracy']
                    best_variation = variation
                    best_variation_metrics = var_metrics
            current_prompt = best_variation
            current_metrics = best_variation_metrics
        return {
            'best_prompt': best_prompt,
            'best_score': best_score,
            'history': self.results_history
        }

    def generate_variations(self, prompt: str, current_metrics: Dict) -> List[str]:
        """Generate prompt variations to test (at most three are returned)."""
        variations = []
        # Variation 1: Add explicit format instruction
        variations.append(prompt + "\n\nProvide your answer in a clear, concise format.")
        # Variation 2: Add step-by-step instruction
        variations.append("Let's solve this step by step.\n\n" + prompt)
        # Variation 3: Add verification step
        variations.append(prompt + "\n\nVerify your answer before responding.")
        # Variation 4: Make more concise
        concise = self.make_concise(prompt)
        if concise != prompt:
            variations.append(concise)
        # Variation 5: Add examples (if none present)
        if "example" not in prompt.lower():
            variations.append(self.add_examples(prompt))
        return variations[:3]  # Return top 3 variations

    def make_concise(self, prompt: str) -> str:
        """Remove redundant words to make prompt more concise."""
        replacements = [
            ("in order to", "to"),
            ("due to the fact that", "because"),
            ("at this point in time", "now"),
            ("in the event that", "if"),
        ]
        result = prompt
        for old, new in replacements:
            result = result.replace(old, new)
        return result

    def add_examples(self, prompt: str) -> str:
        """Add example section to prompt."""
        return f"""{prompt}
Example:
Input: Sample input
Output: Sample output
"""

    def compare_prompts(self, prompt_a: str, prompt_b: str) -> Dict[str, Any]:
        """A/B test two prompts; ties go to B."""
        print("Testing Prompt A...")
        metrics_a = self.evaluate_prompt(prompt_a)
        print("Testing Prompt B...")
        metrics_b = self.evaluate_prompt(prompt_b)
        return {
            'prompt_a_metrics': metrics_a,
            'prompt_b_metrics': metrics_b,
            'winner': 'A' if metrics_a['avg_accuracy'] > metrics_b['avg_accuracy'] else 'B',
            'improvement': abs(metrics_a['avg_accuracy'] - metrics_b['avg_accuracy'])
        }

    def export_results(self, filename: str):
        """Export optimization results to JSON."""
        with open(filename, 'w') as f:
            json.dump(self.results_history, f, indent=2)
def main():
    """Demo: optimize a sentiment-classification prompt with a mock LLM."""

    class MockLLMClient:
        """Stands in for a real LLM client during the demo."""

        def complete(self, prompt):
            # Simulate LLM response
            if 'amazing' in prompt:
                return 'Positive'
            elif 'worst' in prompt.lower():
                return 'Negative'
            else:
                return 'Neutral'

    suite = [
        TestCase(input={'text': 'This movie was amazing!'}, expected_output='Positive'),
        TestCase(input={'text': 'Worst purchase ever.'}, expected_output='Negative'),
        TestCase(input={'text': 'It was okay, nothing special.'}, expected_output='Neutral'),
    ]

    optimizer = PromptOptimizer(MockLLMClient(), suite)
    try:
        results = optimizer.optimize("Classify the sentiment of: {text}\nSentiment:")
        print("\n" + "="*50)
        print("Optimization Complete!")
        print(f"Best Accuracy: {results['best_score']:.2f}")
        print(f"Best Prompt:\n{results['best_prompt']}")
        optimizer.export_results('optimization_results.json')
    finally:
        # Always release the thread pool, even if optimization fails.
        optimizer.shutdown()
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/prompt-engineering-patterns/scripts/optimize-prompt.py",
"license": "MIT License",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/voice-ai-engine-development/examples/complete_voice_engine.py | """
Example: Complete Voice AI Engine Implementation
This example demonstrates a minimal but complete voice AI engine
with all core components: Transcriber, Agent, Synthesizer, and WebSocket integration.
"""
import asyncio
from typing import Dict, AsyncGenerator
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from dataclasses import dataclass
import logging
# Module-wide logging configuration and the FastAPI application object.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI()
# ============================================================================
# Data Models
# ============================================================================
@dataclass
class Transcription:
    """A piece of recognized speech emitted by the transcriber."""
    message: str       # recognized text
    confidence: float  # recognizer confidence score
    is_final: bool     # True when this is a finalized (not interim) result
    is_interrupt: bool = False  # set when the user spoke over the bot
@dataclass
class AgentResponse:
    """A reply produced by the agent, ready for synthesis."""
    message: str
    is_interruptible: bool = True  # False would protect the reply from barge-in
@dataclass
class SynthesisResult:
    """Handle to an in-progress speech synthesis."""
    # Async stream of raw audio chunks.
    chunk_generator: AsyncGenerator[bytes, None]
    # Maps seconds-played -> text actually spoken so far (used on interrupt).
    get_message_up_to: callable
# ============================================================================
# Base Worker Pattern
# ============================================================================
class BaseWorker:
    """Base class for all workers in the pipeline.

    A worker pulls items from its input queue forever and hands each one
    to process(), which subclasses override.
    """

    def __init__(self, input_queue: asyncio.Queue, output_queue: asyncio.Queue):
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.active = False
        self._task = None

    def start(self):
        """Begin consuming the input queue on the running event loop."""
        self.active = True
        self._task = asyncio.create_task(self._run_loop())

    async def _run_loop(self):
        """Pump the input queue until terminate() flips `active` off."""
        while self.active:
            try:
                await self.process(await self.input_queue.get())
            except Exception as e:
                # A failing item must not kill the whole pipeline.
                logger.error(f"Worker error: {e}", exc_info=True)

    async def process(self, item):
        """Handle one queue item; subclasses must override."""
        raise NotImplementedError

    def terminate(self):
        """Stop the loop and cancel the pending task, if any."""
        self.active = False
        if self._task:
            self._task.cancel()
# ============================================================================
# Transcriber Component
# ============================================================================
class DeepgramTranscriber(BaseWorker):
    """Converts audio chunks to text transcriptions using Deepgram."""

    def __init__(self, config: Dict):
        super().__init__(asyncio.Queue(), asyncio.Queue())
        self.config = config
        self.is_muted = False

    def send_audio(self, chunk: bytes):
        """Entry point for client audio; silence is substituted while muted."""
        if self.is_muted:
            # Feeding silence (instead of dropping) keeps timing intact and
            # prevents the bot from transcribing its own speech (echo).
            self.input_queue.put_nowait(self.create_silent_chunk(len(chunk)))
        else:
            self.input_queue.put_nowait(chunk)

    def create_silent_chunk(self, size: int) -> bytes:
        """Return `size` bytes of silence."""
        return b'\x00' * size

    def mute(self):
        """Called when bot starts speaking (prevents echo)."""
        self.is_muted = True
        logger.info("π [TRANSCRIBER] Muted")

    def unmute(self):
        """Called when bot stops speaking."""
        self.is_muted = False
        logger.info("π [TRANSCRIBER] Unmuted")

    async def process(self, audio_chunk: bytes):
        """Turn one audio chunk into a (mock) transcription on the output queue."""
        # A real implementation would stream the chunk to the Deepgram API.
        await asyncio.sleep(0.1)  # simulate API call delay
        result = Transcription(
            message="Hello, how can I help you?",
            confidence=0.95,
            is_final=True
        )
        logger.info(f"π€ [TRANSCRIBER] Received: '{result.message}'")
        self.output_queue.put_nowait(result)
# ============================================================================
# Agent Component
# ============================================================================
class GeminiAgent(BaseWorker):
    """LLM-powered conversational agent using Google Gemini."""

    def __init__(self, config: Dict):
        super().__init__(asyncio.Queue(), asyncio.Queue())
        self.config = config
        self.conversation_history = []

    async def process(self, transcription: Transcription):
        """Record the user turn, then stream agent replies to the output queue."""
        self.conversation_history.append({
            "role": "user",
            "content": transcription.message
        })
        logger.info(f"π€ [AGENT] Generating response for: '{transcription.message}'")
        async for reply in self.generate_response(transcription.message):
            self.output_queue.put_nowait(reply)

    async def generate_response(self, user_input: str) -> AsyncGenerator[AgentResponse, None]:
        """Yield the complete, buffered response for one user utterance."""
        # A real implementation would stream tokens from the Gemini API.
        await asyncio.sleep(0.5)  # simulate streaming delay
        # IMPORTANT: the whole response is buffered before yielding so the
        # synthesizer receives one utterance instead of choppy fragments.
        full_response = f"I understand you said: {user_input}. How can I assist you further?"
        self.conversation_history.append({
            "role": "assistant",
            "content": full_response
        })
        logger.info(f"π€ [AGENT] Generated: '{full_response}'")
        yield AgentResponse(
            message=full_response,
            is_interruptible=True
        )
# ============================================================================
# Synthesizer Component
# ============================================================================
class ElevenLabsSynthesizer:
    """Converts text to speech using ElevenLabs."""

    def __init__(self, config: Dict):
        self.config = config

    async def create_speech(self, message: str, chunk_size: int = 1024) -> SynthesisResult:
        """
        Generate speech audio from text.

        Returns SynthesisResult with:
        - chunk_generator: AsyncGenerator yielding audio chunks
        - get_message_up_to: maps playback seconds to the text spoken so far
        """
        # A real implementation would call the ElevenLabs API here.
        logger.info(f"π [SYNTHESIZER] Synthesizing {len(message)} characters")

        async def audio_chunks():
            # One mock chunk per ~10 characters of text.
            for _ in range(len(message) // 10 + 1):
                await asyncio.sleep(0.1)  # simulate API delay
                yield b'\x00' * chunk_size  # placeholder for PCM audio

        def spoken_prefix(seconds: float) -> str:
            # Estimate: ~150 words/min -> ~2.5 words/sec at ~5 chars/word,
            # i.e. roughly 12.5 characters per second.
            chars_per_second = 12.5
            return message[:int(seconds * chars_per_second)]

        return SynthesisResult(
            chunk_generator=audio_chunks(),
            get_message_up_to=spoken_prefix
        )
# ============================================================================
# Output Device
# ============================================================================
class WebsocketOutputDevice:
    """Sends audio chunks to client via WebSocket."""

    def __init__(self, websocket: WebSocket):
        self.websocket = websocket

    async def consume_nonblocking(self, chunk: bytes):
        """Forward one audio chunk to the connected client."""
        await self.websocket.send_bytes(chunk)
# ============================================================================
# Conversation Orchestrator
# ============================================================================
class StreamingConversation:
    """Orchestrates the entire voice conversation pipeline.

    Wires transcriber -> agent -> synthesizer -> output device and manages
    turn-taking plus user barge-in (interrupts).
    """

    def __init__(
        self,
        output_device: WebsocketOutputDevice,
        transcriber: DeepgramTranscriber,
        agent: GeminiAgent,
        synthesizer: ElevenLabsSynthesizer
    ):
        self.output_device = output_device
        self.transcriber = transcriber
        self.agent = agent
        self.synthesizer = synthesizer
        self.is_human_speaking = True        # False while the bot holds the floor
        self.interrupt_event = asyncio.Event()  # set when the user barges in

    async def start(self):
        """Start all workers"""
        logger.info("π [CONVERSATION] Starting...")
        # Start workers
        self.transcriber.start()
        self.agent.start()
        # Start processing pipelines (fire-and-forget background tasks)
        asyncio.create_task(self._process_transcriptions())
        asyncio.create_task(self._process_agent_responses())

    async def _process_transcriptions(self):
        """Forward transcriber output to the agent, flagging barge-ins."""
        while True:
            transcription = await self.transcriber.output_queue.get()
            # Speech arriving while the bot is talking counts as an interrupt
            if not self.is_human_speaking:
                logger.info("β οΈ [INTERRUPT] User interrupted bot")
                self.interrupt_event.set()
                transcription.is_interrupt = True
                self.is_human_speaking = True
            # Send to agent
            await self.agent.input_queue.put(transcription)

    async def _process_agent_responses(self):
        """Synthesize each agent reply and stream it to the output device."""
        while True:
            response = await self.agent.output_queue.get()
            self.is_human_speaking = False
            # Mute transcriber so the bot does not transcribe itself (echo)
            self.transcriber.mute()
            # Synthesize and play
            synthesis_result = await self.synthesizer.create_speech(response.message)
            await self._send_speech_to_output(synthesis_result, seconds_per_chunk=0.1)
            # Hand the floor back to the user
            self.transcriber.unmute()
            self.is_human_speaking = True

    async def _send_speech_to_output(self, synthesis_result: SynthesisResult, seconds_per_chunk: float):
        """
        Send synthesized audio to output with rate limiting

        CRITICAL: pacing chunks in real time is what allows an interrupt to
        take effect mid-utterance instead of after the whole clip is queued.
        """
        chunk_idx = 0
        async for chunk in synthesis_result.chunk_generator:
            # Check for interrupt before emitting each chunk
            if self.interrupt_event.is_set():
                logger.info(f"π [INTERRUPT] Stopped after {chunk_idx} chunks")
                # Calculate what was actually spoken
                seconds_spoken = chunk_idx * seconds_per_chunk
                partial_message = synthesis_result.get_message_up_to(seconds_spoken)
                logger.info(f"π [INTERRUPT] Partial message: '{partial_message}'")
                # Clear interrupt event
                self.interrupt_event.clear()
                return
            start_time = asyncio.get_event_loop().time()
            # Send chunk to output device
            await self.output_device.consume_nonblocking(chunk)
            # CRITICAL: wait out the chunk's real-time duration (minus the
            # time spent sending) before the next one — this is what makes
            # interrupts take effect promptly.
            processing_time = asyncio.get_event_loop().time() - start_time
            await asyncio.sleep(max(seconds_per_chunk - processing_time, 0))
            chunk_idx += 1

    def receive_audio(self, audio_chunk: bytes):
        """Receive audio from client"""
        self.transcriber.send_audio(audio_chunk)

    async def terminate(self):
        """Gracefully shut down all workers"""
        logger.info("π [CONVERSATION] Terminating...")
        self.transcriber.terminate()
        self.agent.terminate()
        # Give in-flight queue items a moment to drain
        await asyncio.sleep(0.5)
# ============================================================================
# WebSocket Endpoint
# ============================================================================
@app.websocket("/conversation")
async def conversation_endpoint(websocket: WebSocket):
    """WebSocket endpoint for voice conversations.

    Accepts the connection, wires up the transcriber/agent/synthesizer
    pipeline, then forwards every incoming binary frame into it until the
    client disconnects.
    """
    await websocket.accept()
    # Fixed: this log string was split across two lines (garbled emoji),
    # leaving an unterminated literal — a syntax error.
    logger.info("β [WEBSOCKET] Client connected")
    # Configuration
    config = {
        "transcriberProvider": "deepgram",
        "llmProvider": "gemini",
        "voiceProvider": "elevenlabs",
        "prompt": "You are a helpful AI assistant.",
    }
    # Create components
    transcriber = DeepgramTranscriber(config)
    agent = GeminiAgent(config)
    synthesizer = ElevenLabsSynthesizer(config)
    output_device = WebsocketOutputDevice(websocket)
    # Create conversation orchestrator
    conversation = StreamingConversation(
        output_device=output_device,
        transcriber=transcriber,
        agent=agent,
        synthesizer=synthesizer
    )
    # Start conversation pipeline tasks
    await conversation.start()
    try:
        # Forward incoming audio frames into the pipeline
        async for message in websocket.iter_bytes():
            conversation.receive_audio(message)
    except WebSocketDisconnect:
        logger.info("β [WEBSOCKET] Client disconnected")
    except Exception as e:
        logger.error(f"β [WEBSOCKET] Error: {e}", exc_info=True)
    finally:
        # Always tear the pipeline down, whatever ended the session.
        await conversation.terminate()
# ============================================================================
# Main Entry Point
# ============================================================================
# Run the engine with uvicorn when executed directly.
if __name__ == "__main__":
    import uvicorn
    logger.info("π Starting Voice AI Engine...")
    uvicorn.run(app, host="0.0.0.0", port=8000)
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/voice-ai-engine-development/examples/complete_voice_engine.py",
"license": "MIT License",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/voice-ai-engine-development/examples/gemini_agent_example.py | """
Example: Gemini Agent Implementation with Streaming
This example shows how to implement a Gemini-powered agent
that properly buffers responses to prevent audio jumping.
"""
import asyncio
from typing import AsyncGenerator, List, Dict
from dataclasses import dataclass
import logging
logger = logging.getLogger(__name__)
@dataclass
class Message:
    """One turn of the conversation."""
    role: str  # "user" or "assistant"
    content: str
@dataclass
class GeneratedResponse:
    """A fully buffered agent reply, ready for synthesis."""
    message: str
    is_interruptible: bool = True  # False would protect the reply from barge-in
class GeminiAgent:
    """
    LLM-powered conversational agent using Google Gemini

    Key Features:
    - Maintains conversation history
    - Streams responses from Gemini API
    - Buffers entire response before yielding (prevents audio jumping)
    - Handles interrupts gracefully
    """

    def __init__(self, config: Dict):
        self.config = config
        self.conversation_history: List[Message] = []
        self.system_prompt = config.get("prompt", "You are a helpful AI assistant.")
        self.current_task = None  # in-flight generation task, if any

    async def generate_response(
        self,
        user_input: str,
        is_interrupt: bool = False
    ) -> AsyncGenerator[GeneratedResponse, None]:
        """
        Generate streaming response from Gemini

        IMPORTANT: This buffers the entire LLM response before yielding
        to prevent audio jumping/cutting off.

        Args:
            user_input: The user's message
            is_interrupt: Whether this is an interrupt

        Yields:
            GeneratedResponse with complete buffered message
        """
        # Add user message to history
        self.conversation_history.append(
            Message(role="user", content=user_input)
        )
        logger.info(f"π€ [AGENT] Generating response for: '{user_input}'")
        # Build conversation context for Gemini
        contents = self._build_gemini_contents()
        # Stream response from Gemini and buffer it
        full_response = ""
        try:
            # In a real implementation, this would call Gemini API
            # async for chunk in self._create_gemini_stream(contents):
            #     if isinstance(chunk, str):
            #         full_response += chunk
            # For this example, simulate streaming
            async for chunk in self._simulate_gemini_stream(user_input):
                full_response += chunk
                # Log progress (optional)
                if len(full_response) % 50 == 0:
                    logger.debug(f"π€ [AGENT] Buffered {len(full_response)} chars...")
        except Exception as e:
            logger.error(f"β [AGENT] Error generating response: {e}")
            full_response = "I apologize, but I encountered an error. Could you please try again?"
        # CRITICAL: Only yield after buffering the ENTIRE response
        # This prevents multiple TTS calls that cause audio jumping
        if full_response.strip():
            # Add to conversation history
            self.conversation_history.append(
                Message(role="assistant", content=full_response)
            )
            # Fixed: this log call was an unterminated string literal split
            # across two lines (garbled emoji) — a syntax error.
            logger.info(f"β [AGENT] Generated complete response ({len(full_response)} chars)")
            yield GeneratedResponse(
                message=full_response.strip(),
                is_interruptible=True
            )

    def _build_gemini_contents(self) -> List[Dict]:
        """
        Build conversation contents for Gemini API

        Format:
        [
            {"role": "user", "parts": [{"text": "System: ..."}]},
            {"role": "model", "parts": [{"text": "Understood."}]},
            {"role": "user", "parts": [{"text": "Hello"}]},
            {"role": "model", "parts": [{"text": "Hi there!"}]},
            ...
        ]
        """
        contents = []
        # Add system prompt as first user message (Gemini has no system role here)
        if self.system_prompt:
            contents.append({
                "role": "user",
                "parts": [{"text": f"System Instruction: {self.system_prompt}"}]
            })
            contents.append({
                "role": "model",
                "parts": [{"text": "Understood."}]
            })
        # Add conversation history
        for message in self.conversation_history:
            role = "user" if message.role == "user" else "model"
            contents.append({
                "role": role,
                "parts": [{"text": message.content}]
            })
        return contents

    async def _simulate_gemini_stream(self, user_input: str) -> AsyncGenerator[str, None]:
        """
        Simulate Gemini streaming response

        In a real implementation, this would be:

        async def _create_gemini_stream(self, contents):
            response = await genai.GenerativeModel('gemini-pro').generate_content_async(
                contents,
                stream=True
            )
            async for chunk in response:
                if chunk.text:
                    yield chunk.text
        """
        # Simulate response
        response = f"I understand you said: {user_input}. How can I assist you further?"
        # Simulate streaming by yielding chunks
        chunk_size = 10
        for i in range(0, len(response), chunk_size):
            chunk = response[i:i + chunk_size]
            await asyncio.sleep(0.05)  # Simulate network delay
            yield chunk

    def update_last_bot_message_on_cut_off(self, partial_message: str):
        """
        Update conversation history when bot is interrupted

        This ensures the conversation history reflects what was actually
        spoken, not what was planned to be spoken.

        Args:
            partial_message: The partial message that was actually spoken
        """
        if self.conversation_history and self.conversation_history[-1].role == "assistant":
            # Update the last bot message with the partial message
            self.conversation_history[-1].content = partial_message
            logger.info(f"π [AGENT] Updated history with partial message: '{partial_message}'")

    def cancel_current_task(self):
        """Cancel the current generation task (for interrupts)"""
        if self.current_task and not self.current_task.done():
            self.current_task.cancel()
            logger.info("π [AGENT] Cancelled current generation task")

    def get_conversation_history(self) -> List[Message]:
        """Get a copy of the full conversation history"""
        return self.conversation_history.copy()

    def clear_conversation_history(self):
        """Clear the conversation history"""
        self.conversation_history.clear()
        logger.info("ποΈ [AGENT] Cleared conversation history")
# ============================================================================
# Example Usage
# ============================================================================
async def example_usage():
    """Example of how to use the GeminiAgent"""
    # Create an agent with an inline configuration.
    agent = GeminiAgent({
        "prompt": "You are a helpful AI assistant specializing in voice conversations.",
        "llmProvider": "gemini"
    })

    # Simulate a short conversation.
    for user_message in (
        "Hello, how are you?",
        "What's the weather like today?",
        "Thank you!"
    ):
        print(f"\nπ€ User: {user_message}")
        async for reply in agent.generate_response(user_message):
            print(f"π€ Bot: {reply.message}")

    # Print conversation history
    print("\nπ Conversation History:")
    for i, message in enumerate(agent.get_conversation_history(), 1):
        print(f"{i}. {message.role}: {message.content}")
# Demo entry point.
if __name__ == "__main__":
    asyncio.run(example_usage())
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/voice-ai-engine-development/examples/gemini_agent_example.py",
"license": "MIT License",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/voice-ai-engine-development/examples/interrupt_system_example.py | """
Example: Interrupt System Implementation
This example demonstrates how to implement a robust interrupt system
that allows users to interrupt the bot mid-sentence.
"""
import asyncio
import threading
from typing import Any
from dataclasses import dataclass
import logging
logger = logging.getLogger(__name__)
# ============================================================================
# InterruptibleEvent Pattern
# ============================================================================
class InterruptibleEvent:
    """
    Wrapper for events that can be interrupted.

    Every event in the pipeline is wrapped in an InterruptibleEvent so the
    system can stop processing it mid-stream.
    """

    def __init__(self, payload: Any, is_interruptible: bool = True):
        self.payload = payload
        self.is_interruptible = is_interruptible
        self.interruption_event = threading.Event()  # unset until interrupted
        self.interrupted = False

    def interrupt(self) -> bool:
        """
        Interrupt this event.

        Returns:
            True only on the first successful interruption of an
            interruptible event; False otherwise.
        """
        if not self.is_interruptible:
            return False
        if self.interrupted:
            # Already interrupted once; nothing more to do.
            return False
        self.interruption_event.set()  # signal consumers to stop
        self.interrupted = True
        logger.info("β οΈ [INTERRUPT] Event interrupted")
        return True

    def is_interrupted(self) -> bool:
        """True once interrupt() has fired."""
        return self.interruption_event.is_set()
# ============================================================================
# Conversation with Interrupt Support
# ============================================================================
class ConversationWithInterrupts:
    """
    Conversation orchestrator with interrupt support.

    Tracks all in-flight interruptible events, broadcasts interrupts to
    them, cancels current agent/synthesizer tasks, and lets workers update
    history with what was actually spoken.
    """

    def __init__(self):
        self.is_human_speaking = True
        self.interruptible_events = asyncio.Queue()
        self.agent = None               # set externally
        self.synthesizer_worker = None  # set externally

    def broadcast_interrupt(self) -> bool:
        """
        Interrupt every queued event and cancel in-flight tasks.

        Called when the user starts speaking while the bot is speaking.

        Returns:
            True if at least one event was interrupted.
        """
        num_interrupts = 0
        # Drain the queue, interrupting events as we go.
        while True:
            try:
                pending = self.interruptible_events.get_nowait()
            except asyncio.QueueEmpty:
                break
            if pending.interrupt():
                num_interrupts += 1
        # Cancel whatever the agent and synthesizer are doing right now.
        if self.agent:
            self.agent.cancel_current_task()
        if self.synthesizer_worker:
            self.synthesizer_worker.cancel_current_task()
        logger.info(f"β οΈ [INTERRUPT] Interrupted {num_interrupts} events")
        return num_interrupts > 0

    def add_interruptible_event(self, event: InterruptibleEvent):
        """Register an in-flight event so it can be interrupted later."""
        self.interruptible_events.put_nowait(event)
# ============================================================================
# Synthesis Worker with Interrupt Support
# ============================================================================
class SynthesisWorkerWithInterrupts:
    """Synthesis worker that can be cut off mid-utterance.

    Key features:
    - checks the stop event before sending each audio chunk
    - on interrupt, computes the partial message actually spoken
    - paces chunk delivery in real time so interrupts take effect quickly
    """

    def __init__(self, agent, output_device):
        self.agent = agent
        self.output_device = output_device
        self.current_task = None  # asyncio.Task for the in-flight synthesis

    async def send_speech_to_output(
        self,
        message: str,
        synthesis_result,
        stop_event: threading.Event,
        seconds_per_chunk: float = 0.1
    ) -> tuple[str, bool]:
        """Stream synthesized speech to the output device, honoring interrupts.

        Args:
            message: The full message being synthesized.
            synthesis_result: Object exposing ``chunk_generator`` (an async
                iterator of audio chunks) and ``get_message_up_to(seconds)``.
            stop_event: Set by the interrupt machinery to stop playback.
            seconds_per_chunk: Playback duration of each audio chunk.

        Returns:
            Tuple of (message_sent, was_cut_off):
            - message_sent: the text actually spoken (partial if interrupted)
            - was_cut_off: True if interrupted, False if completed
        """
        chunk_idx = 0
        async for chunk_result in synthesis_result.chunk_generator:
            # CRITICAL: check for an interrupt before sending each chunk.
            if stop_event.is_set():
                logger.info(f"π [SYNTHESIZER] Interrupted after {chunk_idx} chunks")
                # Work out how much of the message was actually spoken.
                seconds_spoken = chunk_idx * seconds_per_chunk
                partial_message = synthesis_result.get_message_up_to(seconds_spoken)
                logger.info(f"π [SYNTHESIZER] Partial message: '{partial_message}'")
                return partial_message, True  # cut_off = True
            start_time = asyncio.get_event_loop().time()
            # Send chunk to output device.
            await self.output_device.consume_nonblocking(chunk_result.chunk)
            # CRITICAL: wait for the chunk to finish playing before sending
            # the next one - this real-time pacing is what makes interrupts
            # take effect promptly.
            processing_time = asyncio.get_event_loop().time() - start_time
            await asyncio.sleep(max(seconds_per_chunk - processing_time, 0))
            chunk_idx += 1
        # Completed without interruption.
        # Fixed: this log line was a corrupted/unterminated f-string
        # (mojibake emoji split across two lines), which was a SyntaxError.
        logger.info(f"[SYNTHESIZER] Completed {chunk_idx} chunks")
        return message, False  # cut_off = False

    def cancel_current_task(self):
        """Cancel the in-flight synthesis task, if any."""
        if self.current_task and not self.current_task.done():
            self.current_task.cancel()
            logger.info("π [SYNTHESIZER] Cancelled current task")
# ============================================================================
# Transcription Worker with Interrupt Detection
# ============================================================================
class TranscriptionWorkerWithInterrupts:
    """Transcription worker that detects barge-in (user talking over the bot).

    When a transcription arrives while the bot is speaking, that counts as
    an interrupt: the worker broadcasts it to the conversation and marks
    the transcription accordingly.
    """

    def __init__(self, conversation):
        self.conversation = conversation

    async def process(self, transcription):
        """Handle one transcription, detecting and propagating interrupts."""
        bot_is_speaking = not self.conversation.is_human_speaking
        if bot_is_speaking:
            # The user spoke while the bot was talking - that's a barge-in.
            logger.info("β οΈ [TRANSCRIPTION] User interrupted bot!")
            transcription.is_interrupt = self.conversation.broadcast_interrupt()
        # Either way, the human is the one speaking now.
        self.conversation.is_human_speaking = True
        # Continue processing transcription...
        logger.info(f"π€ [TRANSCRIPTION] Received: '{transcription.message}'")
# ============================================================================
# Example Usage
# ============================================================================
@dataclass
class MockTranscription:
    """Minimal stand-in for a transcription result used by the demo."""

    message: str  # transcribed text
    is_interrupt: bool = False  # True when this transcription barged in
@dataclass
class MockSynthesisResult:
    """Mock synthesis result exposing the interface the worker expects."""

    @property
    def chunk_generator(self):
        """Async iterator of fake audio chunks.

        Fixed: this was a plain method, but consumers iterate it as an
        attribute (``async for ... in synthesis_result.chunk_generator``),
        so the demo crashed with a TypeError. Exposing it as a property
        returning a fresh async generator matches the expected interface.
        """
        async def _chunks():
            for _ in range(10):
                await asyncio.sleep(0.1)
                yield type('obj', (object,), {'chunk': b'\x00' * 1024})()
        return _chunks()

    def get_message_up_to(self, seconds: float) -> str:
        """Return the prefix of the message spoken after ``seconds`` seconds."""
        full_message = "I think the weather will be nice today and tomorrow and the day after."
        chars_per_second = len(full_message) / 1.0  # Assume 1 second total
        char_index = int(seconds * chars_per_second)
        return full_message[:char_index]
async def example_interrupt_scenario():
    """Demo: the user interrupts the bot mid-sentence.

    Wires up mock components, starts a synthesis task in the background,
    interrupts it after ~0.3s, and shows the partial message that was
    actually "spoken".
    """
    print("π¬ Scenario: User interrupts bot mid-sentence\n")

    conversation = ConversationWithInterrupts()

    # Minimal mock collaborators for the demo.
    class MockAgent:
        def cancel_current_task(self):
            print("π [AGENT] Task cancelled")

        def update_last_bot_message_on_cut_off(self, partial_message):
            print(f"π [AGENT] Updated history: '{partial_message}'")

    class MockOutputDevice:
        async def consume_nonblocking(self, chunk):
            pass

    agent = MockAgent()
    output_device = MockOutputDevice()
    conversation.agent = agent

    # Create the synthesis worker and wire it into the conversation.
    synthesis_worker = SynthesisWorkerWithInterrupts(agent, output_device)
    conversation.synthesizer_worker = synthesis_worker

    # Register an interruptible event representing the bot's utterance.
    stop_event = threading.Event()
    interruptible_event = InterruptibleEvent(
        payload="Bot is speaking...",
        is_interruptible=True
    )
    conversation.add_interruptible_event(interruptible_event)

    print("π€ Bot starts speaking: 'I think the weather will be nice today and tomorrow and the day after.'\n")
    conversation.is_human_speaking = False

    # Run synthesis in the background so we can interrupt it.
    synthesis_result = MockSynthesisResult()
    synthesis_task = asyncio.create_task(
        synthesis_worker.send_speech_to_output(
            message="I think the weather will be nice today and tomorrow and the day after.",
            synthesis_result=synthesis_result,
            stop_event=stop_event,
            seconds_per_chunk=0.1
        )
    )

    # Let the bot "speak" briefly, then barge in.
    await asyncio.sleep(0.3)
    print("π€ User interrupts: 'Stop!'\n")

    # Trigger the interrupt through both channels the worker observes.
    conversation.broadcast_interrupt()
    stop_event.set()

    # Wait for synthesis to wind down and report what happened.
    message_sent, was_cut_off = await synthesis_task
    # Fixed: this print was a corrupted/unterminated f-string (SyntaxError).
    print("\nResult:")
    print(f" - Message sent: '{message_sent}'")
    print(f" - Was cut off: {was_cut_off}")

    # Record the partial utterance in the agent's conversation history.
    if was_cut_off:
        agent.update_last_bot_message_on_cut_off(message_sent)
# Entry point: run the interrupt demo when executed as a script.
if __name__ == "__main__":
    asyncio.run(example_interrupt_scenario())
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/voice-ai-engine-development/examples/interrupt_system_example.py",
"license": "MIT License",
"lines": 252,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/voice-ai-engine-development/templates/base_worker_template.py | """
Template: Base Worker Implementation
Use this template as a starting point for creating new workers
in your voice AI pipeline.
"""
import asyncio
from typing import Any
import logging
logger = logging.getLogger(__name__)
class BaseWorker:
    """
    Base class for all workers in the voice AI pipeline.

    Workers follow the producer-consumer pattern:
    - Consume items from input_queue
    - Process items
    - Produce results to output_queue

    All workers run concurrently via asyncio.
    """

    def __init__(self, input_queue: asyncio.Queue, output_queue: asyncio.Queue):
        """
        Initialize the worker.

        Args:
            input_queue: Queue to consume items from
            output_queue: Queue to produce results to
        """
        self.input_queue = input_queue
        self.output_queue = output_queue
        self.active = False
        self._task = None  # asyncio.Task running _run_loop()

    def start(self):
        """Start the worker's processing loop as a background task."""
        self.active = True
        self._task = asyncio.create_task(self._run_loop())
        # Fixed: this log line was a corrupted/unterminated f-string
        # (mojibake emoji split across two lines), which was a SyntaxError.
        logger.info(f"[{self.__class__.__name__}] Started")

    async def _run_loop(self):
        """
        Main processing loop - runs until terminated.

        This loop:
        1. Waits for items from input_queue
        2. Processes each item
        3. Handles errors gracefully (logs and continues)
        """
        while self.active:
            try:
                # Block until an item arrives.
                item = await self.input_queue.get()
                await self.process(item)
            except asyncio.CancelledError:
                # Task was cancelled (normal during shutdown).
                logger.info(f"π [{self.__class__.__name__}] Task cancelled")
                break
            except Exception as e:
                # Log the error but keep the worker alive for the next item.
                logger.error(
                    f"β [{self.__class__.__name__}] Error processing item: {e}",
                    exc_info=True
                )

    async def process(self, item: Any):
        """
        Process a single item. Subclasses must override this.

        Args:
            item: The item to process

        Raises:
            NotImplementedError: Always, unless overridden.
        """
        raise NotImplementedError(
            f"{self.__class__.__name__} must implement process()"
        )

    def terminate(self):
        """Stop the worker gracefully: clear the active flag, cancel the task."""
        self.active = False
        if self._task and not self._task.done():
            self._task.cancel()
        logger.info(f"π [{self.__class__.__name__}] Terminated")

    async def wait_for_completion(self):
        """Await the worker task, swallowing the expected CancelledError."""
        if self._task:
            try:
                await self._task
            except asyncio.CancelledError:
                pass
# ============================================================================
# Example: Custom Worker Implementation
# ============================================================================
class ExampleWorker(BaseWorker):
    """
    Example worker demonstrating how to extend BaseWorker.

    Receives strings, converts them to uppercase, and sends the results
    to the output queue.
    """

    def __init__(self, input_queue: asyncio.Queue, output_queue: asyncio.Queue):
        super().__init__(input_queue, output_queue)
        # Number of items processed so far (for logging/observability).
        self.processed_count = 0

    async def process(self, item: str):
        """
        Uppercase one item and enqueue the result.

        Args:
            item: String to convert to uppercase
        """
        # Simulate some processing time.
        await asyncio.sleep(0.1)
        result = item.upper()
        self.output_queue.put_nowait(result)
        self.processed_count += 1
        # Fixed: this log call began with a corrupted/unterminated f-string
        # (mojibake emoji split across two lines), which was a SyntaxError.
        logger.info(
            f"[{self.__class__.__name__}] "
            f"Processed '{item}' -> '{result}' "
            f"(total: {self.processed_count})"
        )
# ============================================================================
# Example Usage
# ============================================================================
async def example_usage():
    """Demo: run an ExampleWorker over a few items and print the results."""
    # Create queues connecting us to the worker.
    input_queue = asyncio.Queue()
    output_queue = asyncio.Queue()

    # Create and start the worker.
    worker = ExampleWorker(input_queue, output_queue)
    worker.start()

    # Feed work into the pipeline.
    items = ["hello", "world", "voice", "ai"]
    for item in items:
        input_queue.put_nowait(item)

    # Give the worker time to process everything (4 items * 0.1s each).
    await asyncio.sleep(0.5)

    # Drain the results.
    results = []
    while not output_queue.empty():
        results.append(await output_queue.get())

    # Fixed: this print was a corrupted/unterminated f-string (SyntaxError).
    print(f"\nResults: {results}")

    # Shut the worker down cleanly.
    worker.terminate()
    await worker.wait_for_completion()
# Entry point: configure logging and run the demo when executed directly.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    asyncio.run(example_usage())
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/voice-ai-engine-development/templates/base_worker_template.py",
"license": "MIT License",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/voice-ai-engine-development/templates/multi_provider_factory_template.py | """
Template: Multi-Provider Factory
Use this template to create a factory that supports multiple providers
for transcription, LLM, and TTS services.
"""
from typing import Dict, Any
from abc import ABC, abstractmethod
import logging
logger = logging.getLogger(__name__)
# ============================================================================
# Provider Interfaces
# ============================================================================
class TranscriberProvider(ABC):
    """Interface every speech-to-text provider must implement."""

    @abstractmethod
    async def transcribe_stream(self, audio_stream):
        """Transcribe a stream of audio into text."""
class LLMProvider(ABC):
    """Interface every large-language-model provider must implement."""

    @abstractmethod
    async def generate_response(self, messages, stream=True):
        """Generate a response for the given message history."""
class TTSProvider(ABC):
    """Interface every text-to-speech provider must implement."""

    @abstractmethod
    async def synthesize_speech(self, text):
        """Synthesize audio for the given text."""
# ============================================================================
# Multi-Provider Factory
# ============================================================================
class VoiceComponentFactory:
    """
    Factory for creating voice AI components with multiple provider support.

    Supports:
    - Multiple transcription providers (Deepgram, AssemblyAI, Azure, Google)
    - Multiple LLM providers (OpenAI, Gemini, Claude)
    - Multiple TTS providers (ElevenLabs, Azure, Google, Polly, Play.ht)
    """

    def __init__(self):
        # Registries mapping provider name -> factory method.
        self.transcriber_providers = {
            "deepgram": self._create_deepgram_transcriber,
            "assemblyai": self._create_assemblyai_transcriber,
            "azure": self._create_azure_transcriber,
            "google": self._create_google_transcriber,
        }
        self.llm_providers = {
            "openai": self._create_openai_agent,
            "gemini": self._create_gemini_agent,
            "claude": self._create_claude_agent,
        }
        self.tts_providers = {
            "elevenlabs": self._create_elevenlabs_synthesizer,
            "azure": self._create_azure_synthesizer,
            "google": self._create_google_synthesizer,
            "polly": self._create_polly_synthesizer,
            "playht": self._create_playht_synthesizer,
        }

    def _resolve(
        self,
        registry: Dict[str, Any],
        config: Dict[str, Any],
        config_key: str,
        default: str,
        error_label: str,
        log_message: str,
    ):
        """Shared lookup/validation used by all three create_* methods.

        Args:
            registry: Provider-name -> factory-method mapping.
            config: User configuration dict.
            config_key: Key in config naming the provider.
            default: Provider used when config_key is absent.
            error_label: Label used in the ValueError message.
            log_message: Prefix for the creation log line.

        Returns:
            Whatever the selected factory method returns.

        Raises:
            ValueError: If the configured provider is not in the registry.
        """
        provider = config.get(config_key, default).lower()
        if provider not in registry:
            raise ValueError(
                f"Unknown {error_label} provider: {provider}. "
                f"Supported: {list(registry.keys())}"
            )
        logger.info(f"{log_message}: {provider}")
        return registry[provider](config)

    def create_transcriber(self, config: Dict[str, Any]):
        """
        Create transcriber based on configuration.

        Args:
            config: Configuration dict with 'transcriberProvider' key

        Returns:
            Transcriber instance

        Raises:
            ValueError: If provider is not supported
        """
        return self._resolve(
            self.transcriber_providers, config,
            "transcriberProvider", "deepgram",
            "transcriber", "π€ Creating transcriber",
        )

    def create_agent(self, config: Dict[str, Any]):
        """
        Create LLM agent based on configuration.

        Args:
            config: Configuration dict with 'llmProvider' key

        Returns:
            Agent instance

        Raises:
            ValueError: If provider is not supported
        """
        return self._resolve(
            self.llm_providers, config,
            "llmProvider", "openai",
            "LLM", "π€ Creating agent",
        )

    def create_synthesizer(self, config: Dict[str, Any]):
        """
        Create TTS synthesizer based on configuration.

        Args:
            config: Configuration dict with 'voiceProvider' key

        Returns:
            Synthesizer instance

        Raises:
            ValueError: If provider is not supported
        """
        return self._resolve(
            self.tts_providers, config,
            "voiceProvider", "elevenlabs",
            "voice", "π Creating synthesizer",
        )

    # ========================================================================
    # Transcriber Implementations (stubs - fill in for your deployment)
    # ========================================================================

    def _create_deepgram_transcriber(self, config: Dict[str, Any]):
        """Create Deepgram transcriber."""
        # TODO: Implement Deepgram transcriber, e.g.:
        # from .transcribers.deepgram import DeepgramTranscriber
        # return DeepgramTranscriber(
        #     api_key=config.get("deepgramApiKey"),
        #     model=config.get("deepgramModel", "nova-2"),
        #     language=config.get("language", "en-US")
        # )
        raise NotImplementedError("Deepgram transcriber not implemented")

    def _create_assemblyai_transcriber(self, config: Dict[str, Any]):
        """Create AssemblyAI transcriber."""
        # TODO: Implement AssemblyAI transcriber
        raise NotImplementedError("AssemblyAI transcriber not implemented")

    def _create_azure_transcriber(self, config: Dict[str, Any]):
        """Create Azure Speech transcriber."""
        # TODO: Implement Azure transcriber
        raise NotImplementedError("Azure transcriber not implemented")

    def _create_google_transcriber(self, config: Dict[str, Any]):
        """Create Google Cloud Speech transcriber."""
        # TODO: Implement Google transcriber
        raise NotImplementedError("Google transcriber not implemented")

    # ========================================================================
    # LLM Agent Implementations
    # ========================================================================

    def _create_openai_agent(self, config: Dict[str, Any]):
        """Create OpenAI agent."""
        # TODO: Implement OpenAI agent, e.g.:
        # from .agents.openai import OpenAIAgent
        # return OpenAIAgent(
        #     api_key=config.get("openaiApiKey"),
        #     model=config.get("openaiModel", "gpt-4"),
        #     system_prompt=config.get("prompt", "You are a helpful assistant.")
        # )
        raise NotImplementedError("OpenAI agent not implemented")

    def _create_gemini_agent(self, config: Dict[str, Any]):
        """Create Google Gemini agent."""
        # TODO: Implement Gemini agent, e.g.:
        # from .agents.gemini import GeminiAgent
        # return GeminiAgent(
        #     api_key=config.get("geminiApiKey"),
        #     model=config.get("geminiModel", "gemini-pro"),
        #     system_prompt=config.get("prompt", "You are a helpful assistant.")
        # )
        raise NotImplementedError("Gemini agent not implemented")

    def _create_claude_agent(self, config: Dict[str, Any]):
        """Create Anthropic Claude agent."""
        # TODO: Implement Claude agent
        raise NotImplementedError("Claude agent not implemented")

    # ========================================================================
    # TTS Synthesizer Implementations
    # ========================================================================

    def _create_elevenlabs_synthesizer(self, config: Dict[str, Any]):
        """Create ElevenLabs synthesizer."""
        # TODO: Implement ElevenLabs synthesizer, e.g.:
        # from .synthesizers.elevenlabs import ElevenLabsSynthesizer
        # return ElevenLabsSynthesizer(
        #     api_key=config.get("elevenlabsApiKey"),
        #     voice_id=config.get("elevenlabsVoiceId"),
        #     model_id=config.get("elevenlabsModel", "eleven_monolingual_v1")
        # )
        raise NotImplementedError("ElevenLabs synthesizer not implemented")

    def _create_azure_synthesizer(self, config: Dict[str, Any]):
        """Create Azure TTS synthesizer."""
        # TODO: Implement Azure synthesizer
        raise NotImplementedError("Azure synthesizer not implemented")

    def _create_google_synthesizer(self, config: Dict[str, Any]):
        """Create Google Cloud TTS synthesizer."""
        # TODO: Implement Google synthesizer
        raise NotImplementedError("Google synthesizer not implemented")

    def _create_polly_synthesizer(self, config: Dict[str, Any]):
        """Create Amazon Polly synthesizer."""
        # TODO: Implement Polly synthesizer
        raise NotImplementedError("Polly synthesizer not implemented")

    def _create_playht_synthesizer(self, config: Dict[str, Any]):
        """Create Play.ht synthesizer."""
        # TODO: Implement Play.ht synthesizer
        raise NotImplementedError("Play.ht synthesizer not implemented")
# ============================================================================
# Example Usage
# ============================================================================
def example_usage():
    """Demo: build all three voice components from a single config dict."""
    # Example configuration covering all three component kinds.
    config = {
        "transcriberProvider": "deepgram",
        "deepgramApiKey": "your-api-key",
        "llmProvider": "gemini",
        "geminiApiKey": "your-api-key",
        "voiceProvider": "elevenlabs",
        "elevenlabsApiKey": "your-api-key",
        "elevenlabsVoiceId": "your-voice-id",
        "prompt": "You are a helpful AI assistant."
    }

    # Create factory
    factory = VoiceComponentFactory()

    try:
        # Create components
        transcriber = factory.create_transcriber(config)
        agent = factory.create_agent(config)
        synthesizer = factory.create_synthesizer(config)
        # Fixed: this print was a corrupted/unterminated string literal
        # (mojibake emoji split across two lines), which was a SyntaxError.
        print("All components created successfully!")
    except ValueError as e:
        print(f"β Configuration error: {e}")
    except NotImplementedError as e:
        print(f"β οΈ Not implemented: {e}")
# Entry point: configure logging and run the factory demo when executed directly.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    example_usage()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/voice-ai-engine-development/templates/multi_provider_factory_template.py",
"license": "MIT License",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/last30days.py | #!/usr/bin/env python3
"""
last30days - Research a topic from the last 30 days on Reddit + X.
Usage:
python3 last30days.py <topic> [options]
Options:
--mock Use fixtures instead of real API calls
--emit=MODE Output mode: compact|json|md|context|path (default: compact)
--sources=MODE Source selection: auto|reddit|x|both (default: auto)
--quick Faster research with fewer sources (8-12 each)
--deep Comprehensive research with more sources (50-70 Reddit, 40-60 X)
--debug Enable verbose debug logging
"""
import argparse
import json
import os
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timezone
from pathlib import Path
# Add lib to path
SCRIPT_DIR = Path(__file__).parent.resolve()
sys.path.insert(0, str(SCRIPT_DIR))
from lib import (
dates,
dedupe,
env,
http,
models,
normalize,
openai_reddit,
reddit_enrich,
render,
schema,
score,
ui,
websearch,
xai_x,
)
def load_fixture(name: str) -> dict:
    """Load a JSON fixture by filename, returning {} if it does not exist."""
    fixture_path = SCRIPT_DIR.parent / "fixtures" / name
    if not fixture_path.exists():
        return {}
    with open(fixture_path) as handle:
        return json.load(handle)
def _search_reddit(
    topic: str,
    config: dict,
    selected_models: dict,
    from_date: str,
    to_date: str,
    depth: str,
    mock: bool,
) -> tuple:
    """Search Reddit via OpenAI (runs in a worker thread).

    Args:
        topic: The research topic.
        config: Environment config; must contain "OPENAI_API_KEY" unless mock.
        selected_models: Model selection; the "openai" key names the model.
        from_date: Start of the date range (ISO string).
        to_date: End of the date range (ISO string).
        depth: Search depth preset ("quick" | "default" | "deep").
        mock: When True, read a fixture instead of calling the API.

    Returns:
        Tuple of (reddit_items, raw_openai, error). ``error`` is None on
        success; on failure it holds a short description and ``raw_openai``
        holds {"error": ...} so the raw response can still be persisted.
    """
    raw_openai = None
    reddit_error = None
    if mock:
        raw_openai = load_fixture("openai_sample.json")
    else:
        try:
            raw_openai = openai_reddit.search_reddit(
                config["OPENAI_API_KEY"],
                selected_models["openai"],
                topic,
                from_date,
                to_date,
                depth=depth,
            )
        except http.HTTPError as e:
            # Keep the error payload so it can be written to raw output.
            raw_openai = {"error": str(e)}
            reddit_error = f"API error: {e}"
        except Exception as e:
            raw_openai = {"error": str(e)}
            reddit_error = f"{type(e).__name__}: {e}"
    # Parse response (an error payload parses to an empty item list).
    reddit_items = openai_reddit.parse_reddit_response(raw_openai or {})
    # Quick retry with a simpler query if the first pass found few results.
    # Only retried on a clean (non-mock, non-error) first attempt.
    if len(reddit_items) < 5 and not mock and not reddit_error:
        core = openai_reddit._extract_core_subject(topic)
        if core.lower() != topic.lower():
            try:
                retry_raw = openai_reddit.search_reddit(
                    config["OPENAI_API_KEY"],
                    selected_models["openai"],
                    core,
                    from_date, to_date,
                    depth=depth,
                )
                retry_items = openai_reddit.parse_reddit_response(retry_raw)
                # Add items not already found (deduplicated by URL).
                existing_urls = {item.get("url") for item in reddit_items}
                for item in retry_items:
                    if item.get("url") not in existing_urls:
                        reddit_items.append(item)
            except Exception:
                # Retry is best-effort; keep the first-pass results on failure.
                pass
    return reddit_items, raw_openai, reddit_error
def _search_x(
    topic: str,
    config: dict,
    selected_models: dict,
    from_date: str,
    to_date: str,
    depth: str,
    mock: bool,
) -> tuple:
    """Search X via xAI (runs in a worker thread).

    Args:
        topic: The research topic.
        config: Environment config; must contain "XAI_API_KEY" unless mock.
        selected_models: Model selection; the "xai" key names the model.
        from_date: Start of the date range (ISO string).
        to_date: End of the date range (ISO string).
        depth: Search depth preset ("quick" | "default" | "deep").
        mock: When True, read a fixture instead of calling the API.

    Returns:
        Tuple of (x_items, raw_xai, error). ``error`` is None on success;
        on failure ``raw_xai`` holds {"error": ...} for the raw output dump.
    """
    raw_xai = None
    x_error = None
    if mock:
        raw_xai = load_fixture("xai_sample.json")
    else:
        try:
            raw_xai = xai_x.search_x(
                config["XAI_API_KEY"],
                selected_models["xai"],
                topic,
                from_date,
                to_date,
                depth=depth,
            )
        except http.HTTPError as exc:
            # Preserve the error payload so it can be written to raw output.
            raw_xai = {"error": str(exc)}
            x_error = f"API error: {exc}"
        except Exception as exc:
            raw_xai = {"error": str(exc)}
            x_error = f"{type(exc).__name__}: {exc}"
    # Parse whatever we got (an error payload parses to an empty item list).
    x_items = xai_x.parse_x_response(raw_xai or {})
    return x_items, raw_xai, x_error
def run_research(
    topic: str,
    sources: str,
    config: dict,
    selected_models: dict,
    from_date: str,
    to_date: str,
    depth: str = "default",
    mock: bool = False,
    progress: ui.ProgressDisplay = None,
) -> tuple:
    """Run the research pipeline.

    Reddit and X searches run in parallel threads; Reddit items are then
    enriched sequentially with real thread data.

    Args:
        topic: The research topic.
        sources: Source mode ("both", "reddit", "x", "all", "web",
            "reddit-web", "x-web").
        config: Environment config with API keys.
        selected_models: Chosen model per provider ("openai" / "xai" keys).
        from_date: Start of the date range (ISO string).
        to_date: End of the date range (ISO string).
        depth: Search depth preset ("quick" | "default" | "deep").
        mock: When True, use fixtures instead of real API calls.
        progress: Optional progress display; all calls are guarded so None
            disables UI updates entirely.

    Returns:
        Tuple of (reddit_items, x_items, web_needed, raw_openai, raw_xai, raw_reddit_enriched, reddit_error, x_error)

    Note: web_needed is True when WebSearch should be performed by Claude.
    The script outputs a marker and Claude handles WebSearch in its session.
    """
    reddit_items = []
    x_items = []
    raw_openai = None
    raw_xai = None
    raw_reddit_enriched = []
    reddit_error = None
    x_error = None
    # Check if WebSearch is needed (always needed in web-only mode).
    web_needed = sources in ("all", "web", "reddit-web", "x-web")
    # Web-only mode: no API calls needed, Claude handles everything.
    if sources == "web":
        if progress:
            progress.start_web_only()
            progress.end_web_only()
        return reddit_items, x_items, True, raw_openai, raw_xai, raw_reddit_enriched, reddit_error, x_error
    # Determine which searches to run.
    run_reddit = sources in ("both", "reddit", "all", "reddit-web")
    run_x = sources in ("both", "x", "all", "x-web")
    # Run Reddit and X searches in parallel threads.
    reddit_future = None
    x_future = None
    with ThreadPoolExecutor(max_workers=2) as executor:
        # Submit both searches before collecting either result.
        if run_reddit:
            if progress:
                progress.start_reddit()
            reddit_future = executor.submit(
                _search_reddit, topic, config, selected_models,
                from_date, to_date, depth, mock
            )
        if run_x:
            if progress:
                progress.start_x()
            x_future = executor.submit(
                _search_x, topic, config, selected_models,
                from_date, to_date, depth, mock
            )
        # Collect results; .result() re-raises anything the worker raised
        # that the search helpers did not already convert into an error.
        if reddit_future:
            try:
                reddit_items, raw_openai, reddit_error = reddit_future.result()
                if reddit_error and progress:
                    progress.show_error(f"Reddit error: {reddit_error}")
            except Exception as e:
                reddit_error = f"{type(e).__name__}: {e}"
                if progress:
                    progress.show_error(f"Reddit error: {e}")
            if progress:
                progress.end_reddit(len(reddit_items))
        if x_future:
            try:
                x_items, raw_xai, x_error = x_future.result()
                if x_error and progress:
                    progress.show_error(f"X error: {x_error}")
            except Exception as e:
                x_error = f"{type(e).__name__}: {e}"
                if progress:
                    progress.show_error(f"X error: {e}")
            if progress:
                progress.end_x(len(x_items))
    # Enrich Reddit items with real data (sequential, with per-item error
    # handling so one bad thread does not lose the whole batch).
    if reddit_items:
        if progress:
            progress.start_reddit_enrich(1, len(reddit_items))
        for i, item in enumerate(reddit_items):
            if progress and i > 0:
                progress.update_reddit_enrich(i + 1, len(reddit_items))
            try:
                if mock:
                    mock_thread = load_fixture("reddit_thread_sample.json")
                    reddit_items[i] = reddit_enrich.enrich_reddit_item(item, mock_thread)
                else:
                    reddit_items[i] = reddit_enrich.enrich_reddit_item(item)
            except Exception as e:
                # Log but don't crash - keep the unenriched item.
                if progress:
                    progress.show_error(f"Enrich failed for {item.get('url', 'unknown')}: {e}")
            raw_reddit_enriched.append(reddit_items[i])
        if progress:
            progress.end_reddit_enrich()
    return reddit_items, x_items, web_needed, raw_openai, raw_xai, raw_reddit_enriched, reddit_error, x_error
def main():
    """CLI entry point: parse args, run research, post-process, and emit output."""
    parser = argparse.ArgumentParser(
        description="Research a topic from the last 30 days on Reddit + X"
    )
    parser.add_argument("topic", nargs="?", help="Topic to research")
    parser.add_argument("--mock", action="store_true", help="Use fixtures")
    parser.add_argument(
        "--emit",
        choices=["compact", "json", "md", "context", "path"],
        default="compact",
        help="Output mode",
    )
    parser.add_argument(
        "--sources",
        choices=["auto", "reddit", "x", "both"],
        default="auto",
        help="Source selection",
    )
    parser.add_argument(
        "--quick",
        action="store_true",
        help="Faster research with fewer sources (8-12 each)",
    )
    parser.add_argument(
        "--deep",
        action="store_true",
        help="Comprehensive research with more sources (50-70 Reddit, 40-60 X)",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Enable verbose debug logging",
    )
    parser.add_argument(
        "--include-web",
        action="store_true",
        help="Include general web search alongside Reddit/X (lower weighted)",
    )
    args = parser.parse_args()
    # Enable debug logging if requested.
    if args.debug:
        os.environ["LAST30DAYS_DEBUG"] = "1"
        # Re-import http so the module-level debug flag is picked up.
        from lib import http as http_module
        http_module.DEBUG = True
    # Determine depth (--quick and --deep are mutually exclusive).
    if args.quick and args.deep:
        print("Error: Cannot use both --quick and --deep", file=sys.stderr)
        sys.exit(1)
    elif args.quick:
        depth = "quick"
    elif args.deep:
        depth = "deep"
    else:
        depth = "default"
    if not args.topic:
        print("Error: Please provide a topic to research.", file=sys.stderr)
        print("Usage: python3 last30days.py <topic> [options]", file=sys.stderr)
        sys.exit(1)
    # Load config from the environment.
    config = env.get_config()
    # Check which sources the available keys support.
    available = env.get_available_sources(config)
    # Mock mode can work without keys.
    if args.mock:
        if args.sources == "auto":
            sources = "both"
        else:
            sources = args.sources
    else:
        # Validate requested sources against what the keys allow.
        sources, error = env.validate_sources(args.sources, available, args.include_web)
        if error:
            # If it's a warning about WebSearch fallback, print but continue.
            if "WebSearch fallback" in error:
                print(f"Note: {error}", file=sys.stderr)
            else:
                print(f"Error: {error}", file=sys.stderr)
                sys.exit(1)
    # Get the 30-day date range.
    from_date, to_date = dates.get_date_range(30)
    # Check what keys are missing for promo messaging.
    missing_keys = env.get_missing_keys(config)
    # Initialize progress display.
    progress = ui.ProgressDisplay(args.topic, show_banner=True)
    # Show promo for missing keys BEFORE research starts.
    if missing_keys != 'none':
        progress.show_promo(missing_keys)
    # Select models (mock mode uses fixture model listings).
    if args.mock:
        mock_openai_models = load_fixture("models_openai_sample.json").get("data", [])
        mock_xai_models = load_fixture("models_xai_sample.json").get("data", [])
        selected_models = models.get_models(
            {
                "OPENAI_API_KEY": "mock",
                "XAI_API_KEY": "mock",
                **config,
            },
            mock_openai_models,
            mock_xai_models,
        )
    else:
        selected_models = models.get_models(config)
    # Map the source selection onto the report's mode string.
    if sources == "all":
        mode = "all"  # reddit + x + web
    elif sources == "both":
        mode = "both"  # reddit + x
    elif sources == "reddit":
        mode = "reddit-only"
    elif sources == "reddit-web":
        mode = "reddit-web"
    elif sources == "x":
        mode = "x-only"
    elif sources == "x-web":
        mode = "x-web"
    elif sources == "web":
        mode = "web-only"
    else:
        mode = sources
    # Run the research pipeline (parallel Reddit/X search + enrichment).
    reddit_items, x_items, web_needed, raw_openai, raw_xai, raw_reddit_enriched, reddit_error, x_error = run_research(
        args.topic,
        sources,
        config,
        selected_models,
        from_date,
        to_date,
        depth,
        args.mock,
        progress,
    )
    # Processing phase: normalize -> filter -> score -> sort -> dedupe.
    progress.start_processing()
    normalized_reddit = normalize.normalize_reddit_items(reddit_items, from_date, to_date)
    normalized_x = normalize.normalize_x_items(x_items, from_date, to_date)
    # Hard date filter: exclude items with verified dates outside the range.
    # This is the safety net - even if prompts let old content through,
    # this filters it.
    filtered_reddit = normalize.filter_by_date_range(normalized_reddit, from_date, to_date)
    filtered_x = normalize.filter_by_date_range(normalized_x, from_date, to_date)
    # Score items.
    scored_reddit = score.score_reddit_items(filtered_reddit)
    scored_x = score.score_x_items(filtered_x)
    # Sort items by score.
    sorted_reddit = score.sort_items(scored_reddit)
    sorted_x = score.sort_items(scored_x)
    # Dedupe items.
    deduped_reddit = dedupe.dedupe_reddit(sorted_reddit)
    deduped_x = dedupe.dedupe_x(sorted_x)
    progress.end_processing()
    # Assemble the report.
    report = schema.create_report(
        args.topic,
        from_date,
        to_date,
        mode,
        selected_models.get("openai"),
        selected_models.get("xai"),
    )
    report.reddit = deduped_reddit
    report.x = deduped_x
    report.reddit_error = reddit_error
    report.x_error = x_error
    # Generate the context snippet used by --emit=context.
    report.context_snippet_md = render.render_context_snippet(report)
    # Persist all outputs (report + raw API responses) to disk.
    render.write_outputs(report, raw_openai, raw_xai, raw_reddit_enriched)
    # Show completion summary.
    if sources == "web":
        progress.show_web_only_complete()
    else:
        progress.show_complete(len(deduped_reddit), len(deduped_x))
    # Emit the result in the requested format.
    output_result(report, args.emit, web_needed, args.topic, from_date, to_date, missing_keys)
def output_result(
    report: schema.Report,
    emit_mode: str,
    web_needed: bool = False,
    topic: str = "",
    from_date: str = "",
    to_date: str = "",
    missing_keys: str = "none",
):
    """Output the result based on emit mode, plus WebSearch instructions if needed.

    Unknown emit modes print nothing; the WebSearch banner is appended
    regardless of emit mode whenever *web_needed* is set.
    """
    # Map each emit mode to a lazy producer so only the chosen branch runs.
    emitters = {
        "compact": lambda: render.render_compact(report, missing_keys=missing_keys),
        "json": lambda: json.dumps(report.to_dict(), indent=2),
        "md": lambda: render.render_full_report(report),
        "context": lambda: report.context_snippet_md,
        "path": lambda: render.get_context_path(),
    }
    emitter = emitters.get(emit_mode)
    if emitter is not None:
        print(emitter())
    # Output WebSearch instructions if needed
    if web_needed:
        bar = "=" * 60
        instructions = (
            "\n" + bar,
            "### WEBSEARCH REQUIRED ###",
            bar,
            f"Topic: {topic}",
            f"Date range: {from_date} to {to_date}",
            "",
            "Claude: Use your WebSearch tool to find 8-15 relevant web pages.",
            "EXCLUDE: reddit.com, x.com, twitter.com (already covered above)",
            "INCLUDE: blogs, docs, news, tutorials from the last 30 days",
            "",
            "After searching, synthesize WebSearch results WITH the Reddit/X",
            "results above. WebSearch items should rank LOWER than comparable",
            "Reddit/X items (they lack engagement metrics).",
            bar,
        )
        for line in instructions:
            print(line)
# Script entry point: run the CLI pipeline defined in main().
if __name__ == "__main__":
    main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/last30days.py",
"license": "MIT License",
"lines": 450,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/cache.py | """Caching utilities for last30days skill."""
import hashlib
import json
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional
# On-disk cache location and expiry policy.
CACHE_DIR = Path.home() / ".cache" / "last30days"
DEFAULT_TTL_HOURS = 24  # TTL for per-query caches
MODEL_CACHE_TTL_DAYS = 7  # longer TTL for model-selection cache
def ensure_cache_dir():
    """Ensure the cache directory exists, creating parent directories as needed."""
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
def get_cache_key(topic: str, from_date: str, to_date: str, sources: str) -> str:
    """Derive a short, stable cache key from the query parameters.

    The key is the first 16 hex characters of a SHA-256 digest over the
    pipe-joined parameter fields.
    """
    raw = "|".join((topic, from_date, to_date, sources))
    return hashlib.sha256(raw.encode()).hexdigest()[:16]
def get_cache_path(cache_key: str) -> Path:
    """Return the cache file path for *cache_key* (one JSON file per key)."""
    return CACHE_DIR / f"{cache_key}.json"
def is_cache_valid(cache_path: Path, ttl_hours: int = DEFAULT_TTL_HOURS) -> bool:
    """Return True when *cache_path* exists and its mtime is younger than the TTL."""
    if not cache_path.exists():
        return False
    try:
        mtime = datetime.fromtimestamp(cache_path.stat().st_mtime, tz=timezone.utc)
    except OSError:
        # Racy delete or permission problem: treat as a cache miss.
        return False
    age_hours = (datetime.now(timezone.utc) - mtime).total_seconds() / 3600
    return age_hours < ttl_hours
def load_cache(cache_key: str, ttl_hours: int = DEFAULT_TTL_HOURS) -> Optional[dict]:
    """Return cached JSON data for *cache_key*, or None when missing/stale/corrupt."""
    path = get_cache_path(cache_key)
    if not is_cache_valid(path, ttl_hours):
        return None
    try:
        return json.loads(path.read_text())
    except (json.JSONDecodeError, OSError):
        return None
def get_cache_age_hours(cache_path: Path) -> Optional[float]:
    """Return the age of *cache_path* in hours, or None if it is missing/unreadable."""
    if not cache_path.exists():
        return None
    try:
        mtime = datetime.fromtimestamp(cache_path.stat().st_mtime, tz=timezone.utc)
    except OSError:
        return None
    return (datetime.now(timezone.utc) - mtime).total_seconds() / 3600
def load_cache_with_age(cache_key: str, ttl_hours: int = DEFAULT_TTL_HOURS) -> tuple:
    """Load data from cache together with its age.

    Returns:
        Tuple of (data, age_hours) on a valid hit, otherwise (None, None).
    """
    path = get_cache_path(cache_key)
    if not is_cache_valid(path, ttl_hours):
        return None, None
    # Measure age before reading so the pair describes the same snapshot.
    age_hours = get_cache_age_hours(path)
    try:
        return json.loads(path.read_text()), age_hours
    except (json.JSONDecodeError, OSError):
        return None, None
def save_cache(cache_key: str, data: dict):
    """Persist *data* as JSON under *cache_key*; write failures are ignored."""
    ensure_cache_dir()
    path = get_cache_path(cache_key)
    try:
        path.write_text(json.dumps(data))
    except OSError:
        # Caching is best-effort; never fail the caller on a write error.
        pass
def clear_cache():
    """Delete every JSON cache file; individual unlink failures are ignored."""
    if not CACHE_DIR.exists():
        return
    for cached_file in CACHE_DIR.glob("*.json"):
        try:
            cached_file.unlink()
        except OSError:
            pass
# Model selection cache (longer TTL than query caches; see MODEL_CACHE_TTL_DAYS)
MODEL_CACHE_FILE = CACHE_DIR / "model_selection.json"
def load_model_cache() -> dict:
    """Return the cached model-selection mapping, or {} when stale/missing/corrupt."""
    if not is_cache_valid(MODEL_CACHE_FILE, MODEL_CACHE_TTL_DAYS * 24):
        return {}
    try:
        return json.loads(MODEL_CACHE_FILE.read_text())
    except (json.JSONDecodeError, OSError):
        return {}
def save_model_cache(data: dict):
    """Write the model-selection mapping to disk; failures are silently ignored."""
    ensure_cache_dir()
    try:
        MODEL_CACHE_FILE.write_text(json.dumps(data))
    except OSError:
        pass
def get_cached_model(provider: str) -> Optional[str]:
    """Return the cached model id for *provider*, or None if not cached."""
    return load_model_cache().get(provider)
def set_cached_model(provider: str, model: str):
    """Record *model* for *provider* and stamp the update time."""
    selections = load_model_cache()
    selections[provider] = model
    selections['updated_at'] = datetime.now(timezone.utc).isoformat()
    save_model_cache(selections)
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/cache.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/dates.py | """Date utilities for last30days skill."""
from datetime import datetime, timedelta, timezone
from typing import Optional, Tuple
def get_date_range(days: int = 30) -> Tuple[str, str]:
    """Compute the date window for the last *days* days, ending today (UTC).

    Returns:
        Tuple of (from_date, to_date) as ISO YYYY-MM-DD strings.
    """
    end = datetime.now(timezone.utc).date()
    start = end - timedelta(days=days)
    return start.isoformat(), end.isoformat()
def parse_date(date_str: Optional[str]) -> Optional[datetime]:
    """Parse a date string in various formats.

    Supports: YYYY-MM-DD, ISO 8601 (with or without timezone), Unix timestamp.

    Returns:
        An aware datetime, or None when the input is empty/unparseable.
        Naive inputs are assumed to be UTC; inputs that carry an explicit
        UTC offset keep that offset (the instant is preserved).
    """
    if not date_str:
        return None
    # Try Unix timestamp (from Reddit)
    try:
        ts = float(date_str)
        return datetime.fromtimestamp(ts, tz=timezone.utc)
    except (ValueError, TypeError):
        pass
    # Try ISO formats
    formats = [
        "%Y-%m-%d",
        "%Y-%m-%dT%H:%M:%S",
        "%Y-%m-%dT%H:%M:%SZ",
        "%Y-%m-%dT%H:%M:%S%z",
        "%Y-%m-%dT%H:%M:%S.%f%z",
    ]
    for fmt in formats:
        try:
            dt = datetime.strptime(date_str, fmt)
        except ValueError:
            continue
        # BUG FIX: only assume UTC for naive results. The previous code
        # unconditionally replaced tzinfo, which silently shifted the
        # instant for inputs that already carried an offset (%z formats).
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=timezone.utc)
        return dt
    return None
def timestamp_to_date(ts: Optional[float]) -> Optional[str]:
    """Convert a Unix timestamp to a YYYY-MM-DD string (UTC), or None on bad input."""
    if ts is None:
        return None
    try:
        return datetime.fromtimestamp(ts, tz=timezone.utc).date().isoformat()
    except (ValueError, TypeError, OSError):
        return None
def get_date_confidence(date_str: Optional[str], from_date: str, to_date: str) -> str:
    """Classify how much to trust *date_str* relative to [from_date, to_date].

    Args:
        date_str: The date to check (YYYY-MM-DD or None)
        from_date: Start of valid range (YYYY-MM-DD)
        to_date: End of valid range (YYYY-MM-DD)

    Returns:
        'high' when the date parses and lies inside the range; 'low' when it
        is missing, unparseable, older than the range, or in the future.
    """
    if not date_str:
        return 'low'
    try:
        parsed = datetime.strptime(date_str, "%Y-%m-%d").date()
        lower = datetime.strptime(from_date, "%Y-%m-%d").date()
        upper = datetime.strptime(to_date, "%Y-%m-%d").date()
    except ValueError:
        return 'low'
    return 'high' if lower <= parsed <= upper else 'low'
def days_ago(date_str: Optional[str]) -> Optional[int]:
    """Return how many days before today (UTC) *date_str* falls.

    Returns None if the date is invalid or missing; negative for future dates.
    """
    if not date_str:
        return None
    try:
        target = datetime.strptime(date_str, "%Y-%m-%d").date()
    except ValueError:
        return None
    return (datetime.now(timezone.utc).date() - target).days
def recency_score(date_str: Optional[str], max_days: int = 30) -> int:
    """Score freshness on a 0-100 scale.

    Today scores 100; *max_days* ago (or older) scores 0. Unknown dates get
    0; future dates are clamped to 100.
    """
    age = days_ago(date_str)
    if age is None:
        return 0  # No verifiable date: worst score.
    # Clamp into [0, max_days] so future dates count as today and old
    # dates bottom out at zero, then scale linearly.
    clamped = min(max(age, 0), max_days)
    return int(100 * (1 - clamped / max_days))
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/dates.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/dedupe.py | """Near-duplicate detection for last30days skill."""
import re
from typing import List, Set, Tuple, Union
from . import schema
def normalize_text(text: str) -> str:
    """Canonicalize text for similarity comparison.

    Lowercases, replaces punctuation with spaces, and collapses runs of
    whitespace into single spaces.
    """
    cleaned = re.sub(r'[^\w\s]', ' ', text.lower())
    return re.sub(r'\s+', ' ', cleaned).strip()
def get_ngrams(text: str, n: int = 3) -> Set[str]:
    """Return the set of character n-grams of the normalized text.

    Text shorter than *n* yields a single gram containing the whole text.
    """
    normalized = normalize_text(text)
    if len(normalized) < n:
        return {normalized}
    return {normalized[i:i + n] for i in range(len(normalized) - n + 1)}
def jaccard_similarity(set1: Set[str], set2: Set[str]) -> float:
    """Jaccard index |A∩B| / |A∪B|; 0.0 when either input set is empty."""
    if not set1 or not set2:
        return 0.0
    union_size = len(set1 | set2)
    if union_size == 0:
        return 0.0
    return len(set1 & set2) / union_size
def get_item_text(item: Union[schema.RedditItem, schema.XItem]) -> str:
    """Return the text used for similarity: the title for Reddit, the body for X."""
    return item.title if isinstance(item, schema.RedditItem) else item.text
def find_duplicates(
    items: List[Union[schema.RedditItem, schema.XItem]],
    threshold: float = 0.7,
) -> List[Tuple[int, int]]:
    """Find near-duplicate pairs among *items*.

    Args:
        items: Items to compare pairwise (O(n^2) comparisons).
        threshold: Minimum Jaccard similarity (0-1) to flag a pair.

    Returns:
        Index pairs (i, j) with i < j whose n-gram similarity meets the threshold.
    """
    # Pre-compute each item's n-gram set once.
    grams = [get_ngrams(get_item_text(entry)) for entry in items]
    pairs = []
    for i, left in enumerate(grams):
        for j in range(i + 1, len(grams)):
            if jaccard_similarity(left, grams[j]) >= threshold:
                pairs.append((i, j))
    return pairs
def dedupe_items(
    items: List[Union[schema.RedditItem, schema.XItem]],
    threshold: float = 0.7,
) -> List[Union[schema.RedditItem, schema.XItem]]:
    """Remove near-duplicates, keeping the higher-scored item of each pair.

    Args:
        items: Items to dedupe (callers pre-sort by score descending).
        threshold: Similarity threshold passed to find_duplicates().

    Returns:
        Items with the lower-scored member of every similar pair removed.
    """
    if len(items) <= 1:
        return items
    discard = set()
    for i, j in find_duplicates(items, threshold):
        # Keep whichever of the pair scored higher; ties keep the earlier one.
        discard.add(j if items[i].score >= items[j].score else i)
    return [entry for idx, entry in enumerate(items) if idx not in discard]
def dedupe_reddit(
    items: List[schema.RedditItem],
    threshold: float = 0.7,
) -> List[schema.RedditItem]:
    """Dedupe Reddit items (delegates to dedupe_items with the same threshold)."""
    return dedupe_items(items, threshold)
def dedupe_x(
    items: List[schema.XItem],
    threshold: float = 0.7,
) -> List[schema.XItem]:
    """Dedupe X items (delegates to dedupe_items with the same threshold)."""
    return dedupe_items(items, threshold)
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/dedupe.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/env.py | """Environment and API key management for last30days skill."""
import os
from pathlib import Path
from typing import Optional, Dict, Any
# User configuration lives in ~/.config/last30days/.env (dotenv-style lines).
CONFIG_DIR = Path.home() / ".config" / "last30days"
CONFIG_FILE = CONFIG_DIR / ".env"
def load_env_file(path: Path) -> Dict[str, str]:
    """Parse a dotenv-style file into a dict.

    Blank lines and '#' comments are skipped; values may be wrapped in a
    matching pair of single or double quotes. Entries whose key or value
    ends up empty are ignored. A missing file yields an empty dict.
    """
    entries: Dict[str, str] = {}
    if not path.exists():
        return entries
    with open(path, 'r') as fh:
        for raw_line in fh:
            stripped = raw_line.strip()
            if not stripped or stripped.startswith('#'):
                continue
            if '=' not in stripped:
                continue
            key, _, value = stripped.partition('=')
            key = key.strip()
            value = value.strip()
            # Strip one layer of matching surrounding quotes.
            if value and value[0] in ('"', "'") and value[-1] == value[0]:
                value = value[1:-1]
            if key and value:
                entries[key] = value
    return entries
def get_config() -> Dict[str, Any]:
    """Load configuration from ~/.config/last30days/.env and the environment.

    Environment variables take precedence over file values; policies get
    defaults ('auto' for OpenAI, 'latest' for xAI).
    """
    file_env = load_env_file(CONFIG_FILE)

    def pick(name: str, default: Any = None) -> Any:
        # Env var wins; fall back to the config file, then the default.
        return os.environ.get(name) or file_env.get(name, default)

    return {
        'OPENAI_API_KEY': pick('OPENAI_API_KEY'),
        'XAI_API_KEY': pick('XAI_API_KEY'),
        'OPENAI_MODEL_POLICY': pick('OPENAI_MODEL_POLICY', 'auto'),
        'OPENAI_MODEL_PIN': pick('OPENAI_MODEL_PIN'),
        'XAI_MODEL_POLICY': pick('XAI_MODEL_POLICY', 'latest'),
        'XAI_MODEL_PIN': pick('XAI_MODEL_PIN'),
    }
def config_exists() -> bool:
    """Return True if the user's .env configuration file exists on disk."""
    return CONFIG_FILE.exists()
def get_available_sources(config: Dict[str, Any]) -> str:
    """Determine which sources are usable given the configured API keys.

    Returns:
        'both', 'reddit', 'x', or 'web' (WebSearch fallback when no keys).
    """
    has_openai = bool(config.get('OPENAI_API_KEY'))
    has_xai = bool(config.get('XAI_API_KEY'))
    if has_openai:
        return 'both' if has_xai else 'reddit'
    return 'x' if has_xai else 'web'
def get_missing_keys(config: Dict[str, Any]) -> str:
    """Report which API keys are absent.

    Returns:
        'none' (both present), 'x' (xAI key missing), 'reddit' (OpenAI key
        missing), or 'both' (neither configured).
    """
    has_openai = bool(config.get('OPENAI_API_KEY'))
    has_xai = bool(config.get('XAI_API_KEY'))
    if has_openai:
        return 'none' if has_xai else 'x'
    return 'reddit' if has_xai else 'both'
def validate_sources(requested: str, available: str, include_web: bool = False) -> tuple[str, Optional[str]]:
    """Validate requested sources against available keys.

    Args:
        requested: 'auto', 'reddit', 'x', 'both', or 'web'
        available: Result from get_available_sources()
        include_web: If True, add WebSearch to available sources

    Returns:
        Tuple of (effective_sources, error_message); error_message is None
        on success, and effective_sources is 'none' on a hard conflict.
    """
    # WebSearch-only mode (no API keys configured at all).
    if available == 'web':
        if requested in ('auto', 'web'):
            return 'web', None
        return 'web', f"No API keys configured. Using WebSearch fallback. Add keys to ~/.config/last30days/.env for Reddit/X."

    if requested == 'auto':
        if include_web:
            # Fold WebSearch into whatever keyed sources exist.
            web_modes = {'both': 'all', 'reddit': 'reddit-web', 'x': 'x-web'}
            if available in web_modes:
                return web_modes[available], None
        return available, None

    if requested == 'web':
        return 'web', None

    if requested == 'both':
        if available != 'both':
            missing = 'xAI' if available == 'reddit' else 'OpenAI'
            return 'none', f"Requested both sources but {missing} key is missing. Use --sources=auto to use available keys."
        return ('all' if include_web else 'both'), None

    if requested == 'reddit':
        if available == 'x':
            return 'none', "Requested Reddit but only xAI key is available."
        return ('reddit-web' if include_web else 'reddit'), None

    if requested == 'x':
        if available == 'reddit':
            return 'none', "Requested X but only OpenAI key is available."
        return ('x-web' if include_web else 'x'), None

    # Unknown request: pass through unchanged.
    return requested, None
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/env.py",
"license": "MIT License",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/http.py | """HTTP utilities for last30days skill (stdlib only)."""
import json
import os
import sys
import time
import urllib.error
import urllib.request
from typing import Any, Dict, Optional
from urllib.parse import urlencode
# Default per-request timeout in seconds.
DEFAULT_TIMEOUT = 30
# Verbose logging is opt-in via the LAST30DAYS_DEBUG environment variable.
DEBUG = os.environ.get("LAST30DAYS_DEBUG", "").lower() in ("1", "true", "yes")
def log(msg: str):
    """Write a debug line to stderr (flushed) when LAST30DAYS_DEBUG is enabled."""
    if not DEBUG:
        return
    sys.stderr.write(f"[DEBUG] {msg}\n")
    sys.stderr.flush()
# Retry policy: up to MAX_RETRIES attempts with linear backoff (RETRY_DELAY * attempt).
MAX_RETRIES = 3
RETRY_DELAY = 1.0
# User-Agent sent on every outgoing request.
USER_AGENT = "last30days-skill/1.0 (Claude Code Skill)"
class HTTPError(Exception):
    """HTTP request failure carrying the status code and response body when known."""

    def __init__(self, message: str, status_code: Optional[int] = None, body: Optional[str] = None):
        super().__init__(message)
        # Both attributes stay None for transport-level failures (DNS, timeout).
        self.status_code = status_code
        self.body = body
def request(
    method: str,
    url: str,
    headers: Optional[Dict[str, str]] = None,
    json_data: Optional[Dict[str, Any]] = None,
    timeout: int = DEFAULT_TIMEOUT,
    retries: int = MAX_RETRIES,
) -> Dict[str, Any]:
    """Make an HTTP request and return JSON response.

    Args:
        method: HTTP method (GET, POST, etc.)
        url: Request URL
        headers: Optional headers dict
        json_data: Optional JSON body (for POST)
        timeout: Request timeout in seconds
        retries: Number of attempts before giving up

    Returns:
        Parsed JSON response (empty dict for an empty body)

    Raises:
        HTTPError: On request failure. 4xx responses (other than 429) and
            malformed JSON bodies are raised immediately; other failures
            are retried with linear backoff.
    """
    headers = headers or {}
    headers.setdefault("User-Agent", USER_AGENT)
    data = None
    if json_data is not None:
        data = json.dumps(json_data).encode('utf-8')
        headers.setdefault("Content-Type", "application/json")
    req = urllib.request.Request(url, data=data, headers=headers, method=method)
    log(f"{method} {url}")
    if json_data:
        log(f"Payload keys: {list(json_data.keys())}")
    last_error = None
    for attempt in range(retries):
        try:
            with urllib.request.urlopen(req, timeout=timeout) as response:
                body = response.read().decode('utf-8')
                log(f"Response: {response.status} ({len(body)} bytes)")
                return json.loads(body) if body else {}
        except urllib.error.HTTPError as e:
            body = None
            try:
                body = e.read().decode('utf-8')
            except (OSError, UnicodeDecodeError, ValueError):
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Only ignore read/decode errors;
                # the error body is informational.
                pass
            log(f"HTTP Error {e.code}: {e.reason}")
            if body:
                log(f"Error body: {body[:500]}")
            last_error = HTTPError(f"HTTP {e.code}: {e.reason}", e.code, body)
            # Don't retry client errors (4xx) except rate limits
            if 400 <= e.code < 500 and e.code != 429:
                raise last_error
            if attempt < retries - 1:
                time.sleep(RETRY_DELAY * (attempt + 1))
        except urllib.error.URLError as e:
            log(f"URL Error: {e.reason}")
            last_error = HTTPError(f"URL Error: {e.reason}")
            if attempt < retries - 1:
                time.sleep(RETRY_DELAY * (attempt + 1))
        except json.JSONDecodeError as e:
            # A malformed body will not improve on retry; fail fast.
            log(f"JSON decode error: {e}")
            last_error = HTTPError(f"Invalid JSON response: {e}")
            raise last_error
        except (OSError, TimeoutError, ConnectionResetError) as e:
            # Socket-level errors (connection reset, timeout, etc.) are retryable.
            log(f"Connection error: {type(e).__name__}: {e}")
            last_error = HTTPError(f"Connection error: {type(e).__name__}: {e}")
            if attempt < retries - 1:
                time.sleep(RETRY_DELAY * (attempt + 1))
    if last_error:
        raise last_error
    raise HTTPError("Request failed with no error details")
def get(url: str, headers: Optional[Dict[str, str]] = None, **kwargs) -> Dict[str, Any]:
    """Make a GET request; extra keyword args are forwarded to request()."""
    return request("GET", url, headers=headers, **kwargs)
def post(url: str, json_data: Dict[str, Any], headers: Optional[Dict[str, str]] = None, **kwargs) -> Dict[str, Any]:
    """Make a POST request with a JSON body; extra kwargs are forwarded to request()."""
    return request("POST", url, headers=headers, json_data=json_data, **kwargs)
def get_reddit_json(path: str) -> Dict[str, Any]:
    """Fetch a Reddit thread's JSON representation.

    Args:
        path: Reddit path (e.g., /r/subreddit/comments/id/title)

    Returns:
        Parsed JSON response from www.reddit.com
    """
    # Normalize the path to "/<path>.json" regardless of input shape.
    if not path.startswith('/'):
        path = '/' + path
    path = path.rstrip('/')
    if not path.endswith('.json'):
        path += '.json'
    endpoint = f"https://www.reddit.com{path}?raw_json=1"
    request_headers = {
        "User-Agent": USER_AGENT,
        "Accept": "application/json",
    }
    return get(endpoint, headers=request_headers)
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/http.py",
"license": "MIT License",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/models.py | """Model auto-selection for last30days skill."""
import re
from typing import Dict, List, Optional, Tuple
from . import cache, http
# OpenAI API
OPENAI_MODELS_URL = "https://api.openai.com/v1/models"
# Tried (first entry) when live model discovery fails or finds no gpt-5 models.
OPENAI_FALLBACK_MODELS = ["gpt-5.2", "gpt-5.1", "gpt-5", "gpt-4o"]
# xAI API - Agent Tools API requires grok-4 family
XAI_MODELS_URL = "https://api.x.ai/v1/models"
# Policy name -> concrete model id.
XAI_ALIASES = {
    "latest": "grok-4-1-fast",  # Required for x_search tool
    "stable": "grok-4-1-fast",
}
def parse_version(model_id: str) -> Optional[Tuple[int, ...]]:
    """Extract a dotted numeric version from a model id as an int tuple.

    Examples:
        gpt-5 -> (5,); gpt-5.2 -> (5, 2); gpt-5.2.1 -> (5, 2, 1).
        Returns None when the id contains no digits.
    """
    found = re.search(r'(\d+(?:\.\d+)*)', model_id)
    if found is None:
        return None
    return tuple(int(part) for part in found.group(1).split('.'))
def is_mainline_openai_model(model_id: str) -> bool:
    """Check if model is a mainline GPT-5 model (not mini/nano/chat/codex/pro).

    The anchored pattern only matches ids of the exact form "gpt-5",
    "gpt-5.2", "gpt-5.2.1", ... — any variant suffix such as "-mini" or
    "-preview" already fails the full match. (The previous implementation
    additionally looped over an exclude list, but that branch was
    unreachable: a string matching the anchored regex cannot contain those
    substrings.)
    """
    return re.match(r'^gpt-5(\.\d+)*$', model_id.lower()) is not None
def select_openai_model(
    api_key: str,
    policy: str = "auto",
    pin: Optional[str] = None,
    mock_models: Optional[List[Dict]] = None,
) -> str:
    """Select the best OpenAI model based on policy.

    Args:
        api_key: OpenAI API key
        policy: 'auto' (newest mainline gpt-5) or 'pinned'
        pin: Model to use if policy is 'pinned'
        mock_models: Mock model list for testing

    Returns:
        Selected model ID
    """
    if policy == "pinned" and pin:
        return pin
    # A previous selection short-circuits the network call.
    cached = cache.get_cached_model("openai")
    if cached:
        return cached
    if mock_models is not None:
        models = mock_models
    else:
        try:
            response = http.get(
                OPENAI_MODELS_URL,
                headers={"Authorization": f"Bearer {api_key}"},
            )
            models = response.get("data", [])
        except http.HTTPError:
            # Discovery failed: fall back to the newest known model.
            return OPENAI_FALLBACK_MODELS[0]
    candidates = [m for m in models if is_mainline_openai_model(m.get("id", ""))]
    if not candidates:
        # No gpt-5 models visible; use the static fallback.
        return OPENAI_FALLBACK_MODELS[0]
    # Newest version wins; the created timestamp breaks version ties.
    best = max(
        candidates,
        key=lambda m: (parse_version(m.get("id", "")) or (0,), m.get("created", 0)),
    )
    selected = best["id"]
    cache.set_cached_model("openai", selected)
    return selected
def select_xai_model(
    api_key: str,
    policy: str = "latest",
    pin: Optional[str] = None,
    mock_models: Optional[List[Dict]] = None,
) -> str:
    """Select the xAI model for the given policy.

    Args:
        api_key: xAI API key (not used by the current alias-based selection)
        policy: 'latest', 'stable', or 'pinned'
        pin: Model to use if policy is 'pinned'
        mock_models: Mock model list for testing (unused here)

    Returns:
        Selected model ID
    """
    if policy == "pinned" and pin:
        return pin
    if policy in XAI_ALIASES:
        # Reuse a previous selection when present; otherwise cache the alias.
        cached = cache.get_cached_model("xai")
        if cached:
            return cached
        alias = XAI_ALIASES[policy]
        cache.set_cached_model("xai", alias)
        return alias
    # Unknown policy: default to latest.
    return XAI_ALIASES["latest"]
def get_models(
    config: Dict,
    mock_openai_models: Optional[List[Dict]] = None,
    mock_xai_models: Optional[List[Dict]] = None,
) -> Dict[str, Optional[str]]:
    """Resolve the model to use for each configured provider.

    Returns:
        Dict with 'openai' and 'xai' keys; a value stays None when the
        corresponding API key is not configured.
    """
    selected: Dict[str, Optional[str]] = {"openai": None, "xai": None}
    openai_key = config.get("OPENAI_API_KEY")
    if openai_key:
        selected["openai"] = select_openai_model(
            openai_key,
            config.get("OPENAI_MODEL_POLICY", "auto"),
            config.get("OPENAI_MODEL_PIN"),
            mock_openai_models,
        )
    xai_key = config.get("XAI_API_KEY")
    if xai_key:
        selected["xai"] = select_xai_model(
            xai_key,
            config.get("XAI_MODEL_POLICY", "latest"),
            config.get("XAI_MODEL_PIN"),
            mock_xai_models,
        )
    return selected
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/models.py",
"license": "MIT License",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/normalize.py | """Normalization of raw API data to canonical schema."""
from typing import Any, Dict, List, TypeVar, Union
from . import dates, schema
T = TypeVar("T", schema.RedditItem, schema.XItem, schema.WebSearchItem)
def filter_by_date_range(
    items: List[T],
    from_date: str,
    to_date: str,
    require_date: bool = False,
) -> List[T]:
    """Hard filter: drop items whose verified date falls outside the range.

    This is the safety net - even if the prompts let old content through,
    this filter excludes it. ISO YYYY-MM-DD strings order correctly under
    plain string comparison, so no date parsing is needed here.

    Args:
        items: List of items to filter
        from_date: Start date (YYYY-MM-DD) - exclude items before this
        to_date: End date (YYYY-MM-DD) - exclude items after this
        require_date: If True, also remove items with no date

    Returns:
        Items inside the range, plus (unless require_date) undated items.
    """
    kept = []
    for item in items:
        if item.date is None:
            # Unknown dates survive by default; scoring penalizes them instead.
            if not require_date:
                kept.append(item)
        elif from_date <= item.date <= to_date:
            kept.append(item)
        # else: outside the window (too old, or a suspicious future date) - drop.
    return kept
def normalize_reddit_items(
    items: List[Dict[str, Any]],
    from_date: str,
    to_date: str,
) -> List[schema.RedditItem]:
    """Convert raw Reddit dicts from the API into RedditItem objects.

    Args:
        items: Raw Reddit items from API
        from_date: Start of date range (used only for date confidence)
        to_date: End of date range (used only for date confidence)

    Returns:
        List of RedditItem objects
    """
    out = []
    for raw in items:
        # Engagement is optional and only honored when it is a dict.
        eng_raw = raw.get("engagement")
        engagement = (
            schema.Engagement(
                score=eng_raw.get("score"),
                num_comments=eng_raw.get("num_comments"),
                upvote_ratio=eng_raw.get("upvote_ratio"),
            )
            if isinstance(eng_raw, dict)
            else None
        )
        top_comments = [
            schema.Comment(
                score=c.get("score", 0),
                date=c.get("date"),
                author=c.get("author", ""),
                excerpt=c.get("excerpt", ""),
                url=c.get("url", ""),
            )
            for c in raw.get("top_comments", [])
        ]
        date_str = raw.get("date")
        out.append(schema.RedditItem(
            id=raw.get("id", ""),
            title=raw.get("title", ""),
            url=raw.get("url", ""),
            subreddit=raw.get("subreddit", ""),
            date=date_str,
            date_confidence=dates.get_date_confidence(date_str, from_date, to_date),
            engagement=engagement,
            top_comments=top_comments,
            comment_insights=raw.get("comment_insights", []),
            relevance=raw.get("relevance", 0.5),
            why_relevant=raw.get("why_relevant", ""),
        ))
    return out
def normalize_x_items(
    items: List[Dict[str, Any]],
    from_date: str,
    to_date: str,
) -> List[schema.XItem]:
    """Convert raw X (Twitter) dicts from the API into XItem objects.

    Args:
        items: Raw X items from API
        from_date: Start of date range (used only for date confidence)
        to_date: End of date range (used only for date confidence)

    Returns:
        List of XItem objects
    """
    out = []
    for raw in items:
        # Engagement is optional and only honored when it is a dict.
        eng_raw = raw.get("engagement")
        engagement = (
            schema.Engagement(
                likes=eng_raw.get("likes"),
                reposts=eng_raw.get("reposts"),
                replies=eng_raw.get("replies"),
                quotes=eng_raw.get("quotes"),
            )
            if isinstance(eng_raw, dict)
            else None
        )
        date_str = raw.get("date")
        out.append(schema.XItem(
            id=raw.get("id", ""),
            text=raw.get("text", ""),
            url=raw.get("url", ""),
            author_handle=raw.get("author_handle", ""),
            date=date_str,
            date_confidence=dates.get_date_confidence(date_str, from_date, to_date),
            engagement=engagement,
            relevance=raw.get("relevance", 0.5),
            why_relevant=raw.get("why_relevant", ""),
        ))
    return out
def items_to_dicts(items: List) -> List[Dict[str, Any]]:
    """Serialize schema items to plain dicts via their to_dict() methods."""
    return [entry.to_dict() for entry in items]
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/normalize.py",
"license": "MIT License",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/openai_reddit.py | """OpenAI Responses API client for Reddit discovery."""
import json
import re
import sys
from typing import Any, Dict, List, Optional
from . import http
def _log_error(msg: str):
"""Log error to stderr."""
sys.stderr.write(f"[REDDIT ERROR] {msg}\n")
sys.stderr.flush()
# OpenAI Responses API endpoint.
OPENAI_RESPONSES_URL = "https://api.openai.com/v1/responses"
# Depth configurations: (min, max) threads to request
# Request MORE than needed since many get filtered by date
DEPTH_CONFIG = {
    "quick": (15, 25),
    "default": (30, 50),
    "deep": (70, 100),
}
# Prompt template for the Responses API. Placeholders consumed by .format():
# {topic}, {min_items}, {max_items}. (search_reddit also passes from_date /
# to_date as format kwargs, but the template text does not reference them.)
REDDIT_SEARCH_PROMPT = """Find Reddit discussion threads about: {topic}
STEP 1: EXTRACT THE CORE SUBJECT
Get the MAIN NOUN/PRODUCT/TOPIC:
- "best nano banana prompting practices" β "nano banana"
- "killer features of clawdbot" β "clawdbot"
- "top Claude Code skills" β "Claude Code"
DO NOT include "best", "top", "tips", "practices", "features" in your search.
STEP 2: SEARCH BROADLY
Search for the core subject:
1. "[core subject] site:reddit.com"
2. "reddit [core subject]"
3. "[core subject] reddit"
Return as many relevant threads as you find. We filter by date server-side.
STEP 3: INCLUDE ALL MATCHES
- Include ALL threads about the core subject
- Set date to "YYYY-MM-DD" if you can determine it, otherwise null
- We verify dates and filter old content server-side
- DO NOT pre-filter aggressively - include anything relevant
REQUIRED: URLs must contain "/r/" AND "/comments/"
REJECT: developers.reddit.com, business.reddit.com
Find {min_items}-{max_items} threads. Return MORE rather than fewer.
Return JSON:
{{
"items": [
{{
"title": "Thread title",
"url": "https://www.reddit.com/r/sub/comments/xyz/title/",
"subreddit": "subreddit_name",
"date": "YYYY-MM-DD or null",
"why_relevant": "Why relevant",
"relevance": 0.85
}}
]
}}"""
def _extract_core_subject(topic: str) -> str:
"""Extract core subject from verbose query for retry."""
noise = ['best', 'top', 'how to', 'tips for', 'practices', 'features',
'killer', 'guide', 'tutorial', 'recommendations', 'advice',
'prompting', 'using', 'for', 'with', 'the', 'of', 'in', 'on']
words = topic.lower().split()
result = [w for w in words if w not in noise]
return ' '.join(result[:3]) or topic # Keep max 3 words
def search_reddit(
    api_key: str,
    model: str,
    topic: str,
    from_date: str,
    to_date: str,
    depth: str = "default",
    mock_response: Optional[Dict] = None,
    _retry: bool = False,
) -> Dict[str, Any]:
    """Search Reddit for relevant threads using the OpenAI Responses API.

    Args:
        api_key: OpenAI API key
        model: Model to use
        topic: Search topic
        from_date: Start date (YYYY-MM-DD) - only include threads after this
        to_date: End date (YYYY-MM-DD) - only include threads before this
        depth: Research depth - "quick", "default", or "deep"
        mock_response: Mock response for testing (returned verbatim)

    Returns:
        Raw API response
    """
    if mock_response is not None:
        return mock_response
    min_items, max_items = DEPTH_CONFIG.get(depth, DEPTH_CONFIG["default"])
    # Generous timeouts: OpenAI's hosted web_search can be slow.
    timeout = {"quick": 90, "default": 120}.get(depth, 180)
    prompt = REDDIT_SEARCH_PROMPT.format(
        topic=topic,
        from_date=from_date,
        to_date=to_date,
        min_items=min_items,
        max_items=max_items,
    )
    # Note: allowed_domains accepts the base domain, not subdomains; the
    # prompt itself filters out developers.reddit.com, etc.
    payload = {
        "model": model,
        "tools": [
            {
                "type": "web_search",
                "filters": {
                    "allowed_domains": ["reddit.com"]
                }
            }
        ],
        "include": ["web_search_call.action.sources"],
        "input": prompt,
    }
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    return http.post(OPENAI_RESPONSES_URL, payload, headers=headers, timeout=timeout)
def parse_reddit_response(response: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Parse OpenAI response to extract Reddit items.

    Handles the Responses API shape (list of output items), a plain string
    output, and the older chat-completions 'choices' shape.

    Args:
        response: Raw API response

    Returns:
        List of validated item dicts (empty on error or when nothing parses)
    """
    items = []

    # Check for API errors first
    if "error" in response and response["error"]:
        error = response["error"]
        err_msg = error.get("message", str(error)) if isinstance(error, dict) else str(error)
        _log_error(f"OpenAI API error: {err_msg}")
        if http.DEBUG:
            _log_error(f"Full error response: {json.dumps(response, indent=2)[:1000]}")
        return items

    # Try to find the output text
    output_text = ""
    if "output" in response:
        output = response["output"]
        if isinstance(output, str):
            output_text = output
        elif isinstance(output, list):
            for item in output:
                if isinstance(item, dict):
                    if item.get("type") == "message":
                        content = item.get("content", [])
                        for c in content:
                            if isinstance(c, dict) and c.get("type") == "output_text":
                                output_text = c.get("text", "")
                                break
                    elif "text" in item:
                        output_text = item["text"]
                elif isinstance(item, str):
                    output_text = item
                if output_text:
                    break

    # Also check for choices (older format)
    if not output_text and "choices" in response:
        for choice in response["choices"]:
            if "message" in choice:
                output_text = choice["message"].get("content", "")
                break

    if not output_text:
        print(f"[REDDIT WARNING] No output text found in OpenAI response. Keys present: {list(response.keys())}", flush=True)
        return items

    # Extract the JSON object containing "items" from the (possibly chatty) text
    json_match = re.search(r'\{[\s\S]*"items"[\s\S]*\}', output_text)
    if json_match:
        try:
            data = json.loads(json_match.group())
            items = data.get("items", [])
        except json.JSONDecodeError:
            pass

    # Validate and clean items
    clean_items = []
    for i, item in enumerate(items):
        if not isinstance(item, dict):
            continue
        url = item.get("url", "")
        if not url or "reddit.com" not in url:
            continue

        # BUGFIX: the previous .lstrip("r/") strips any leading run of the
        # characters 'r' and '/', mangling names like "rust" -> "ust".
        # Remove only the literal "r/" prefix.
        subreddit = str(item.get("subreddit", "")).strip()
        if subreddit.startswith("r/"):
            subreddit = subreddit[2:]

        # Robustness: a non-numeric relevance from the model must not abort
        # the whole parse; fall back to the neutral default.
        try:
            relevance = float(item.get("relevance", 0.5))
        except (TypeError, ValueError):
            relevance = 0.5

        clean_item = {
            "id": f"R{i+1}",
            "title": str(item.get("title", "")).strip(),
            "url": url,
            "subreddit": subreddit,
            "date": item.get("date"),
            "why_relevant": str(item.get("why_relevant", "")).strip(),
            "relevance": min(1.0, max(0.0, relevance)),
        }
        # Validate date format (YYYY-MM-DD); drop anything else
        if clean_item["date"]:
            if not re.match(r'^\d{4}-\d{2}-\d{2}$', str(clean_item["date"])):
                clean_item["date"] = None
        clean_items.append(clean_item)

    return clean_items
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/openai_reddit.py",
"license": "MIT License",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/reddit_enrich.py | """Reddit thread enrichment with real engagement metrics."""
import re
from typing import Any, Dict, List, Optional
from urllib.parse import urlparse
from . import http, dates
def extract_reddit_path(url: str) -> Optional[str]:
    """Extract the path from a Reddit URL.

    Args:
        url: Reddit URL

    Returns:
        Path component, or None when the URL is not a reddit.com URL or
        cannot be parsed.
    """
    try:
        parsed = urlparse(url)
    # Was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; catch only plausible parse failures.
    except (AttributeError, TypeError, ValueError):
        return None
    if "reddit.com" not in parsed.netloc:
        return None
    return parsed.path
def fetch_thread_data(url: str, mock_data: Optional[Dict] = None) -> Optional[Dict[str, Any]]:
    """Fetch Reddit thread JSON data.

    Args:
        url: Reddit thread URL
        mock_data: Mock data for testing; returned verbatim when provided

    Returns:
        Thread data dict, or None when the URL is not a Reddit thread or
        the fetch fails
    """
    if mock_data is not None:
        return mock_data

    thread_path = extract_reddit_path(url)
    if not thread_path:
        return None

    try:
        return http.get_reddit_json(thread_path)
    except http.HTTPError:
        # Best-effort enrichment: a failed fetch is simply "no data".
        return None
def parse_thread_data(data: Any) -> Dict[str, Any]:
    """Parse Reddit thread JSON into structured data.

    Args:
        data: Raw Reddit JSON response (a list whose first element is the
            submission listing and optional second element the comments)

    Returns:
        Dict with 'submission' (dict or None) and 'comments' (list)
    """
    parsed: Dict[str, Any] = {"submission": None, "comments": []}
    if not isinstance(data, list) or not data:
        return parsed

    # First element: submission listing
    listing = data[0]
    if isinstance(listing, dict):
        posts = listing.get("data", {}).get("children", [])
        if posts:
            post = posts[0].get("data", {})
            parsed["submission"] = {
                "score": post.get("score"),
                "num_comments": post.get("num_comments"),
                "upvote_ratio": post.get("upvote_ratio"),
                "created_utc": post.get("created_utc"),
                "permalink": post.get("permalink"),
                "title": post.get("title"),
                "selftext": post.get("selftext", "")[:500],  # Truncate
            }

    # Second element: comments listing
    if len(data) >= 2 and isinstance(data[1], dict):
        for child in data[1].get("data", {}).get("children", []):
            # "t1" is Reddit's kind prefix for comments.
            if child.get("kind") != "t1":
                continue
            body = child.get("data", {})
            if not body.get("body"):
                continue
            parsed["comments"].append({
                "score": body.get("score", 0),
                "created_utc": body.get("created_utc"),
                "author": body.get("author", "[deleted]"),
                "body": body.get("body", "")[:300],  # Truncate
                "permalink": body.get("permalink"),
            })
    return parsed
def get_top_comments(comments: List[Dict], limit: int = 10) -> List[Dict[str, Any]]:
    """Get top comments sorted by score.

    Args:
        comments: List of comment dicts
        limit: Maximum number to return

    Returns:
        Highest-scoring comments, with deleted/removed authors excluded
    """
    hidden_authors = ("[deleted]", "[removed]")
    keep = [c for c in comments if c.get("author") not in hidden_authors]
    keep.sort(key=lambda c: c.get("score", 0), reverse=True)
    return keep[:limit]
def extract_comment_insights(comments: List[Dict], limit: int = 7) -> List[str]:
    """Extract key insights from top comments.

    Uses simple heuristics to identify valuable comments:
    - Has substantive text (at least 30 chars after stripping)
    - Not a low-value reaction ("same", "lol", deleted markers, ...)

    Args:
        comments: Top comments
        limit: Max insights to extract

    Returns:
        List of insight strings, each truncated to roughly a sentence
    """
    low_value = (
        r'^(this|same|agreed|exactly|yep|nope|yes|no|thanks|thank you)\.?$',
        r'^lol|lmao|haha',
        r'^\[deleted\]',
        r'^\[removed\]',
    )
    picked: List[str] = []
    # Scan twice as many comments as needed since many get filtered out.
    for candidate in comments[:limit * 2]:
        text = candidate.get("body", "").strip()
        if len(text) < 30:
            continue
        lowered = text.lower()
        if any(re.match(pattern, lowered) for pattern in low_value):
            continue
        snippet = text[:150]
        if len(text) > 150:
            # Prefer cutting at a sentence boundary past the 50-char mark;
            # otherwise mark the truncation with an ellipsis.
            boundary = next(
                (i for i, ch in enumerate(snippet) if ch in '.!?' and i > 50),
                None,
            )
            if boundary is not None:
                snippet = snippet[:boundary + 1]
            else:
                snippet = snippet.rstrip() + "..."
        picked.append(snippet)
        if len(picked) >= limit:
            break
    return picked
def enrich_reddit_item(
    item: Dict[str, Any],
    mock_thread_data: Optional[Dict] = None,
) -> Dict[str, Any]:
    """Enrich a Reddit item with real engagement data.

    Args:
        item: Reddit item dict (mutated in place)
        mock_thread_data: Mock data for testing

    Returns:
        The same item dict, enriched when thread data was available
    """
    thread_json = fetch_thread_data(item.get("url", ""), mock_thread_data)
    if not thread_json:
        # Fetch failed: hand the item back untouched.
        return item

    parsed = parse_thread_data(thread_json)
    post = parsed.get("submission")
    thread_comments = parsed.get("comments", [])

    if post:
        item["engagement"] = {
            "score": post.get("score"),
            "num_comments": post.get("num_comments"),
            "upvote_ratio": post.get("upvote_ratio"),
        }
        # Prefer the date Reddit itself reports over the model's guess.
        ts = post.get("created_utc")
        if ts:
            item["date"] = dates.timestamp_to_date(ts)

    best = get_top_comments(thread_comments)
    item["top_comments"] = [
        {
            "score": c.get("score", 0),
            "date": dates.timestamp_to_date(c.get("created_utc")),
            "author": c.get("author", ""),
            "excerpt": c.get("body", "")[:200],
            "url": f"https://reddit.com{c['permalink']}" if c.get("permalink") else "",
        }
        for c in best
    ]
    item["comment_insights"] = extract_comment_insights(best)
    return item
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/reddit_enrich.py",
"license": "MIT License",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/render.py | """Output rendering for last30days skill."""
import json
from pathlib import Path
from typing import List, Optional
from . import schema
OUTPUT_DIR = Path.home() / ".local" / "share" / "last30days" / "out"
def ensure_output_dir():
    """Ensure the output directory exists (creates parents; idempotent)."""
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
def _assess_data_freshness(report: schema.Report) -> dict:
    """Assess how much data is actually from the last 30 days."""
    def _count_recent(entries):
        # Plain string comparison is valid because dates are ISO YYYY-MM-DD.
        return sum(1 for e in entries if e.date and e.date >= report.range_from)

    reddit_recent = _count_recent(report.reddit)
    x_recent = _count_recent(report.x)
    web_recent = _count_recent(report.web)
    recent = reddit_recent + x_recent + web_recent
    total = len(report.reddit) + len(report.x) + len(report.web)
    return {
        "reddit_recent": reddit_recent,
        "x_recent": x_recent,
        "web_recent": web_recent,
        "total_recent": recent,
        "total_items": total,
        # Fewer than 5 confirmed-recent items triggers the sparse-data banner.
        "is_sparse": recent < 5,
        "mostly_evergreen": total > 0 and recent < total * 0.3,
    }
def render_compact(report: schema.Report, limit: int = 15, missing_keys: str = "none") -> str:
    """Render compact output for Claude to synthesize.

    Sections are emitted in order: freshness warning, mode banners, cache
    indicator, metadata, tips, then per-source lists (Reddit, X, Web). Each
    source renders its error, an explicit empty-result note, or its items.

    Args:
        report: Report data
        limit: Max items per source
        missing_keys: 'both', 'reddit', 'x', or 'none'

    Returns:
        Compact markdown string
    """
    lines = []
    # Header
    lines.append(f"## Research Results: {report.topic}")
    lines.append("")
    # Assess data freshness and add honesty warning if needed
    freshness = _assess_data_freshness(report)
    if freshness["is_sparse"]:
        lines.append("**⚠️ LIMITED RECENT DATA** - Few discussions from the last 30 days.")
        lines.append(f"Only {freshness['total_recent']} item(s) confirmed from {report.range_from} to {report.range_to}.")
        lines.append("Results below may include older/evergreen content. Be transparent with the user about this.")
        lines.append("")
    # Web-only mode banner (when no API keys)
    if report.mode == "web-only":
        lines.append("**🌐 WEB SEARCH MODE** - Claude will search blogs, docs & news")
        lines.append("")
        lines.append("---")
        lines.append("**⚡ Want better results?** Add API keys to unlock Reddit & X data:")
        lines.append("- `OPENAI_API_KEY` → Reddit threads with real upvotes & comments")
        lines.append("- `XAI_API_KEY` → X posts with real likes & reposts")
        lines.append("- Edit `~/.config/last30days/.env` to add keys")
        lines.append("---")
        lines.append("")
    # Cache indicator
    if report.from_cache:
        # NOTE(review): a cache age of exactly 0.0 hours is falsy and renders
        # as plain "cached" — confirm that is intended.
        age_str = f"{report.cache_age_hours:.1f}h old" if report.cache_age_hours else "cached"
        lines.append(f"**⚡ CACHED RESULTS** ({age_str}) - use `--refresh` for fresh data")
        lines.append("")
    lines.append(f"**Date Range:** {report.range_from} to {report.range_to}")
    lines.append(f"**Mode:** {report.mode}")
    if report.openai_model_used:
        lines.append(f"**OpenAI Model:** {report.openai_model_used}")
    if report.xai_model_used:
        lines.append(f"**xAI Model:** {report.xai_model_used}")
    lines.append("")
    # Coverage note for partial coverage
    if report.mode == "reddit-only" and missing_keys == "x":
        lines.append("*💡 Tip: Add XAI_API_KEY for X/Twitter data and better triangulation.*")
        lines.append("")
    elif report.mode == "x-only" and missing_keys == "reddit":
        lines.append("*💡 Tip: Add OPENAI_API_KEY for Reddit data and better triangulation.*")
        lines.append("")
    # Reddit items: error takes precedence, then the explicit "none found"
    # note (only in modes where Reddit was expected), then the items.
    if report.reddit_error:
        lines.append("### Reddit Threads")
        lines.append("")
        lines.append(f"**ERROR:** {report.reddit_error}")
        lines.append("")
    elif report.mode in ("both", "reddit-only") and not report.reddit:
        lines.append("### Reddit Threads")
        lines.append("")
        lines.append("*No relevant Reddit threads found for this topic.*")
        lines.append("")
    elif report.reddit:
        lines.append("### Reddit Threads")
        lines.append("")
        for item in report.reddit[:limit]:
            eng_str = ""
            if item.engagement:
                eng = item.engagement
                parts = []
                if eng.score is not None:
                    parts.append(f"{eng.score}pts")
                if eng.num_comments is not None:
                    parts.append(f"{eng.num_comments}cmt")
                if parts:
                    eng_str = f" [{', '.join(parts)}]"
            date_str = f" ({item.date})" if item.date else " (date unknown)"
            # Only annotate dates that are NOT high-confidence.
            conf_str = f" [date:{item.date_confidence}]" if item.date_confidence != "high" else ""
            lines.append(f"**{item.id}** (score:{item.score}) r/{item.subreddit}{date_str}{conf_str}{eng_str}")
            lines.append(f"  {item.title}")
            lines.append(f"  {item.url}")
            lines.append(f"  *{item.why_relevant}*")
            # Top comment insights
            if item.comment_insights:
                lines.append(f"  Insights:")
                for insight in item.comment_insights[:3]:
                    lines.append(f"    - {insight}")
            lines.append("")
    # X items
    if report.x_error:
        lines.append("### X Posts")
        lines.append("")
        lines.append(f"**ERROR:** {report.x_error}")
        lines.append("")
    # NOTE(review): the X empty-branch accepts more modes ("all", "x-web")
    # than the Reddit branch above — confirm whether Reddit should match.
    elif report.mode in ("both", "x-only", "all", "x-web") and not report.x:
        lines.append("### X Posts")
        lines.append("")
        lines.append("*No relevant X posts found for this topic.*")
        lines.append("")
    elif report.x:
        lines.append("### X Posts")
        lines.append("")
        for item in report.x[:limit]:
            eng_str = ""
            if item.engagement:
                eng = item.engagement
                parts = []
                if eng.likes is not None:
                    parts.append(f"{eng.likes}likes")
                if eng.reposts is not None:
                    parts.append(f"{eng.reposts}rt")
                if parts:
                    eng_str = f" [{', '.join(parts)}]"
            date_str = f" ({item.date})" if item.date else " (date unknown)"
            conf_str = f" [date:{item.date_confidence}]" if item.date_confidence != "high" else ""
            lines.append(f"**{item.id}** (score:{item.score}) @{item.author_handle}{date_str}{conf_str}{eng_str}")
            lines.append(f"  {item.text[:200]}...")
            lines.append(f"  {item.url}")
            lines.append(f"  *{item.why_relevant}*")
            lines.append("")
    # Web items (if any - populated by Claude)
    if report.web_error:
        lines.append("### Web Results")
        lines.append("")
        lines.append(f"**ERROR:** {report.web_error}")
        lines.append("")
    elif report.web:
        lines.append("### Web Results")
        lines.append("")
        for item in report.web[:limit]:
            date_str = f" ({item.date})" if item.date else " (date unknown)"
            conf_str = f" [date:{item.date_confidence}]" if item.date_confidence != "high" else ""
            lines.append(f"**{item.id}** [WEB] (score:{item.score}) {item.source_domain}{date_str}{conf_str}")
            lines.append(f"  {item.title}")
            lines.append(f"  {item.url}")
            lines.append(f"  {item.snippet[:150]}...")
            lines.append(f"  *{item.why_relevant}*")
            lines.append("")
    return "\n".join(lines)
def render_context_snippet(report: schema.Report) -> str:
    """Render reusable context snippet.

    Args:
        report: Report data

    Returns:
        Context markdown string
    """
    out = [
        f"# Context: {report.topic} (Last 30 Days)",
        "",
        f"*Generated: {report.generated_at[:10]} | Sources: {report.mode}*",
        "",
        "## Key Sources",
        "",
    ]

    # Take up to 5 items per source, then keep the 7 best overall by score.
    ranked = []
    for r in report.reddit[:5]:
        ranked.append((r.score, "Reddit", r.title, r.url))
    for x in report.x[:5]:
        ranked.append((x.score, "X", x.text[:50] + "...", x.url))
    for w in report.web[:5]:
        ranked.append((w.score, "Web", w.title[:50] + "...", w.url))
    ranked.sort(key=lambda entry: -entry[0])

    # NOTE(review): the url is carried along but never rendered — presumably
    # to keep the snippet compact; confirm before adding links.
    for _score, source, text, _url in ranked[:7]:
        out.append(f"- [{source}] {text}")

    out.extend([
        "",
        "## Summary",
        "",
        "*See full report for best practices, prompt pack, and detailed sources.*",
        "",
    ])
    return "\n".join(out)
def render_full_report(report: schema.Report) -> str:
    """Render full markdown report.

    Emits one section per source (Reddit, X, Web) with every item, then
    placeholder sections that Claude fills in during synthesis.

    Args:
        report: Report data

    Returns:
        Full report markdown
    """
    lines = []
    # Title
    lines.append(f"# {report.topic} - Last 30 Days Research Report")
    lines.append("")
    lines.append(f"**Generated:** {report.generated_at}")
    lines.append(f"**Date Range:** {report.range_from} to {report.range_to}")
    lines.append(f"**Mode:** {report.mode}")
    lines.append("")
    # Models
    lines.append("## Models Used")
    lines.append("")
    if report.openai_model_used:
        lines.append(f"- **OpenAI:** {report.openai_model_used}")
    if report.xai_model_used:
        lines.append(f"- **xAI:** {report.xai_model_used}")
    lines.append("")
    # Reddit section
    if report.reddit:
        lines.append("## Reddit Threads")
        lines.append("")
        for item in report.reddit:
            lines.append(f"### {item.id}: {item.title}")
            lines.append("")
            lines.append(f"- **Subreddit:** r/{item.subreddit}")
            lines.append(f"- **URL:** {item.url}")
            lines.append(f"- **Date:** {item.date or 'Unknown'} (confidence: {item.date_confidence})")
            lines.append(f"- **Score:** {item.score}/100")
            lines.append(f"- **Relevance:** {item.why_relevant}")
            if item.engagement:
                eng = item.engagement
                # NOTE(review): a literal 0 score/comment count is falsy and
                # renders as '?' here — confirm that is intended.
                lines.append(f"- **Engagement:** {eng.score or '?'} points, {eng.num_comments or '?'} comments")
            if item.comment_insights:
                lines.append("")
                lines.append("**Key Insights from Comments:**")
                for insight in item.comment_insights:
                    lines.append(f"- {insight}")
            lines.append("")
    # X section
    if report.x:
        lines.append("## X Posts")
        lines.append("")
        for item in report.x:
            lines.append(f"### {item.id}: @{item.author_handle}")
            lines.append("")
            lines.append(f"- **URL:** {item.url}")
            lines.append(f"- **Date:** {item.date or 'Unknown'} (confidence: {item.date_confidence})")
            lines.append(f"- **Score:** {item.score}/100")
            lines.append(f"- **Relevance:** {item.why_relevant}")
            if item.engagement:
                eng = item.engagement
                lines.append(f"- **Engagement:** {eng.likes or '?'} likes, {eng.reposts or '?'} reposts")
            lines.append("")
            # Full post text as a blockquote.
            lines.append(f"> {item.text}")
            lines.append("")
    # Web section
    if report.web:
        lines.append("## Web Results")
        lines.append("")
        for item in report.web:
            lines.append(f"### {item.id}: {item.title}")
            lines.append("")
            lines.append(f"- **Source:** {item.source_domain}")
            lines.append(f"- **URL:** {item.url}")
            lines.append(f"- **Date:** {item.date or 'Unknown'} (confidence: {item.date_confidence})")
            lines.append(f"- **Score:** {item.score}/100")
            lines.append(f"- **Relevance:** {item.why_relevant}")
            lines.append("")
            lines.append(f"> {item.snippet}")
            lines.append("")
    # Placeholders for Claude synthesis
    lines.append("## Best Practices")
    lines.append("")
    lines.append("*To be synthesized by Claude*")
    lines.append("")
    lines.append("## Prompt Pack")
    lines.append("")
    lines.append("*To be synthesized by Claude*")
    lines.append("")
    return "\n".join(lines)
def _write_json(path, obj) -> None:
    """Serialize *obj* as pretty-printed JSON to *path* (UTF-8)."""
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(obj, f, indent=2)


def write_outputs(
    report: schema.Report,
    raw_openai: Optional[dict] = None,
    raw_xai: Optional[dict] = None,
    raw_reddit_enriched: Optional[list] = None,
):
    """Write all output files.

    Fix: files are now written with an explicit UTF-8 encoding. Scraped
    content routinely contains non-ASCII (emoji, smart quotes), which made
    the markdown writes crash on platforms whose default locale encoding is
    not UTF-8 (e.g. cp1252 on Windows).

    Args:
        report: Report data
        raw_openai: Raw OpenAI API response
        raw_xai: Raw xAI API response
        raw_reddit_enriched: Raw enriched Reddit thread data
    """
    ensure_output_dir()
    # Structured + rendered views of the report
    _write_json(OUTPUT_DIR / "report.json", report.to_dict())
    with open(OUTPUT_DIR / "report.md", 'w', encoding='utf-8') as f:
        f.write(render_full_report(report))
    with open(OUTPUT_DIR / "last30days.context.md", 'w', encoding='utf-8') as f:
        f.write(render_context_snippet(report))
    # Raw API responses, kept only when present, for debugging/reprocessing
    if raw_openai:
        _write_json(OUTPUT_DIR / "raw_openai.json", raw_openai)
    if raw_xai:
        _write_json(OUTPUT_DIR / "raw_xai.json", raw_xai)
    if raw_reddit_enriched:
        _write_json(OUTPUT_DIR / "raw_reddit_threads_enriched.json", raw_reddit_enriched)
def get_context_path() -> str:
    """Return the path of the reusable context snippet file as a string."""
    return str(OUTPUT_DIR / "last30days.context.md")
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/render.py",
"license": "MIT License",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/schema.py | """Data schemas for last30days skill."""
from dataclasses import dataclass, field, asdict
from typing import Any, Dict, List, Optional
from datetime import datetime, timezone
@dataclass
class Engagement:
    """Engagement metrics for one item.

    All fields are optional: Reddit items populate score/num_comments/
    upvote_ratio, X items populate likes/reposts/replies/quotes.
    """
    # Reddit fields
    score: Optional[int] = None
    num_comments: Optional[int] = None
    upvote_ratio: Optional[float] = None
    # X fields
    likes: Optional[int] = None
    reposts: Optional[int] = None
    replies: Optional[int] = None
    quotes: Optional[int] = None
    def to_dict(self) -> Optional[Dict[str, Any]]:
        """Serialize only the populated fields; returns None when empty."""
        d = {}
        if self.score is not None:
            d['score'] = self.score
        if self.num_comments is not None:
            d['num_comments'] = self.num_comments
        if self.upvote_ratio is not None:
            d['upvote_ratio'] = self.upvote_ratio
        if self.likes is not None:
            d['likes'] = self.likes
        if self.reposts is not None:
            d['reposts'] = self.reposts
        if self.replies is not None:
            d['replies'] = self.replies
        if self.quotes is not None:
            d['quotes'] = self.quotes
        # None (not {}) signals "no engagement data" to downstream consumers.
        return d if d else None
@dataclass
class Comment:
    """A single Reddit comment attached to an item."""
    score: int
    date: Optional[str]
    author: str
    excerpt: str
    url: str

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (keys follow field order)."""
        return asdict(self)
@dataclass
class SubScores:
    """Component scores making up an item's overall score."""
    relevance: int = 0
    recency: int = 0
    engagement: int = 0

    def to_dict(self) -> Dict[str, int]:
        """Serialize to a plain dict (keys follow field order)."""
        return asdict(self)
@dataclass
class RedditItem:
    """Normalized Reddit item."""
    id: str
    title: str
    url: str
    subreddit: str
    date: Optional[str] = None
    date_confidence: str = "low"
    engagement: Optional[Engagement] = None
    top_comments: List[Comment] = field(default_factory=list)
    comment_insights: List[str] = field(default_factory=list)
    relevance: float = 0.5
    why_relevant: str = ""
    subs: SubScores = field(default_factory=SubScores)
    score: int = 0

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, expanding nested dataclasses."""
        nested_engagement = self.engagement.to_dict() if self.engagement else None
        return dict(
            id=self.id,
            title=self.title,
            url=self.url,
            subreddit=self.subreddit,
            date=self.date,
            date_confidence=self.date_confidence,
            engagement=nested_engagement,
            top_comments=[c.to_dict() for c in self.top_comments],
            comment_insights=self.comment_insights,
            relevance=self.relevance,
            why_relevant=self.why_relevant,
            subs=self.subs.to_dict(),
            score=self.score,
        )
@dataclass
class XItem:
    """Normalized X item."""
    id: str
    text: str
    url: str
    author_handle: str
    date: Optional[str] = None
    date_confidence: str = "low"
    engagement: Optional[Engagement] = None
    relevance: float = 0.5
    why_relevant: str = ""
    subs: SubScores = field(default_factory=SubScores)
    score: int = 0

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, expanding nested dataclasses."""
        nested_engagement = self.engagement.to_dict() if self.engagement else None
        return dict(
            id=self.id,
            text=self.text,
            url=self.url,
            author_handle=self.author_handle,
            date=self.date,
            date_confidence=self.date_confidence,
            engagement=nested_engagement,
            relevance=self.relevance,
            why_relevant=self.why_relevant,
            subs=self.subs.to_dict(),
            score=self.score,
        )
@dataclass
class WebSearchItem:
    """Normalized web search item (no engagement metrics)."""
    id: str
    title: str
    url: str
    source_domain: str  # e.g., "medium.com", "github.com"
    snippet: str
    date: Optional[str] = None
    date_confidence: str = "low"
    relevance: float = 0.5
    why_relevant: str = ""
    subs: SubScores = field(default_factory=SubScores)
    score: int = 0

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, expanding the nested subscores."""
        return dict(
            id=self.id,
            title=self.title,
            url=self.url,
            source_domain=self.source_domain,
            snippet=self.snippet,
            date=self.date,
            date_confidence=self.date_confidence,
            relevance=self.relevance,
            why_relevant=self.why_relevant,
            subs=self.subs.to_dict(),
            score=self.score,
        )
@dataclass
class Report:
    """Full research report.

    Aggregate of normalized per-source items plus run metadata, per-source
    error messages, and cache provenance flags.
    """
    topic: str
    range_from: str
    range_to: str
    generated_at: str
    mode: str  # 'reddit-only', 'x-only', 'both', 'web-only', etc.
    openai_model_used: Optional[str] = None
    xai_model_used: Optional[str] = None
    reddit: List[RedditItem] = field(default_factory=list)
    x: List[XItem] = field(default_factory=list)
    web: List[WebSearchItem] = field(default_factory=list)
    best_practices: List[str] = field(default_factory=list)
    prompt_pack: List[str] = field(default_factory=list)
    context_snippet_md: str = ""
    # Status tracking
    reddit_error: Optional[str] = None
    x_error: Optional[str] = None
    web_error: Optional[str] = None
    # Cache info
    from_cache: bool = False
    cache_age_hours: Optional[float] = None
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to the on-disk JSON shape.

        Note: range_from/range_to are nested under a single 'range' key,
        and error/cache fields are emitted only when set.
        """
        d = {
            'topic': self.topic,
            'range': {
                'from': self.range_from,
                'to': self.range_to,
            },
            'generated_at': self.generated_at,
            'mode': self.mode,
            'openai_model_used': self.openai_model_used,
            'xai_model_used': self.xai_model_used,
            'reddit': [r.to_dict() for r in self.reddit],
            'x': [x.to_dict() for x in self.x],
            'web': [w.to_dict() for w in self.web],
            'best_practices': self.best_practices,
            'prompt_pack': self.prompt_pack,
            'context_snippet_md': self.context_snippet_md,
        }
        # Optional fields are included only when truthy/set.
        if self.reddit_error:
            d['reddit_error'] = self.reddit_error
        if self.x_error:
            d['x_error'] = self.x_error
        if self.web_error:
            d['web_error'] = self.web_error
        if self.from_cache:
            d['from_cache'] = self.from_cache
        if self.cache_age_hours is not None:
            d['cache_age_hours'] = self.cache_age_hours
        return d
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Report":
        """Create Report from serialized dict (handles cache format).

        Inverse of to_dict(): nested dataclasses (Engagement, Comment,
        SubScores, and the three item types) are rebuilt by hand because
        the cache stores plain dicts.
        """
        # Handle range field conversion: prefer the nested 'range' key,
        # falling back to flat range_from/range_to.
        range_data = data.get('range', {})
        range_from = range_data.get('from', data.get('range_from', ''))
        range_to = range_data.get('to', data.get('range_to', ''))
        # Reconstruct Reddit items
        reddit_items = []
        for r in data.get('reddit', []):
            eng = None
            if r.get('engagement'):
                eng = Engagement(**r['engagement'])
            comments = [Comment(**c) for c in r.get('top_comments', [])]
            subs = SubScores(**r.get('subs', {})) if r.get('subs') else SubScores()
            reddit_items.append(RedditItem(
                id=r['id'],
                title=r['title'],
                url=r['url'],
                subreddit=r['subreddit'],
                date=r.get('date'),
                date_confidence=r.get('date_confidence', 'low'),
                engagement=eng,
                top_comments=comments,
                comment_insights=r.get('comment_insights', []),
                relevance=r.get('relevance', 0.5),
                why_relevant=r.get('why_relevant', ''),
                subs=subs,
                score=r.get('score', 0),
            ))
        # Reconstruct X items
        x_items = []
        for x in data.get('x', []):
            eng = None
            if x.get('engagement'):
                eng = Engagement(**x['engagement'])
            subs = SubScores(**x.get('subs', {})) if x.get('subs') else SubScores()
            x_items.append(XItem(
                id=x['id'],
                text=x['text'],
                url=x['url'],
                author_handle=x['author_handle'],
                date=x.get('date'),
                date_confidence=x.get('date_confidence', 'low'),
                engagement=eng,
                relevance=x.get('relevance', 0.5),
                why_relevant=x.get('why_relevant', ''),
                subs=subs,
                score=x.get('score', 0),
            ))
        # Reconstruct Web items (no engagement for web results)
        web_items = []
        for w in data.get('web', []):
            subs = SubScores(**w.get('subs', {})) if w.get('subs') else SubScores()
            web_items.append(WebSearchItem(
                id=w['id'],
                title=w['title'],
                url=w['url'],
                source_domain=w.get('source_domain', ''),
                snippet=w.get('snippet', ''),
                date=w.get('date'),
                date_confidence=w.get('date_confidence', 'low'),
                relevance=w.get('relevance', 0.5),
                why_relevant=w.get('why_relevant', ''),
                subs=subs,
                score=w.get('score', 0),
            ))
        return cls(
            topic=data['topic'],
            range_from=range_from,
            range_to=range_to,
            generated_at=data['generated_at'],
            mode=data['mode'],
            openai_model_used=data.get('openai_model_used'),
            xai_model_used=data.get('xai_model_used'),
            reddit=reddit_items,
            x=x_items,
            web=web_items,
            best_practices=data.get('best_practices', []),
            prompt_pack=data.get('prompt_pack', []),
            context_snippet_md=data.get('context_snippet_md', ''),
            reddit_error=data.get('reddit_error'),
            x_error=data.get('x_error'),
            web_error=data.get('web_error'),
            from_cache=data.get('from_cache', False),
            cache_age_hours=data.get('cache_age_hours'),
        )
def create_report(
    topic: str,
    from_date: str,
    to_date: str,
    mode: str,
    openai_model: Optional[str] = None,
    xai_model: Optional[str] = None,
) -> Report:
    """Create a new report with metadata.

    The generated_at stamp is timezone-aware UTC in ISO-8601 form.
    """
    stamped_at = datetime.now(timezone.utc).isoformat()
    return Report(
        topic=topic,
        range_from=from_date,
        range_to=to_date,
        generated_at=stamped_at,
        mode=mode,
        openai_model_used=openai_model,
        xai_model_used=xai_model,
    )
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/schema.py",
"license": "MIT License",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/score.py | """Popularity-aware scoring for last30days skill."""
import math
from typing import List, Optional, Union
from . import dates, schema
# Score weights for Reddit/X (has engagement); the three sum to 1.0
WEIGHT_RELEVANCE = 0.45
WEIGHT_RECENCY = 0.25
WEIGHT_ENGAGEMENT = 0.30
# WebSearch weights (no engagement, reweighted to 100%)
WEBSEARCH_WEIGHT_RELEVANCE = 0.55
WEBSEARCH_WEIGHT_RECENCY = 0.45
WEBSEARCH_SOURCE_PENALTY = 15  # Points deducted for lacking engagement
# WebSearch date confidence adjustments
WEBSEARCH_VERIFIED_BONUS = 10  # Bonus for URL-verified recent date (high confidence)
WEBSEARCH_NO_DATE_PENALTY = 20  # Heavy penalty for no date signals (low confidence)
# Neutral engagement subscore used when metrics are unknown, plus a flat
# penalty so unknown-engagement items rank below measured ones.
DEFAULT_ENGAGEMENT = 35
UNKNOWN_ENGAGEMENT_PENALTY = 10
def log1p_safe(x: Optional[int]) -> float:
    """Safe log1p that treats None and negative values as 0.0."""
    return math.log1p(x) if x is not None and x >= 0 else 0.0
def compute_reddit_engagement_raw(engagement: Optional[schema.Engagement]) -> Optional[float]:
    """Compute raw engagement score for Reddit item.

    Formula: 0.55*log1p(score) + 0.40*log1p(num_comments) + 0.05*(upvote_ratio*10)
    """
    # Without either a score or a comment count there is nothing to measure.
    if engagement is None or (engagement.score is None and engagement.num_comments is None):
        return None
    ratio_term = (engagement.upvote_ratio or 0.5) * 10
    return (
        0.55 * log1p_safe(engagement.score)
        + 0.40 * log1p_safe(engagement.num_comments)
        + 0.05 * ratio_term
    )
def compute_x_engagement_raw(engagement: Optional[schema.Engagement]) -> Optional[float]:
    """Compute raw engagement score for X item.

    Formula: 0.55*log1p(likes) + 0.25*log1p(reposts) + 0.15*log1p(replies) + 0.05*log1p(quotes)
    """
    # Without likes or reposts there is nothing meaningful to measure.
    if engagement is None or (engagement.likes is None and engagement.reposts is None):
        return None
    return (
        0.55 * log1p_safe(engagement.likes)
        + 0.25 * log1p_safe(engagement.reposts)
        + 0.15 * log1p_safe(engagement.replies)
        + 0.05 * log1p_safe(engagement.quotes)
    )
def normalize_to_100(values: List[float], default: float = 50) -> List[float]:
    """Normalize a list of values to 0-100 scale.

    None entries are preserved as None in the normal path so callers can
    distinguish "unknown". NOTE(review): the degenerate paths differ —
    all-None input yields `default` for every slot, while zero-range input
    yields 50 for every slot (including None entries); confirm whether
    that asymmetry is intended.

    Args:
        values: Raw values (None entries preserved in the normal path)
        default: Value used for every slot when no entry is numeric

    Returns:
        Normalized values
    """
    non_null = [v for v in values if v is not None]
    if not non_null:
        # Every entry is None (or the list is empty).
        return [default] * len(values)

    lo = min(non_null)
    hi = max(non_null)
    if hi == lo:
        # No spread: everything collapses to the midpoint.
        return [50] * len(values)

    span = hi - lo
    return [None if v is None else ((v - lo) / span) * 100 for v in values]
def score_reddit_items(items: List[schema.RedditItem]) -> List[schema.RedditItem]:
    """Compute scores for Reddit items.

    Args:
        items: List of Reddit items

    Returns:
        The same items, mutated with subscores and an overall 0-100 score
    """
    if not items:
        return items

    raw_engagement = [compute_reddit_engagement_raw(it.engagement) for it in items]
    scaled_engagement = normalize_to_100(raw_engagement)

    for it, raw_eng, norm_eng in zip(items, raw_engagement, scaled_engagement):
        # Model-provided relevance (0..1) converted to 0-100.
        rel = int(it.relevance * 100)
        rec = dates.recency_score(it.date)
        eng = DEFAULT_ENGAGEMENT if norm_eng is None else int(norm_eng)

        it.subs = schema.SubScores(relevance=rel, recency=rec, engagement=eng)

        total = (
            WEIGHT_RELEVANCE * rel
            + WEIGHT_RECENCY * rec
            + WEIGHT_ENGAGEMENT * eng
        )
        # Unknown engagement and uncertain dates both drag the score down
        # so confidently-measured items rank higher.
        if raw_eng is None:
            total -= UNKNOWN_ENGAGEMENT_PENALTY
        if it.date_confidence == "low":
            total -= 10
        elif it.date_confidence == "med":
            total -= 5

        it.score = max(0, min(100, int(total)))
    return items
def score_x_items(items: List[schema.XItem]) -> List[schema.XItem]:
    """Compute scores for X items.

    Args:
        items: List of X items

    Returns:
        Items with updated scores
    """
    if not items:
        return items
    # Raw engagement per item, then normalized onto a 0-100 scale.
    raw_engagement = [compute_x_engagement_raw(it.engagement) for it in items]
    normalized = normalize_to_100(raw_engagement)
    for idx, it in enumerate(items):
        rel = int(it.relevance * 100)        # model relevance -> 0-100
        rec = dates.recency_score(it.date)   # recency subscore
        norm = normalized[idx]
        eng = DEFAULT_ENGAGEMENT if norm is None else int(norm)
        it.subs = schema.SubScores(
            relevance=rel,
            recency=rec,
            engagement=eng,
        )
        total = (
            WEIGHT_RELEVANCE * rel
            + WEIGHT_RECENCY * rec
            + WEIGHT_ENGAGEMENT * eng
        )
        # Penalize items whose engagement could not be measured at all.
        if raw_engagement[idx] is None:
            total -= UNKNOWN_ENGAGEMENT_PENALTY
        # Penalize uncertain dates (heavier for low confidence).
        if it.date_confidence == "low":
            total -= 10
        elif it.date_confidence == "med":
            total -= 5
        it.score = max(0, min(100, int(total)))
    return items
def score_websearch_items(items: List[schema.WebSearchItem]) -> List[schema.WebSearchItem]:
    """Compute scores for WebSearch items WITHOUT engagement metrics.

    Uses a reweighted formula (relevance + recency only) minus a flat source
    penalty so WebSearch items rank below comparable Reddit/X items.

    Date confidence adjustments:
    - High confidence (URL-verified date): bonus
    - Med confidence (snippet-extracted date): no change
    - Low confidence (no date signals): heavy penalty

    Args:
        items: List of WebSearch items

    Returns:
        Items with updated scores
    """
    if not items:
        return items
    for it in items:
        rel = int(it.relevance * 100)        # model relevance -> 0-100
        rec = dates.recency_score(it.date)   # recency subscore
        # Engagement is explicitly zero — no engagement data for web pages.
        it.subs = schema.SubScores(
            relevance=rel,
            recency=rec,
            engagement=0,
        )
        total = (
            WEBSEARCH_WEIGHT_RELEVANCE * rel
            + WEBSEARCH_WEIGHT_RECENCY * rec
        )
        # Flat penalty: WebSearch < Reddit/X at equal relevance/recency.
        total -= WEBSEARCH_SOURCE_PENALTY
        # Reward URL-verified dates; heavily penalize unknown dates.
        if it.date_confidence == "high":
            total += WEBSEARCH_VERIFIED_BONUS
        elif it.date_confidence == "low":
            total -= WEBSEARCH_NO_DATE_PENALTY
        it.score = max(0, min(100, int(total)))
    return items
def sort_items(items: List[Union[schema.RedditItem, schema.XItem, schema.WebSearchItem]]) -> List:
    """Sort items by score (descending), then date, then source priority.

    Args:
        items: List of items to sort

    Returns:
        Sorted items
    """
    def key_for(entry):
        # Missing dates sort last among equal scores.
        date_str = entry.date or "0000-00-00"
        # Source priority: Reddit (0) > X (1) > WebSearch (2).
        if isinstance(entry, schema.RedditItem):
            priority = 0
        elif isinstance(entry, schema.XItem):
            priority = 1
        else:
            priority = 2
        # Title/text as a final tiebreaker keeps ordering deterministic.
        tiebreak = getattr(entry, "title", "") or getattr(entry, "text", "")
        return (
            -entry.score,                        # score descending
            -int(date_str.replace("-", "")),     # date descending
            priority,
            tiebreak,
        )
    return sorted(items, key=key_for)
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/score.py",
"license": "MIT License",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/ui.py | """Terminal UI utilities for last30days skill."""
import os
import sys
import time
import threading
import random
from typing import Optional
# Check if we're in a real terminal (not captured by Claude Code)
IS_TTY = sys.stderr.isatty()
# ANSI color codes
class Colors:
    """ANSI escape sequences used to colorize stderr output."""
    PURPLE = '\033[95m'
    BLUE = '\033[94m'
    CYAN = '\033[96m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    DIM = '\033[2m'
    RESET = '\033[0m'  # restores the terminal's default style
BANNER = f"""{Colors.PURPLE}{Colors.BOLD}
βββ ββββββ ββββββββββββββββββββββββ βββββββ βββββββ ββββββ βββ βββββββββββ
βββ ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ ββββββββββββ
βββ ββββββββββββββββ βββ βββββββββββββββββββ βββββββββββ βββββββ ββββββββ
βββ ββββββββββββββββ βββ βββββββββββββββββββ βββββββββββ βββββ ββββββββ
βββββββββββ βββββββββββ βββ ββββββββββββββββββββββββββββ βββ βββ ββββββββ
βββββββββββ βββββββββββ βββ βββββββ βββββββ βββββββ βββ βββ βββ ββββββββ
{Colors.RESET}{Colors.DIM} 30 days of research. 30 seconds of work.{Colors.RESET}
"""
MINI_BANNER = f"""{Colors.PURPLE}{Colors.BOLD}/last30days{Colors.RESET} {Colors.DIM}Β· researching...{Colors.RESET}"""
# Fun status messages for each phase
# One entry is picked at random by ProgressDisplay each time a phase starts.
REDDIT_MESSAGES = [
    "Diving into Reddit threads...",
    "Scanning subreddits for gold...",
    "Reading what Redditors are saying...",
    "Exploring the front page of the internet...",
    "Finding the good discussions...",
    "Upvoting mentally...",
    "Scrolling through comments...",
]
# Shown while querying xAI's live X search.
X_MESSAGES = [
    "Checking what X is buzzing about...",
    "Reading the timeline...",
    "Finding the hot takes...",
    "Scanning tweets and threads...",
    "Discovering trending insights...",
    "Following the conversation...",
    "Reading between the posts...",
]
# Shown while fetching per-thread engagement details.
ENRICHING_MESSAGES = [
    "Getting the juicy details...",
    "Fetching engagement metrics...",
    "Reading top comments...",
    "Extracting insights...",
    "Analyzing discussions...",
]
# Shown during the scoring/dedupe phase.
PROCESSING_MESSAGES = [
    "Crunching the data...",
    "Scoring and ranking...",
    "Finding patterns...",
    "Removing duplicates...",
    "Organizing findings...",
]
# Shown when no API keys are configured (web-search-only mode).
WEB_ONLY_MESSAGES = [
    "Searching the web...",
    "Finding blogs and docs...",
    "Crawling news sites...",
    "Discovering tutorials...",
]
# Promo message for users without API keys
PROMO_MESSAGE = f"""
{Colors.YELLOW}{Colors.BOLD}ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ{Colors.RESET}
{Colors.YELLOW}β‘ UNLOCK THE FULL POWER OF /last30days{Colors.RESET}
{Colors.DIM}Right now you're using web search only. Add API keys to unlock:{Colors.RESET}
{Colors.YELLOW}π Reddit{Colors.RESET} - Real upvotes, comments, and community insights
ββ Add OPENAI_API_KEY (uses OpenAI's web_search for Reddit)
{Colors.CYAN}π΅ X (Twitter){Colors.RESET} - Real-time posts, likes, reposts from creators
ββ Add XAI_API_KEY (uses xAI's live X search)
{Colors.DIM}Setup:{Colors.RESET} Edit {Colors.BOLD}~/.config/last30days/.env{Colors.RESET}
{Colors.YELLOW}{Colors.BOLD}ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ{Colors.RESET}
"""
PROMO_MESSAGE_PLAIN = """
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
β‘ UNLOCK THE FULL POWER OF /last30days
Right now you're using web search only. Add API keys to unlock:
π Reddit - Real upvotes, comments, and community insights
ββ Add OPENAI_API_KEY (uses OpenAI's web_search for Reddit)
π΅ X (Twitter) - Real-time posts, likes, reposts from creators
ββ Add XAI_API_KEY (uses xAI's live X search)
Setup: Edit ~/.config/last30days/.env
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
"""
# Shorter promo for single missing key
PROMO_SINGLE_KEY = {
"reddit": f"""
{Colors.DIM}π‘ Tip: Add {Colors.YELLOW}OPENAI_API_KEY{Colors.RESET}{Colors.DIM} to ~/.config/last30days/.env for Reddit data with real engagement metrics!{Colors.RESET}
""",
"x": f"""
{Colors.DIM}π‘ Tip: Add {Colors.CYAN}XAI_API_KEY{Colors.RESET}{Colors.DIM} to ~/.config/last30days/.env for X/Twitter data with real likes & reposts!{Colors.RESET}
""",
}
PROMO_SINGLE_KEY_PLAIN = {
"reddit": "\nπ‘ Tip: Add OPENAI_API_KEY to ~/.config/last30days/.env for Reddit data with real engagement metrics!\n",
"x": "\nπ‘ Tip: Add XAI_API_KEY to ~/.config/last30days/.env for X/Twitter data with real likes & reposts!\n",
}
# Spinner frames
# NOTE(review): these glyphs appear mojibake-damaged (UTF-8 decoded with the
# wrong codec) — originally braille spinner characters. Confirm source
# encoding before editing; preserved byte-for-byte here.
SPINNER_FRAMES = ['β ', 'β ', 'β Ή', 'β Έ', 'β Ό', 'β ΄', 'β ¦', 'β §', 'β ', 'β ']
DOTS_FRAMES = [' ', '. ', '.. ', '...']
class Spinner:
    """Animated spinner for long-running operations.

    In a real terminal (IS_TTY) the spinner animates on a daemon thread by
    redrawing one stderr line; when stderr is captured (not a TTY) a single
    static line is printed instead so logs are not flooded with redraws.
    """
    def __init__(self, message: str = "Working", color: str = Colors.CYAN):
        self.message = message
        self.color = color
        self.running = False  # cleared by stop() to end the worker loop
        self.thread: Optional[threading.Thread] = None
        self.frame_idx = 0
        self.shown_static = False  # ensures the non-TTY line prints only once
    def _spin(self):
        # Worker-thread loop: redraw the same stderr line until stopped.
        while self.running:
            frame = SPINNER_FRAMES[self.frame_idx % len(SPINNER_FRAMES)]
            sys.stderr.write(f"\r{self.color}{frame}{Colors.RESET} {self.message} ")
            sys.stderr.flush()
            self.frame_idx += 1
            time.sleep(0.08)
    def start(self):
        """Begin animating (TTY) or print a one-off static line (non-TTY)."""
        self.running = True
        if IS_TTY:
            # Real terminal - animate
            self.thread = threading.Thread(target=self._spin, daemon=True)
            self.thread.start()
        else:
            # Not a TTY (Claude Code) - just print once
            if not self.shown_static:
                sys.stderr.write(f"β³ {self.message}\n")
                sys.stderr.flush()
                self.shown_static = True
    def update(self, message: str):
        """Swap the message; the next frame redraw picks it up."""
        self.message = message
        if not IS_TTY and not self.shown_static:
            # Print update in non-TTY mode
            sys.stderr.write(f"β³ {message}\n")
            sys.stderr.flush()
    def stop(self, final_message: str = ""):
        """Stop the animation, clear the line, optionally print a final line."""
        self.running = False
        if self.thread:
            self.thread.join(timeout=0.2)
        if IS_TTY:
            # Clear the line in real terminal
            sys.stderr.write("\r" + " " * 80 + "\r")
            if final_message:
                sys.stderr.write(f"β {final_message}\n")
        sys.stderr.flush()
class ProgressDisplay:
    """Progress display for research phases.

    Owns at most one Spinner at a time; each start_*/end_* pair brackets a
    research phase and writes status to stderr.
    """
    def __init__(self, topic: str, show_banner: bool = True):
        self.topic = topic
        self.spinner: Optional[Spinner] = None
        self.start_time = time.time()  # for elapsed-time reporting
        if show_banner:
            self._show_banner()
    def _show_banner(self):
        # Rich banner in a real terminal, plain one-liner otherwise.
        if IS_TTY:
            sys.stderr.write(MINI_BANNER + "\n")
            sys.stderr.write(f"{Colors.DIM}Topic: {Colors.RESET}{Colors.BOLD}{self.topic}{Colors.RESET}\n\n")
        else:
            # Simple text for non-TTY
            sys.stderr.write(f"/last30days Β· researching: {self.topic}\n")
        sys.stderr.flush()
    def start_reddit(self):
        """Start the Reddit discovery spinner."""
        msg = random.choice(REDDIT_MESSAGES)
        self.spinner = Spinner(f"{Colors.YELLOW}Reddit{Colors.RESET} {msg}", Colors.YELLOW)
        self.spinner.start()
    def end_reddit(self, count: int):
        """Stop the Reddit spinner with a thread count summary."""
        if self.spinner:
            self.spinner.stop(f"{Colors.YELLOW}Reddit{Colors.RESET} Found {count} threads")
    def start_reddit_enrich(self, current: int, total: int):
        """Start the per-thread enrichment spinner ([current/total])."""
        if self.spinner:
            self.spinner.stop()
        msg = random.choice(ENRICHING_MESSAGES)
        self.spinner = Spinner(f"{Colors.YELLOW}Reddit{Colors.RESET} [{current}/{total}] {msg}", Colors.YELLOW)
        self.spinner.start()
    def update_reddit_enrich(self, current: int, total: int):
        """Advance the enrichment counter on the existing spinner."""
        if self.spinner:
            msg = random.choice(ENRICHING_MESSAGES)
            self.spinner.update(f"{Colors.YELLOW}Reddit{Colors.RESET} [{current}/{total}] {msg}")
    def end_reddit_enrich(self):
        """Stop the enrichment spinner."""
        if self.spinner:
            self.spinner.stop(f"{Colors.YELLOW}Reddit{Colors.RESET} Enriched with engagement data")
    def start_x(self):
        """Start the X discovery spinner."""
        msg = random.choice(X_MESSAGES)
        self.spinner = Spinner(f"{Colors.CYAN}X{Colors.RESET} {msg}", Colors.CYAN)
        self.spinner.start()
    def end_x(self, count: int):
        """Stop the X spinner with a post count summary."""
        if self.spinner:
            self.spinner.stop(f"{Colors.CYAN}X{Colors.RESET} Found {count} posts")
    def start_processing(self):
        """Start the scoring/dedupe spinner."""
        msg = random.choice(PROCESSING_MESSAGES)
        self.spinner = Spinner(f"{Colors.PURPLE}Processing{Colors.RESET} {msg}", Colors.PURPLE)
        self.spinner.start()
    def end_processing(self):
        """Stop the processing spinner without a final message."""
        if self.spinner:
            self.spinner.stop()
    def show_complete(self, reddit_count: int, x_count: int):
        """Print the final summary line with elapsed time and counts."""
        elapsed = time.time() - self.start_time
        if IS_TTY:
            sys.stderr.write(f"\n{Colors.GREEN}{Colors.BOLD}β Research complete{Colors.RESET} ")
            sys.stderr.write(f"{Colors.DIM}({elapsed:.1f}s){Colors.RESET}\n")
            sys.stderr.write(f" {Colors.YELLOW}Reddit:{Colors.RESET} {reddit_count} threads ")
            sys.stderr.write(f"{Colors.CYAN}X:{Colors.RESET} {x_count} posts\n\n")
        else:
            sys.stderr.write(f"β Research complete ({elapsed:.1f}s) - Reddit: {reddit_count} threads, X: {x_count} posts\n")
        sys.stderr.flush()
    def show_cached(self, age_hours: float = None):
        """Announce that cached results are being reused (with optional age)."""
        if age_hours is not None:
            age_str = f" ({age_hours:.1f}h old)"
        else:
            age_str = ""
        sys.stderr.write(f"{Colors.GREEN}β‘{Colors.RESET} {Colors.DIM}Using cached results{age_str} - use --refresh for fresh data{Colors.RESET}\n\n")
        sys.stderr.flush()
    def show_error(self, message: str):
        """Print an error line to stderr."""
        sys.stderr.write(f"{Colors.RED}β Error:{Colors.RESET} {message}\n")
        sys.stderr.flush()
    def start_web_only(self):
        """Show web-only mode indicator."""
        msg = random.choice(WEB_ONLY_MESSAGES)
        self.spinner = Spinner(f"{Colors.GREEN}Web{Colors.RESET} {msg}", Colors.GREEN)
        self.spinner.start()
    def end_web_only(self):
        """End web-only spinner."""
        if self.spinner:
            self.spinner.stop(f"{Colors.GREEN}Web{Colors.RESET} Claude will search the web")
    def show_web_only_complete(self):
        """Show completion for web-only mode."""
        elapsed = time.time() - self.start_time
        if IS_TTY:
            sys.stderr.write(f"\n{Colors.GREEN}{Colors.BOLD}β Ready for web search{Colors.RESET} ")
            sys.stderr.write(f"{Colors.DIM}({elapsed:.1f}s){Colors.RESET}\n")
            sys.stderr.write(f" {Colors.GREEN}Web:{Colors.RESET} Claude will search blogs, docs & news\n\n")
        else:
            sys.stderr.write(f"β Ready for web search ({elapsed:.1f}s)\n")
        sys.stderr.flush()
    def show_promo(self, missing: str = "both"):
        """Show promotional message for missing API keys.

        Args:
            missing: 'both', 'reddit', or 'x' - which keys are missing
        """
        if missing == "both":
            if IS_TTY:
                sys.stderr.write(PROMO_MESSAGE)
            else:
                sys.stderr.write(PROMO_MESSAGE_PLAIN)
        elif missing in PROMO_SINGLE_KEY:
            if IS_TTY:
                sys.stderr.write(PROMO_SINGLE_KEY[missing])
            else:
                sys.stderr.write(PROMO_SINGLE_KEY_PLAIN[missing])
        sys.stderr.flush()
def print_phase(phase: str, message: str):
    """Print a single phase-status line to stderr.

    Args:
        phase: One of "reddit", "x", "process", "done", "error"; anything
            else falls back to the terminal's default color.
        message: Text to display after the phase marker.
    """
    phase_colors = {
        "reddit": Colors.YELLOW,
        "x": Colors.CYAN,
        "process": Colors.PURPLE,
        "done": Colors.GREEN,
        "error": Colors.RED,
    }
    chosen = phase_colors.get(phase, Colors.RESET)
    sys.stderr.write(f"{chosen}βΈ{Colors.RESET} {message}\n")
    sys.stderr.flush()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/ui.py",
"license": "MIT License",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/websearch.py | """WebSearch module for last30days skill.
NOTE: WebSearch uses Claude's built-in WebSearch tool, which runs INSIDE Claude Code.
Unlike Reddit/X which use external APIs, WebSearch results are obtained by Claude
directly and passed to this module for normalization and scoring.
The typical flow is:
1. Claude invokes WebSearch tool with the topic
2. Claude passes results to parse_websearch_results()
3. Results are normalized into WebSearchItem objects
"""
import re
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import urlparse
from . import schema
# Month name mappings for date parsing
MONTH_MAP = {
"jan": 1, "january": 1,
"feb": 2, "february": 2,
"mar": 3, "march": 3,
"apr": 4, "april": 4,
"may": 5,
"jun": 6, "june": 6,
"jul": 7, "july": 7,
"aug": 8, "august": 8,
"sep": 9, "sept": 9, "september": 9,
"oct": 10, "october": 10,
"nov": 11, "november": 11,
"dec": 12, "december": 12,
}
def _plausible_ymd(year: str, month: str, day: str) -> bool:
    """Return True if the digit strings form a plausible recent date."""
    return (
        2020 <= int(year) <= 2030
        and 1 <= int(month) <= 12
        and 1 <= int(day) <= 31
    )


def extract_date_from_url(url: str) -> Optional[str]:
    """Try to extract a date from URL path.

    Many sites embed dates in URLs like:
    - /2026/01/24/article-title
    - /2026-01-24/article
    - /blog/20260124/title

    Args:
        url: URL to parse

    Returns:
        Date string in YYYY-MM-DD format, or None
    """
    # Consolidates the previously-triplicated match/validate logic into one
    # loop over the known URL date layouts, most common first.
    patterns = (
        r'/(\d{4})/(\d{2})/(\d{2})/',     # /YYYY/MM/DD/
        r'/(\d{4})-(\d{2})-(\d{2})[-/]',  # /YYYY-MM-DD/ or /YYYY-MM-DD-
        r'/(\d{4})(\d{2})(\d{2})/',       # /YYYYMMDD/ (compact)
    )
    for pattern in patterns:
        match = re.search(pattern, url)
        if match:
            year, month, day = match.groups()
            if _plausible_ymd(year, month, day):
                return f"{year}-{month}-{day}"
    return None
def extract_date_from_snippet(text: str) -> Optional[str]:
    """Try to extract a date from text snippet or title.

    Recognizes, in priority order:
    - "January 24, 2026" / "Jan 24, 2026"
    - "24 January 2026"
    - ISO "2026-01-24"
    - Relative phrases: "yesterday", "today", "N days ago", "N hours ago",
      "last week", "this week"

    Args:
        text: Text to parse

    Returns:
        Date string in YYYY-MM-DD format, or None
    """
    if not text:
        return None
    lowered = text.lower()
    # Shared month-name alternation used by the two worded-date patterns.
    month_name = (
        r'(jan(?:uary)?|feb(?:ruary)?|mar(?:ch)?|apr(?:il)?|may|jun(?:e)?|'
        r'jul(?:y)?|aug(?:ust)?|sep(?:t(?:ember)?)?|oct(?:ober)?|nov(?:ember)?|dec(?:ember)?)'
    )
    # "Month DD, YYYY" (e.g. "January 24, 2026")
    match = re.search(
        r'\b' + month_name + r'\s+(\d{1,2})(?:st|nd|rd|th)?,?\s*(\d{4})\b',
        lowered,
    )
    if match:
        name, day, year = match.groups()
        month = MONTH_MAP.get(name[:3])
        if month and 2020 <= int(year) <= 2030 and 1 <= int(day) <= 31:
            return f"{year}-{month:02d}-{int(day):02d}"
    # "DD Month YYYY" (e.g. "24 January 2026")
    match = re.search(
        r'\b(\d{1,2})(?:st|nd|rd|th)?\s+' + month_name + r'\s+(\d{4})\b',
        lowered,
    )
    if match:
        day, name, year = match.groups()
        month = MONTH_MAP.get(name[:3])
        if month and 2020 <= int(year) <= 2030 and 1 <= int(day) <= 31:
            return f"{year}-{month:02d}-{int(day):02d}"
    # ISO "YYYY-MM-DD" (digits only, so original casing is fine)
    match = re.search(r'\b(\d{4})-(\d{2})-(\d{2})\b', text)
    if match:
        year, month, day = match.groups()
        if 2020 <= int(year) <= 2030 and 1 <= int(month) <= 12 and 1 <= int(day) <= 31:
            return f"{year}-{month}-{day}"
    # Relative phrases resolve against the current local date.
    now = datetime.now()
    if "yesterday" in lowered:
        return (now - timedelta(days=1)).strftime("%Y-%m-%d")
    if "today" in lowered:
        return now.strftime("%Y-%m-%d")
    match = re.search(r'\b(\d+)\s*days?\s*ago\b', lowered)
    if match and int(match.group(1)) <= 60:  # ignore implausibly old claims
        return (now - timedelta(days=int(match.group(1)))).strftime("%Y-%m-%d")
    if re.search(r'\b(\d+)\s*hours?\s*ago\b', lowered):
        return now.strftime("%Y-%m-%d")
    if "last week" in lowered:
        return (now - timedelta(days=7)).strftime("%Y-%m-%d")
    if "this week" in lowered:
        return (now - timedelta(days=3)).strftime("%Y-%m-%d")
    return None
def extract_date_signals(
    url: str,
    snippet: str,
    title: str,
) -> Tuple[Optional[str], str]:
    """Extract date from any available signal.

    Tries URL first (most reliable), then snippet, then title.

    Args:
        url: Page URL
        snippet: Page snippet/description
        title: Page title

    Returns:
        Tuple of (date_string, confidence)
        - date from URL: 'high' confidence
        - date from snippet/title: 'med' confidence
        - no date found: None, 'low' confidence
    """
    # URL-embedded dates are the most trustworthy signal.
    from_url = extract_date_from_url(url)
    if from_url:
        return from_url, "high"
    # Fall back to text signals: snippet first, then title.
    for candidate in (snippet, title):
        found = extract_date_from_snippet(candidate)
        if found:
            return found, "med"
    return None, "low"
# Domains to exclude (Reddit and X are handled separately)
# NOTE: is_excluded_domain() compares the exact netloc against this set, so
# subdomains not listed here (e.g. np.reddit.com) are NOT excluded.
EXCLUDED_DOMAINS = {
    "reddit.com",
    "www.reddit.com",
    "old.reddit.com",
    "twitter.com",
    "www.twitter.com",
    "x.com",
    "www.x.com",
    "mobile.twitter.com",
}
def extract_domain(url: str) -> str:
    """Extract the domain from a URL.

    Args:
        url: Full URL

    Returns:
        Lower-cased domain (e.g. "medium.com") with any leading "www."
        stripped for cleaner display; empty string if parsing fails.
    """
    try:
        host = urlparse(url).netloc.lower()
        return host[4:] if host.startswith("www.") else host
    except Exception:
        return ""
def is_excluded_domain(url: str) -> bool:
    """Check if URL is from an excluded domain (Reddit/X).

    Args:
        url: URL to check

    Returns:
        True if the URL's exact host appears in EXCLUDED_DOMAINS.
    """
    try:
        return urlparse(url).netloc.lower() in EXCLUDED_DOMAINS
    except Exception:
        # Unparseable URLs are not excluded; downstream code decides.
        return False
def parse_websearch_results(
    results: List[Dict[str, Any]],
    topic: str,
    from_date: str = "",
    to_date: str = "",
) -> List[Dict[str, Any]]:
    """Parse WebSearch results into normalized format.

    This function expects results from Claude's WebSearch tool.
    Each result should have: title, url, snippet, and optionally date/relevance.

    Uses "Date Detective" approach:
    1. Extract dates from URLs (high confidence)
    2. Extract dates from snippets/titles (med confidence)
    3. Hard filter: exclude items with verified old dates
    4. Keep items with no date signals (with low confidence penalty)

    Args:
        results: List of WebSearch result dicts
        topic: Original search topic (for context)
        from_date: Start date for filtering (YYYY-MM-DD)
        to_date: End date for filtering (YYYY-MM-DD)

    Returns:
        List of normalized item dicts ready for WebSearchItem creation
    """
    items = []
    for i, result in enumerate(results):
        if not isinstance(result, dict):
            continue
        url = result.get("url", "")
        if not url:
            continue
        # Skip Reddit/X URLs (handled separately)
        if is_excluded_domain(url):
            continue
        title = str(result.get("title", "")).strip()
        snippet = str(result.get("snippet", result.get("description", ""))).strip()
        if not title and not snippet:
            continue
        # Use Date Detective to extract date signals
        date = result.get("date")  # Use provided date if available
        date_confidence = "low"
        if date and re.match(r'^\d{4}-\d{2}-\d{2}$', str(date)):
            # Provided date is already well-formed
            date_confidence = "med"
        else:
            # Bug fix: a malformed provided date (e.g. "Jan 2026") used to
            # survive here when extraction also failed, leaking into the
            # string comparisons below and into the output item. Discard it
            # and rely on URL/snippet/title extraction instead.
            date = None
            extracted_date, confidence = extract_date_signals(url, snippet, title)
            if extracted_date:
                date = extracted_date
                date_confidence = confidence
        # Hard filter: if we found a date and it's too old, skip
        if date and from_date and date < from_date:
            continue  # DROP - verified old content
        # Hard filter: if date is in the future, skip (parsing error)
        if date and to_date and date > to_date:
            continue  # DROP - future date
        # Relevance defaults to 0.5 and is clamped into [0, 1]
        relevance = result.get("relevance", 0.5)
        try:
            relevance = min(1.0, max(0.0, float(relevance)))
        except (TypeError, ValueError):
            relevance = 0.5
        items.append({
            "id": f"W{i+1}",
            "title": title[:200],  # Truncate long titles
            "url": url,
            "source_domain": extract_domain(url),
            "snippet": snippet[:500],  # Truncate long snippets
            "date": date,
            "date_confidence": date_confidence,
            "relevance": relevance,
            "why_relevant": str(result.get("why_relevant", "")).strip(),
        })
    return items
def normalize_websearch_items(
    items: List[Dict[str, Any]],
    from_date: str,
    to_date: str,
) -> List[schema.WebSearchItem]:
    """Convert parsed dicts to WebSearchItem objects.

    Args:
        items: List of parsed item dicts
        from_date: Start of date range (YYYY-MM-DD)
        to_date: End of date range (YYYY-MM-DD)

    Returns:
        List of WebSearchItem objects
    """
    return [
        schema.WebSearchItem(
            id=entry["id"],
            title=entry["title"],
            url=entry["url"],
            source_domain=entry["source_domain"],
            snippet=entry["snippet"],
            date=entry.get("date"),
            date_confidence=entry.get("date_confidence", "low"),
            relevance=entry.get("relevance", 0.5),
            why_relevant=entry.get("why_relevant", ""),
        )
        for entry in items
    ]
def dedupe_websearch(items: List[schema.WebSearchItem]) -> List[schema.WebSearchItem]:
    """Remove duplicate WebSearch items.

    Deduplication is based on URL (case-insensitive, ignoring a trailing
    slash). First occurrence wins; original order is preserved.

    Args:
        items: List of WebSearchItem objects

    Returns:
        Deduplicated list
    """
    unique = []
    seen = set()
    for entry in items:
        key = entry.url.lower().rstrip("/")
        if key in seen:
            continue
        seen.add(key)
        unique.append(entry)
    return unique
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/websearch.py",
"license": "MIT License",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/scripts/lib/xai_x.py | """xAI API client for X (Twitter) discovery."""
import json
import re
import sys
from typing import Any, Dict, List, Optional
from . import http
def _log_error(msg: str):
"""Log error to stderr."""
sys.stderr.write(f"[X ERROR] {msg}\n")
sys.stderr.flush()
# xAI uses responses endpoint with Agent Tools API
XAI_RESPONSES_URL = "https://api.x.ai/v1/responses"
# Depth configurations: (min, max) posts to request
DEPTH_CONFIG = {
"quick": (8, 12),
"default": (20, 30),
"deep": (40, 60),
}
X_SEARCH_PROMPT = """You have access to real-time X (Twitter) data. Search for posts about: {topic}
Focus on posts from {from_date} to {to_date}. Find {min_items}-{max_items} high-quality, relevant posts.
IMPORTANT: Return ONLY valid JSON in this exact format, no other text:
{{
"items": [
{{
"text": "Post text content (truncated if long)",
"url": "https://x.com/user/status/...",
"author_handle": "username",
"date": "YYYY-MM-DD or null if unknown",
"engagement": {{
"likes": 100,
"reposts": 25,
"replies": 15,
"quotes": 5
}},
"why_relevant": "Brief explanation of relevance",
"relevance": 0.85
}}
]
}}
Rules:
- relevance is 0.0 to 1.0 (1.0 = highly relevant)
- date must be YYYY-MM-DD format or null
- engagement can be null if unknown
- Include diverse voices/accounts if applicable
- Prefer posts with substantive content, not just links"""
def search_x(
    api_key: str,
    model: str,
    topic: str,
    from_date: str,
    to_date: str,
    depth: str = "default",
    mock_response: Optional[Dict] = None,
) -> Dict[str, Any]:
    """Search X for relevant posts using xAI API with live search.

    Args:
        api_key: xAI API key
        model: Model to use
        topic: Search topic
        from_date: Start date (YYYY-MM-DD)
        to_date: End date (YYYY-MM-DD)
        depth: Research depth - "quick", "default", or "deep"
        mock_response: Mock response for testing

    Returns:
        Raw API response
    """
    # Test hook: short-circuit before any network work.
    if mock_response is not None:
        return mock_response
    min_items, max_items = DEPTH_CONFIG.get(depth, DEPTH_CONFIG["default"])
    # Deeper searches get more generous API timeouts.
    if depth == "quick":
        timeout = 90
    elif depth == "default":
        timeout = 120
    else:
        timeout = 180
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    prompt = X_SEARCH_PROMPT.format(
        topic=topic,
        from_date=from_date,
        to_date=to_date,
        min_items=min_items,
        max_items=max_items,
    )
    # Agent Tools API payload with the x_search tool enabled.
    payload = {
        "model": model,
        "tools": [{"type": "x_search"}],
        "input": [{"role": "user", "content": prompt}],
    }
    return http.post(XAI_RESPONSES_URL, payload, headers=headers, timeout=timeout)
def _extract_output_text(response: Dict[str, Any]) -> str:
    """Pull the model's text output from Agent Tools or chat-style responses."""
    output = response.get("output")
    if isinstance(output, str):
        return output
    if isinstance(output, list):
        for entry in output:
            if isinstance(entry, str):
                if entry:
                    return entry
            elif isinstance(entry, dict):
                if entry.get("type") == "message":
                    for c in entry.get("content", []):
                        if isinstance(c, dict) and c.get("type") == "output_text":
                            text = c.get("text", "")
                            if text:
                                return text
                elif entry.get("text"):
                    return entry["text"]
    # Older chat-completions format
    for choice in response.get("choices", []):
        if "message" in choice:
            return choice["message"].get("content", "")
    return ""


def _clamped_relevance(value: Any) -> float:
    """Coerce a model-supplied relevance into [0, 1]; 0.5 on junk input."""
    try:
        return min(1.0, max(0.0, float(value)))
    except (TypeError, ValueError):
        return 0.5


def _parse_engagement(raw: Any) -> Optional[Dict[str, Optional[int]]]:
    """Convert a raw engagement dict; falsy or non-numeric counts become None.

    Note: zero counts intentionally map to None (treated as unknown), matching
    the original behavior that downstream scoring relies on.
    """
    if not isinstance(raw, dict):
        return None
    parsed = {}
    for key in ("likes", "reposts", "replies", "quotes"):
        value = raw.get(key)
        try:
            parsed[key] = int(value) if value else None
        except (TypeError, ValueError):
            # Robustness fix: a non-numeric count no longer aborts parsing.
            parsed[key] = None
    return parsed


def parse_x_response(response: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Parse xAI response to extract X items.

    Args:
        response: Raw API response

    Returns:
        List of cleaned item dicts (id, text, url, author_handle, date,
        engagement, why_relevant, relevance). Items without a URL are dropped;
        malformed dates become None; malformed relevance defaults to 0.5
        (robustness fix: previously a junk relevance raised ValueError and
        lost the entire batch).
    """
    items = []
    # Check for API errors first
    if "error" in response and response["error"]:
        error = response["error"]
        err_msg = error.get("message", str(error)) if isinstance(error, dict) else str(error)
        _log_error(f"xAI API error: {err_msg}")
        if http.DEBUG:
            _log_error(f"Full error response: {json.dumps(response, indent=2)[:1000]}")
        return items
    output_text = _extract_output_text(response)
    if not output_text:
        return items
    # Extract the JSON object holding "items" from the (possibly chatty) text
    json_match = re.search(r'\{[\s\S]*"items"[\s\S]*\}', output_text)
    if json_match:
        try:
            data = json.loads(json_match.group())
            items = data.get("items", [])
        except json.JSONDecodeError:
            pass
    # Validate and clean items
    clean_items = []
    for i, item in enumerate(items):
        if not isinstance(item, dict):
            continue
        url = item.get("url", "")
        if not url:
            continue
        date = item.get("date")
        if date and not re.match(r'^\d{4}-\d{2}-\d{2}$', str(date)):
            date = None  # discard malformed dates
        clean_items.append({
            "id": f"X{i+1}",
            "text": str(item.get("text", "")).strip()[:500],  # Truncate long text
            "url": url,
            "author_handle": str(item.get("author_handle", "")).strip().lstrip("@"),
            "date": date,
            "engagement": _parse_engagement(item.get("engagement")),
            "why_relevant": str(item.get("why_relevant", "")).strip(),
            "relevance": _clamped_relevance(item.get("relevance", 0.5)),
        })
    return clean_items
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/scripts/lib/xai_x.py",
"license": "MIT License",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/last30days/tests/test_cache.py | """Tests for cache module."""
import sys
import unittest
from pathlib import Path
# Add lib to path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from lib import cache
class TestGetCacheKey(unittest.TestCase):
    """Behavioral checks for cache.get_cache_key."""
    def test_returns_string(self):
        key = cache.get_cache_key("test topic", "2026-01-01", "2026-01-31", "both")
        self.assertIsInstance(key, str)
    def test_consistent_for_same_inputs(self):
        args = ("test topic", "2026-01-01", "2026-01-31", "both")
        self.assertEqual(cache.get_cache_key(*args), cache.get_cache_key(*args))
    def test_different_for_different_inputs(self):
        self.assertNotEqual(
            cache.get_cache_key("topic a", "2026-01-01", "2026-01-31", "both"),
            cache.get_cache_key("topic b", "2026-01-01", "2026-01-31", "both"),
        )
    def test_key_length(self):
        key = cache.get_cache_key("test", "2026-01-01", "2026-01-31", "both")
        self.assertEqual(len(key), 16)
class TestCachePath(unittest.TestCase):
    """Checks for cache.get_cache_path."""
    def test_returns_path(self):
        self.assertIsInstance(cache.get_cache_path("abc123"), Path)
    def test_has_json_extension(self):
        self.assertEqual(cache.get_cache_path("abc123").suffix, ".json")
class TestCacheValidity(unittest.TestCase):
    """Checks for cache.is_cache_valid."""
    def test_nonexistent_file_is_invalid(self):
        missing = Path("/nonexistent/path/file.json")
        self.assertFalse(cache.is_cache_valid(missing))
class TestModelCache(unittest.TestCase):
    """Checks for cache.get_cached_model."""
    def test_get_cached_model_returns_none_for_missing(self):
        value = cache.get_cached_model("nonexistent_provider")
        # A prior run may have populated the cache: accept None or a string,
        # but the call must not raise.
        self.assertTrue(value is None or isinstance(value, str))
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/tests/test_cache.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
sickn33/antigravity-awesome-skills:skills/last30days/tests/test_dates.py | """Tests for dates module."""
import sys
import unittest
from datetime import datetime, timedelta, timezone
from pathlib import Path
# Add lib to path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from lib import dates
class TestGetDateRange(unittest.TestCase):
    """Behavioral checks for dates.get_date_range."""

    def test_returns_tuple_of_two_strings(self):
        # Both endpoints of the range come back as strings.
        start, end = dates.get_date_range(30)
        self.assertIsInstance(start, str)
        self.assertIsInstance(end, str)

    def test_date_format(self):
        # Both endpoints use ISO YYYY-MM-DD format.
        iso_pattern = r'^\d{4}-\d{2}-\d{2}$'
        start, end = dates.get_date_range(30)
        self.assertRegex(start, iso_pattern)
        self.assertRegex(end, iso_pattern)

    def test_range_is_correct_days(self):
        # The span between the endpoints equals the requested day count.
        start, end = dates.get_date_range(30)
        parsed_start = datetime.strptime(start, "%Y-%m-%d")
        parsed_end = datetime.strptime(end, "%Y-%m-%d")
        self.assertEqual((parsed_end - parsed_start).days, 30)
class TestParseDate(unittest.TestCase):
    """Behavioral checks for dates.parse_date."""

    def test_parse_iso_date(self):
        # An ISO date string parses into the matching datetime fields.
        parsed = dates.parse_date("2026-01-15")
        self.assertIsNotNone(parsed)
        self.assertEqual(parsed.year, 2026)
        self.assertEqual(parsed.month, 1)
        self.assertEqual(parsed.day, 15)

    def test_parse_timestamp(self):
        # Unix epoch seconds (2026-01-15 00:00:00 UTC) are accepted too.
        self.assertIsNotNone(dates.parse_date("1768435200"))

    def test_parse_none(self):
        # None propagates through as None rather than raising.
        self.assertIsNone(dates.parse_date(None))

    def test_parse_empty_string(self):
        # An empty string is treated the same as no date.
        self.assertIsNone(dates.parse_date(""))
class TestTimestampToDate(unittest.TestCase):
    """Behavioral checks for dates.timestamp_to_date."""

    def test_valid_timestamp(self):
        # 1768435200 is 2026-01-15 00:00:00 UTC.
        self.assertEqual(dates.timestamp_to_date(1768435200), "2026-01-15")

    def test_none_timestamp(self):
        # None in, None out.
        self.assertIsNone(dates.timestamp_to_date(None))
class TestGetDateConfidence(unittest.TestCase):
    """Behavioral checks for dates.get_date_confidence."""

    def test_high_confidence_in_range(self):
        # A date inside the window earns high confidence.
        level = dates.get_date_confidence("2026-01-15", "2026-01-01", "2026-01-31")
        self.assertEqual(level, "high")

    def test_low_confidence_before_range(self):
        # A date before the window drops to low confidence.
        level = dates.get_date_confidence("2025-12-15", "2026-01-01", "2026-01-31")
        self.assertEqual(level, "low")

    def test_low_confidence_no_date(self):
        # A missing date is always low confidence.
        level = dates.get_date_confidence(None, "2026-01-01", "2026-01-31")
        self.assertEqual(level, "low")
class TestDaysAgo(unittest.TestCase):
    """Behavioral checks for dates.days_ago."""

    def test_today(self):
        # Today's date is zero days ago.
        current = datetime.now(timezone.utc).date().isoformat()
        self.assertEqual(dates.days_ago(current), 0)

    def test_none_date(self):
        # No date means no answer.
        self.assertIsNone(dates.days_ago(None))
class TestRecencyScore(unittest.TestCase):
    """Behavioral checks for dates.recency_score."""

    def test_today_is_100(self):
        # The freshest possible date maxes out the score.
        current = datetime.now(timezone.utc).date().isoformat()
        self.assertEqual(dates.recency_score(current), 100)

    def test_30_days_ago_is_0(self):
        # The oldest in-window date bottoms out the score.
        stale = (datetime.now(timezone.utc).date() - timedelta(days=30)).isoformat()
        self.assertEqual(dates.recency_score(stale), 0)

    def test_15_days_ago_is_50(self):
        # Halfway through the window scores the midpoint.
        halfway = (datetime.now(timezone.utc).date() - timedelta(days=15)).isoformat()
        self.assertEqual(dates.recency_score(halfway), 50)

    def test_none_date_is_0(self):
        # Undated items get the minimum recency score.
        self.assertEqual(dates.recency_score(None), 0)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/tests/test_dates.py",
"license": "MIT License",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
sickn33/antigravity-awesome-skills:skills/last30days/tests/test_dedupe.py | """Tests for dedupe module."""
import sys
import unittest
from pathlib import Path
# Add lib to path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from lib import dedupe, schema
class TestNormalizeText(unittest.TestCase):
    """Behavioral checks for dedupe.normalize_text."""

    def test_lowercase(self):
        # Mixed case folds to lowercase.
        self.assertEqual(dedupe.normalize_text("HELLO World"), "hello world")

    def test_removes_punctuation(self):
        # Punctuation becomes whitespace, then runs collapse to one space.
        self.assertEqual(dedupe.normalize_text("Hello, World!"), "hello world")

    def test_collapses_whitespace(self):
        # Interior whitespace collapses to a single space.
        self.assertEqual(dedupe.normalize_text("hello world"), "hello world")
class TestGetNgrams(unittest.TestCase):
    """Behavioral checks for dedupe.get_ngrams."""

    def test_short_text(self):
        # Text shorter than n collapses to a single-element set.
        self.assertEqual(dedupe.get_ngrams("ab", n=3), {"ab"})

    def test_normal_text(self):
        # Every sliding trigram of the input must be present.
        grams = dedupe.get_ngrams("hello", n=3)
        for gram in ("hel", "ell", "llo"):
            self.assertIn(gram, grams)
class TestJaccardSimilarity(unittest.TestCase):
    """Behavioral checks for dedupe.jaccard_similarity."""

    def test_identical_sets(self):
        # A set compared with itself is perfectly similar.
        letters = {"a", "b", "c"}
        self.assertEqual(dedupe.jaccard_similarity(letters, letters), 1.0)

    def test_disjoint_sets(self):
        # No overlap means zero similarity.
        left, right = {"a", "b", "c"}, {"d", "e", "f"}
        self.assertEqual(dedupe.jaccard_similarity(left, right), 0.0)

    def test_partial_overlap(self):
        # |{b,c}| / |{a,b,c,d}| = 2 / 4 = 0.5.
        left, right = {"a", "b", "c"}, {"b", "c", "d"}
        self.assertEqual(dedupe.jaccard_similarity(left, right), 0.5)

    def test_empty_sets(self):
        # Two empty sets are defined as dissimilar, not an error.
        self.assertEqual(dedupe.jaccard_similarity(set(), set()), 0.0)
class TestFindDuplicates(unittest.TestCase):
    """Behavioral checks for dedupe.find_duplicates."""

    def test_no_duplicates(self):
        # Unrelated titles produce no duplicate pairs.
        distinct = [
            schema.RedditItem(id="R1", title="Completely different topic A", url="", subreddit=""),
            schema.RedditItem(id="R2", title="Another unrelated subject B", url="", subreddit=""),
        ]
        self.assertEqual(dedupe.find_duplicates(distinct), [])

    def test_finds_duplicates(self):
        # Near-identical titles are flagged as a single (index, index) pair.
        near_dupes = [
            schema.RedditItem(id="R1", title="Best practices for Claude Code skills", url="", subreddit=""),
            schema.RedditItem(id="R2", title="Best practices for Claude Code skills guide", url="", subreddit=""),
        ]
        pairs = dedupe.find_duplicates(near_dupes, threshold=0.7)
        self.assertEqual(len(pairs), 1)
        self.assertEqual(pairs[0], (0, 1))
class TestDedupeItems(unittest.TestCase):
    """Behavioral checks for dedupe.dedupe_items."""

    def test_keeps_higher_scored(self):
        # When two items collide, the higher-scored one survives.
        colliding = [
            schema.RedditItem(id="R1", title="Best practices for skills", url="", subreddit="", score=90),
            schema.RedditItem(id="R2", title="Best practices for skills guide", url="", subreddit="", score=50),
        ]
        survivors = dedupe.dedupe_items(colliding, threshold=0.6)
        self.assertEqual(len(survivors), 1)
        self.assertEqual(survivors[0].id, "R1")

    def test_keeps_all_unique(self):
        # Unrelated items all survive deduplication.
        distinct = [
            schema.RedditItem(id="R1", title="Topic about apples", url="", subreddit="", score=90),
            schema.RedditItem(id="R2", title="Discussion of oranges", url="", subreddit="", score=50),
        ]
        self.assertEqual(len(dedupe.dedupe_items(distinct)), 2)

    def test_empty_list(self):
        # Nothing in, nothing out.
        self.assertEqual(dedupe.dedupe_items([]), [])

    def test_single_item(self):
        # A lone item is trivially unique.
        only = [schema.RedditItem(id="R1", title="Test", url="", subreddit="")]
        self.assertEqual(len(dedupe.dedupe_items(only)), 1)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/tests/test_dedupe.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
sickn33/antigravity-awesome-skills:skills/last30days/tests/test_models.py | """Tests for models module."""
import sys
import unittest
from pathlib import Path
# Add lib to path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from lib import models
class TestParseVersion(unittest.TestCase):
    """Behavioral checks for models.parse_version."""

    def test_simple_version(self):
        # A bare major version parses to a 1-tuple.
        self.assertEqual(models.parse_version("gpt-5"), (5,))

    def test_minor_version(self):
        # Major.minor parses to a 2-tuple.
        self.assertEqual(models.parse_version("gpt-5.2"), (5, 2))

    def test_patch_version(self):
        # Major.minor.patch parses to a 3-tuple.
        self.assertEqual(models.parse_version("gpt-5.2.1"), (5, 2, 1))

    def test_no_version(self):
        # Names without a numeric suffix yield None.
        self.assertIsNone(models.parse_version("custom-model"))
class TestIsMainlineOpenAIModel(unittest.TestCase):
    """Behavioral checks for models.is_mainline_openai_model."""

    def test_gpt5_is_mainline(self):
        self.assertTrue(models.is_mainline_openai_model("gpt-5"))

    def test_gpt52_is_mainline(self):
        # Point releases of the mainline family still count.
        self.assertTrue(models.is_mainline_openai_model("gpt-5.2"))

    def test_gpt5_mini_is_not_mainline(self):
        # Variant suffixes ("-mini") are excluded.
        self.assertFalse(models.is_mainline_openai_model("gpt-5-mini"))

    def test_gpt4_is_not_mainline(self):
        # Older major versions are excluded.
        self.assertFalse(models.is_mainline_openai_model("gpt-4"))
class TestSelectOpenAIModel(unittest.TestCase):
    """Behavioral checks for models.select_openai_model."""

    def test_pinned_policy(self):
        # A pinned policy returns the pin verbatim.
        chosen = models.select_openai_model("fake-key", policy="pinned", pin="gpt-5.1")
        self.assertEqual(chosen, "gpt-5.1")

    def test_auto_with_mock_models(self):
        # Auto policy picks the newest mainline model from the listing.
        listing = [
            {"id": "gpt-5.2", "created": 1704067200},
            {"id": "gpt-5.1", "created": 1701388800},
            {"id": "gpt-5", "created": 1698710400},
        ]
        chosen = models.select_openai_model("fake-key", policy="auto", mock_models=listing)
        self.assertEqual(chosen, "gpt-5.2")

    def test_auto_filters_variants(self):
        # Variant models such as "-mini" are ignored during auto selection.
        listing = [
            {"id": "gpt-5.2", "created": 1704067200},
            {"id": "gpt-5-mini", "created": 1704067200},
            {"id": "gpt-5.1", "created": 1701388800},
        ]
        chosen = models.select_openai_model("fake-key", policy="auto", mock_models=listing)
        self.assertEqual(chosen, "gpt-5.2")
class TestSelectXAIModel(unittest.TestCase):
    """Behavioral checks for models.select_xai_model."""

    def test_latest_policy(self):
        # Latest policy resolves to the rolling "-latest" alias.
        chosen = models.select_xai_model("fake-key", policy="latest")
        self.assertEqual(chosen, "grok-4-latest")

    def test_stable_policy(self):
        # Remove any cached selection so the stable lookup starts fresh.
        from lib import cache
        cache.MODEL_CACHE_FILE.unlink(missing_ok=True)
        chosen = models.select_xai_model("fake-key", policy="stable")
        self.assertEqual(chosen, "grok-4")

    def test_pinned_policy(self):
        # A pinned policy returns the pin verbatim.
        chosen = models.select_xai_model("fake-key", policy="pinned", pin="grok-3")
        self.assertEqual(chosen, "grok-3")
class TestGetModels(unittest.TestCase):
    """Behavioral checks for models.get_models."""

    def test_no_keys_returns_none(self):
        # With no API keys configured, neither provider is selected.
        selected = models.get_models({})
        self.assertIsNone(selected["openai"])
        self.assertIsNone(selected["xai"])

    def test_openai_key_only(self):
        # Only the OpenAI slot is filled when only that key is present.
        listing = [{"id": "gpt-5.2", "created": 1704067200}]
        selected = models.get_models({"OPENAI_API_KEY": "sk-test"}, mock_openai_models=listing)
        self.assertEqual(selected["openai"], "gpt-5.2")
        self.assertIsNone(selected["xai"])

    def test_both_keys(self):
        # Both providers resolve when both keys are present.
        keys = {
            "OPENAI_API_KEY": "sk-test",
            "XAI_API_KEY": "xai-test",
        }
        openai_listing = [{"id": "gpt-5.2", "created": 1704067200}]
        xai_listing = [{"id": "grok-4-latest", "created": 1704067200}]
        selected = models.get_models(keys, openai_listing, xai_listing)
        self.assertEqual(selected["openai"], "gpt-5.2")
        self.assertEqual(selected["xai"], "grok-4-latest")
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/tests/test_models.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
sickn33/antigravity-awesome-skills:skills/last30days/tests/test_normalize.py | """Tests for normalize module."""
import sys
import unittest
from pathlib import Path
# Add lib to path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from lib import normalize, schema
class TestNormalizeRedditItems(unittest.TestCase):
    """Tests for normalize.normalize_reddit_items (raw dicts -> RedditItem)."""

    def test_normalizes_basic_item(self):
        # A fully-populated raw dict becomes a RedditItem with fields copied
        # through; a date inside the range earns "high" confidence.
        items = [
            {
                "id": "R1",
                "title": "Test Thread",
                "url": "https://reddit.com/r/test/1",
                "subreddit": "test",
                "date": "2026-01-15",
                "why_relevant": "Relevant because...",
                "relevance": 0.85,
            }
        ]
        result = normalize.normalize_reddit_items(items, "2026-01-01", "2026-01-31")
        self.assertEqual(len(result), 1)
        self.assertIsInstance(result[0], schema.RedditItem)
        self.assertEqual(result[0].id, "R1")
        self.assertEqual(result[0].title, "Test Thread")
        self.assertEqual(result[0].date_confidence, "high")

    def test_sets_low_confidence_for_old_date(self):
        # A date before the requested range downgrades confidence to "low".
        items = [
            {
                "id": "R1",
                "title": "Old Thread",
                "url": "https://reddit.com/r/test/1",
                "subreddit": "test",
                "date": "2025-12-01",  # Before range
                "relevance": 0.5,
            }
        ]
        result = normalize.normalize_reddit_items(items, "2026-01-01", "2026-01-31")
        self.assertEqual(result[0].date_confidence, "low")

    def test_handles_engagement(self):
        # A nested engagement dict is converted into an Engagement object.
        items = [
            {
                "id": "R1",
                "title": "Thread with engagement",
                "url": "https://reddit.com/r/test/1",
                "subreddit": "test",
                "engagement": {
                    "score": 100,
                    "num_comments": 50,
                    "upvote_ratio": 0.9,
                },
                "relevance": 0.5,
            }
        ]
        result = normalize.normalize_reddit_items(items, "2026-01-01", "2026-01-31")
        self.assertIsNotNone(result[0].engagement)
        self.assertEqual(result[0].engagement.score, 100)
        self.assertEqual(result[0].engagement.num_comments, 50)
class TestNormalizeXItems(unittest.TestCase):
    """Tests for normalize.normalize_x_items (raw dicts -> XItem)."""

    def test_normalizes_basic_item(self):
        # Field values from the raw dict are copied onto the XItem.
        items = [
            {
                "id": "X1",
                "text": "Test post content",
                "url": "https://x.com/user/status/123",
                "author_handle": "testuser",
                "date": "2026-01-15",
                "why_relevant": "Relevant because...",
                "relevance": 0.9,
            }
        ]
        result = normalize.normalize_x_items(items, "2026-01-01", "2026-01-31")
        self.assertEqual(len(result), 1)
        self.assertIsInstance(result[0], schema.XItem)
        self.assertEqual(result[0].id, "X1")
        self.assertEqual(result[0].author_handle, "testuser")

    def test_handles_x_engagement(self):
        # X-specific engagement counters survive the conversion.
        items = [
            {
                "id": "X1",
                "text": "Post with engagement",
                "url": "https://x.com/user/status/123",
                "author_handle": "user",
                "engagement": {
                    "likes": 100,
                    "reposts": 25,
                    "replies": 15,
                    "quotes": 5,
                },
                "relevance": 0.5,
            }
        ]
        result = normalize.normalize_x_items(items, "2026-01-01", "2026-01-31")
        self.assertIsNotNone(result[0].engagement)
        self.assertEqual(result[0].engagement.likes, 100)
        self.assertEqual(result[0].engagement.reposts, 25)
class TestItemsToDicts(unittest.TestCase):
    """Behavioral checks for normalize.items_to_dicts."""

    def test_converts_items(self):
        # Dataclass items convert to plain dicts with fields intact.
        source = [
            schema.RedditItem(
                id="R1",
                title="Test",
                url="https://reddit.com/r/test/1",
                subreddit="test",
            )
        ]
        dicts = normalize.items_to_dicts(source)
        self.assertEqual(len(dicts), 1)
        self.assertIsInstance(dicts[0], dict)
        self.assertEqual(dicts[0]["id"], "R1")
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/tests/test_normalize.py",
"license": "MIT License",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
sickn33/antigravity-awesome-skills:skills/last30days/tests/test_render.py | """Tests for render module."""
import sys
import unittest
from pathlib import Path
# Add lib to path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from lib import render, schema
class TestRenderCompact(unittest.TestCase):
    """Tests for render.render_compact (terminal-friendly report view)."""

    def test_renders_basic_report(self):
        # Topic, range start, mode, and model names all appear in the output.
        report = schema.Report(
            topic="test topic",
            range_from="2026-01-01",
            range_to="2026-01-31",
            generated_at="2026-01-31T12:00:00Z",
            mode="both",
            openai_model_used="gpt-5.2",
            xai_model_used="grok-4-latest",
        )
        result = render.render_compact(report)
        self.assertIn("test topic", result)
        self.assertIn("2026-01-01", result)
        self.assertIn("both", result)
        self.assertIn("gpt-5.2", result)

    def test_renders_reddit_items(self):
        # Reddit items render with their id, title, and r/<subreddit> label.
        report = schema.Report(
            topic="test",
            range_from="2026-01-01",
            range_to="2026-01-31",
            generated_at="2026-01-31T12:00:00Z",
            mode="reddit-only",
            reddit=[
                schema.RedditItem(
                    id="R1",
                    title="Test Thread",
                    url="https://reddit.com/r/test/1",
                    subreddit="test",
                    date="2026-01-15",
                    date_confidence="high",
                    score=85,
                    why_relevant="Very relevant",
                )
            ],
        )
        result = render.render_compact(report)
        self.assertIn("R1", result)
        self.assertIn("Test Thread", result)
        self.assertIn("r/test", result)

    def test_shows_coverage_tip_for_reddit_only(self):
        # Reddit-only mode should nudge the user to add an xAI key.
        report = schema.Report(
            topic="test",
            range_from="2026-01-01",
            range_to="2026-01-31",
            generated_at="2026-01-31T12:00:00Z",
            mode="reddit-only",
        )
        result = render.render_compact(report)
        self.assertIn("xAI key", result)
class TestRenderContextSnippet(unittest.TestCase):
    """Behavioral checks for render.render_context_snippet."""

    def test_renders_snippet(self):
        # The snippet mentions both the topic and the skill banner.
        report = schema.Report(
            topic="Claude Code Skills",
            range_from="2026-01-01",
            range_to="2026-01-31",
            generated_at="2026-01-31T12:00:00Z",
            mode="both",
        )
        snippet = render.render_context_snippet(report)
        self.assertIn("Claude Code Skills", snippet)
        self.assertIn("Last 30 Days", snippet)
class TestRenderFullReport(unittest.TestCase):
    """Behavioral checks for render.render_full_report."""

    def test_renders_full_report(self):
        report = schema.Report(
            topic="test topic",
            range_from="2026-01-01",
            range_to="2026-01-31",
            generated_at="2026-01-31T12:00:00Z",
            mode="both",
            openai_model_used="gpt-5.2",
            xai_model_used="grok-4-latest",
        )
        markdown = render.render_full_report(report)
        # The markdown leads with an H1 title and includes the model section.
        for expected in ("# test topic", "## Models Used", "gpt-5.2"):
            self.assertIn(expected, markdown)
class TestGetContextPath(unittest.TestCase):
    """Behavioral checks for render.get_context_path."""

    def test_returns_path_string(self):
        # The context path is a string pointing at the context markdown file.
        location = render.get_context_path()
        self.assertIsInstance(location, str)
        self.assertIn("last30days.context.md", location)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/tests/test_render.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
sickn33/antigravity-awesome-skills:skills/last30days/tests/test_score.py | """Tests for score module."""
import sys
import unittest
from datetime import datetime, timezone
from pathlib import Path
# Add lib to path
sys.path.insert(0, str(Path(__file__).parent.parent / "scripts"))
from lib import schema, score
class TestLog1pSafe(unittest.TestCase):
    """Behavioral checks for score.log1p_safe."""

    def test_positive_value(self):
        # Positive inputs map to a positive value.
        self.assertGreater(score.log1p_safe(100), 0)

    def test_zero(self):
        # Zero maps to zero.
        self.assertEqual(score.log1p_safe(0), 0)

    def test_none(self):
        # Missing values are treated as zero.
        self.assertEqual(score.log1p_safe(None), 0)

    def test_negative(self):
        # Negative values are clamped to zero rather than raising.
        self.assertEqual(score.log1p_safe(-5), 0)
class TestComputeRedditEngagementRaw(unittest.TestCase):
    """Behavioral checks for score.compute_reddit_engagement_raw."""

    def test_with_engagement(self):
        # Populated engagement produces a positive raw value.
        populated = schema.Engagement(score=100, num_comments=50, upvote_ratio=0.9)
        raw = score.compute_reddit_engagement_raw(populated)
        self.assertIsNotNone(raw)
        self.assertGreater(raw, 0)

    def test_without_engagement(self):
        # No engagement object means no raw value.
        self.assertIsNone(score.compute_reddit_engagement_raw(None))

    def test_empty_engagement(self):
        # An engagement object with no populated fields also yields None.
        self.assertIsNone(score.compute_reddit_engagement_raw(schema.Engagement()))
class TestComputeXEngagementRaw(unittest.TestCase):
    """Behavioral checks for score.compute_x_engagement_raw."""

    def test_with_engagement(self):
        # Populated X engagement produces a positive raw value.
        populated = schema.Engagement(likes=100, reposts=25, replies=15, quotes=5)
        raw = score.compute_x_engagement_raw(populated)
        self.assertIsNotNone(raw)
        self.assertGreater(raw, 0)

    def test_without_engagement(self):
        # No engagement object means no raw value.
        self.assertIsNone(score.compute_x_engagement_raw(None))
class TestNormalizeTo100(unittest.TestCase):
    """Behavioral checks for score.normalize_to_100."""

    def test_normalizes_values(self):
        # Min maps to 0, max to 100, midpoint to 50.
        scaled = score.normalize_to_100([0, 50, 100])
        self.assertEqual(scaled[0], 0)
        self.assertEqual(scaled[1], 50)
        self.assertEqual(scaled[2], 100)

    def test_handles_none(self):
        # None entries pass through untouched.
        scaled = score.normalize_to_100([0, None, 100])
        self.assertIsNone(scaled[1])

    def test_single_value(self):
        # A lone value lands on 50.
        self.assertEqual(score.normalize_to_100([50])[0], 50)
class TestScoreRedditItems(unittest.TestCase):
    """Tests for score.score_reddit_items (assigns scores to Reddit items)."""

    def test_scores_items(self):
        # Both items must get positive scores, and the item with higher
        # relevance and engagement must outscore the other.
        today = datetime.now(timezone.utc).date().isoformat()
        items = [
            schema.RedditItem(
                id="R1",
                title="Test",
                url="https://reddit.com/r/test/1",
                subreddit="test",
                date=today,
                date_confidence="high",
                engagement=schema.Engagement(score=100, num_comments=50, upvote_ratio=0.9),
                relevance=0.9,
            ),
            schema.RedditItem(
                id="R2",
                title="Test 2",
                url="https://reddit.com/r/test/2",
                subreddit="test",
                date=today,
                date_confidence="high",
                engagement=schema.Engagement(score=10, num_comments=5, upvote_ratio=0.8),
                relevance=0.5,
            ),
        ]
        result = score.score_reddit_items(items)
        self.assertEqual(len(result), 2)
        self.assertGreater(result[0].score, 0)
        self.assertGreater(result[1].score, 0)
        # Higher relevance and engagement should score higher
        self.assertGreater(result[0].score, result[1].score)

    def test_empty_list(self):
        # No items is a valid input and yields an empty result.
        result = score.score_reddit_items([])
        self.assertEqual(result, [])
class TestScoreXItems(unittest.TestCase):
    """Tests for score.score_x_items (assigns scores to X items)."""

    def test_scores_items(self):
        # A fresh, highly-relevant item with engagement scores above zero.
        today = datetime.now(timezone.utc).date().isoformat()
        items = [
            schema.XItem(
                id="X1",
                text="Test post",
                url="https://x.com/user/1",
                author_handle="user1",
                date=today,
                date_confidence="high",
                engagement=schema.Engagement(likes=100, reposts=25, replies=15, quotes=5),
                relevance=0.9,
            ),
        ]
        result = score.score_x_items(items)
        self.assertEqual(len(result), 1)
        self.assertGreater(result[0].score, 0)
class TestSortItems(unittest.TestCase):
    """Behavioral checks for score.sort_items."""

    def test_sorts_by_score_descending(self):
        # Items come back ordered from highest to lowest score.
        shuffled = [
            schema.RedditItem(id="R1", title="Low", url="", subreddit="", score=30),
            schema.RedditItem(id="R2", title="High", url="", subreddit="", score=90),
            schema.RedditItem(id="R3", title="Mid", url="", subreddit="", score=60),
        ]
        ordered = score.sort_items(shuffled)
        self.assertEqual([item.id for item in ordered], ["R2", "R3", "R1"])

    def test_stable_sort(self):
        # Equal scores must not drop or duplicate items.
        tied = [
            schema.RedditItem(id="R1", title="A", url="", subreddit="", score=50),
            schema.RedditItem(id="R2", title="B", url="", subreddit="", score=50),
        ]
        self.assertEqual(len(score.sort_items(tied)), 2)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/last30days/tests/test_score.py",
"license": "MIT License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
sickn33/antigravity-awesome-skills:skills/app-store-optimization/competitor_analyzer.py | """
Competitor analysis module for App Store Optimization.
Analyzes top competitors' ASO strategies and identifies opportunities.
"""
from typing import Dict, List, Any, Optional
from collections import Counter
import re
class CompetitorAnalyzer:
"""Analyzes competitor apps to identify ASO opportunities."""
    def __init__(self, category: str, platform: str = 'apple'):
        """
        Initialize competitor analyzer.

        Args:
            category: App category (e.g., "Productivity", "Games")
            platform: 'apple' or 'google'
        """
        self.category = category
        self.platform = platform
        # Accumulates the analysis dict of every app passed through
        # analyze_competitor() over this instance's lifetime.
        self.competitors = []
    def analyze_competitor(
        self,
        app_data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Analyze a single competitor's ASO strategy.

        Args:
            app_data: Dictionary with app_name, title, description, rating,
                ratings_count, keywords. Missing keys fall back to
                empty/zero defaults.

        Returns:
            Comprehensive competitor analysis: title/description breakdowns,
            keyword strategy, rating metrics, a 0-100 competitive-strength
            score, and detected differentiators.

        Side effects:
            Appends the analysis dict to self.competitors.
        """
        # Defensive .get() defaults so absent fields degrade gracefully.
        app_name = app_data.get('app_name', '')
        title = app_data.get('title', '')
        description = app_data.get('description', '')
        rating = app_data.get('rating', 0.0)
        ratings_count = app_data.get('ratings_count', 0)
        keywords = app_data.get('keywords', [])
        analysis = {
            'app_name': app_name,
            'title_analysis': self._analyze_title(title),
            'description_analysis': self._analyze_description(description),
            'keyword_strategy': self._extract_keyword_strategy(title, description, keywords),
            'rating_metrics': {
                'rating': rating,
                'ratings_count': ratings_count,
                'rating_quality': self._assess_rating_quality(rating, ratings_count)
            },
            'competitive_strength': self._calculate_competitive_strength(
                rating,
                ratings_count,
                len(description)
            ),
            'key_differentiators': self._identify_differentiators(description)
        }
        # Remember every analysis for later cross-competitor comparisons.
        self.competitors.append(analysis)
        return analysis
    def compare_competitors(
        self,
        competitors_data: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Compare multiple competitors and identify patterns.

        Args:
            competitors_data: List of competitor data dictionaries (same
                shape as analyze_competitor's app_data). Should be
                non-empty; downstream aggregation divides by its length.

        Returns:
            Comparative analysis: ranked competitors, keywords common to
            several of them, keyword gaps, rating distribution, best
            practices, and opportunities.

        Side effects:
            Re-analyzes every competitor, so each is appended to
            self.competitors on every call.
        """
        # Analyze each competitor
        analyses = []
        for comp_data in competitors_data:
            analysis = self.analyze_competitor(comp_data)
            analyses.append(analysis)
        # Extract common keywords across competitors
        all_keywords = []
        for analysis in analyses:
            all_keywords.extend(analysis['keyword_strategy']['primary_keywords'])
        common_keywords = self._find_common_keywords(all_keywords)
        # Identify keyword gaps (used by some but not all)
        keyword_gaps = self._identify_keyword_gaps(analyses)
        # Rank competitors by strength (strongest first)
        ranked_competitors = sorted(
            analyses,
            key=lambda x: x['competitive_strength'],
            reverse=True
        )
        # Analyze rating distribution
        rating_analysis = self._analyze_rating_distribution(analyses)
        # Identify best practices (drawn from the top-ranked competitor)
        best_practices = self._identify_best_practices(ranked_competitors)
        return {
            'category': self.category,
            'platform': self.platform,
            'competitors_analyzed': len(analyses),
            'ranked_competitors': ranked_competitors,
            'common_keywords': common_keywords,
            'keyword_gaps': keyword_gaps,
            'rating_analysis': rating_analysis,
            'best_practices': best_practices,
            'opportunities': self._identify_opportunities(
                analyses,
                common_keywords,
                keyword_gaps
            )
        }
    def identify_gaps(
        self,
        your_app_data: Dict[str, Any],
        competitors_data: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Identify gaps between your app and competitors.

        Args:
            your_app_data: Your app's data (same shape as app_data in
                analyze_competitor).
            competitors_data: List of competitor data. Must be non-empty;
                list lengths are used as divisors below.

        Returns:
            Gap analysis with actionable recommendations covering keywords,
            ratings, description length, and overall positioning.
        """
        # Analyze your app
        # NOTE(review): this also appends your own app to self.competitors,
        # mixing it in with real competitors -- confirm that is intended.
        your_analysis = self.analyze_competitor(your_app_data)
        # Analyze competitors
        competitor_comparison = self.compare_competitors(competitors_data)
        # Identify keyword gaps: keywords competitors share but you lack.
        your_keywords = set(your_analysis['keyword_strategy']['primary_keywords'])
        competitor_keywords = set(competitor_comparison['common_keywords'])
        missing_keywords = competitor_keywords - your_keywords
        # Identify rating gap (positive = competitors rated higher than you)
        avg_competitor_rating = competitor_comparison['rating_analysis']['average_rating']
        rating_gap = avg_competitor_rating - your_analysis['rating_metrics']['rating']
        # Identify description length gap
        avg_competitor_desc_length = sum(
            len(comp['description_analysis']['text'])
            for comp in competitor_comparison['ranked_competitors']
        ) / len(competitor_comparison['ranked_competitors'])
        your_desc_length = len(your_analysis['description_analysis']['text'])
        desc_length_gap = avg_competitor_desc_length - your_desc_length
        return {
            'your_app': your_analysis,
            'keyword_gaps': {
                # Cap at 10; set order is arbitrary, so selection is too.
                'missing_keywords': list(missing_keywords)[:10],
                'recommendations': self._generate_keyword_recommendations(missing_keywords)
            },
            'rating_gap': {
                'your_rating': your_analysis['rating_metrics']['rating'],
                'average_competitor_rating': avg_competitor_rating,
                'gap': round(rating_gap, 2),
                'action_items': self._generate_rating_improvement_actions(rating_gap)
            },
            'content_gap': {
                'your_description_length': your_desc_length,
                'average_competitor_length': int(avg_competitor_desc_length),
                'gap': int(desc_length_gap),
                'recommendations': self._generate_content_recommendations(desc_length_gap)
            },
            'competitive_positioning': self._assess_competitive_position(
                your_analysis,
                competitor_comparison
            )
        }
def _analyze_title(self, title: str) -> Dict[str, Any]:
"""Analyze title structure and keyword usage."""
parts = re.split(r'[-' + r':|]', title)
return {
'title': title,
'length': len(title),
'has_brand': len(parts) > 0,
'has_keywords': len(parts) > 1,
'components': [part.strip() for part in parts],
'word_count': len(title.split()),
'strategy': 'brand_plus_keywords' if len(parts) > 1 else 'brand_only'
}
def _analyze_description(self, description: str) -> Dict[str, Any]:
"""Analyze description structure and content."""
lines = description.split('\n')
word_count = len(description.split())
# Check for structural elements
has_bullet_points = 'β’' in description or '*' in description
has_sections = any(line.isupper() for line in lines if len(line) > 0)
has_call_to_action = any(
cta in description.lower()
for cta in ['download', 'try', 'get', 'start', 'join']
)
# Extract features mentioned
features = self._extract_features(description)
return {
'text': description,
'length': len(description),
'word_count': word_count,
'structure': {
'has_bullet_points': has_bullet_points,
'has_sections': has_sections,
'has_call_to_action': has_call_to_action
},
'features_mentioned': features,
'readability': 'good' if 50 <= word_count <= 300 else 'needs_improvement'
}
    def _extract_keyword_strategy(
        self,
        title: str,
        description: str,
        explicit_keywords: List[str]
    ) -> Dict[str, Any]:
        """
        Extract keyword strategy from metadata.

        Combines three signals: title words longer than 3 characters,
        description words (>= 4 chars) that appear more than twice, and any
        explicitly supplied keyword list.
        """
        # Extract keywords from title
        title_keywords = [word.lower() for word in title.split() if len(word) > 3]
        # Extract frequently used words from description
        desc_words = re.findall(r'\b\w{4,}\b', description.lower())
        word_freq = Counter(desc_words)
        # Keep the 15 most common words, but only those seen more than twice.
        frequent_words = [word for word, count in word_freq.most_common(15) if count > 2]
        # Combine with explicit keywords
        # (set() deduplicates, so the combined ordering is arbitrary)
        all_keywords = list(set(title_keywords + frequent_words + explicit_keywords))
        return {
            'primary_keywords': title_keywords,
            'description_keywords': frequent_words[:10],
            'explicit_keywords': explicit_keywords,
            'total_unique_keywords': len(all_keywords),
            'keyword_focus': self._assess_keyword_focus(title_keywords, frequent_words)
        }
def _assess_rating_quality(self, rating: float, ratings_count: int) -> str:
"""Assess the quality of ratings."""
if ratings_count < 100:
return 'insufficient_data'
elif rating >= 4.5 and ratings_count > 1000:
return 'excellent'
elif rating >= 4.0 and ratings_count > 500:
return 'good'
elif rating >= 3.5:
return 'average'
else:
return 'poor'
def _calculate_competitive_strength(
self,
rating: float,
ratings_count: int,
description_length: int
) -> float:
"""
Calculate overall competitive strength (0-100).
Factors:
- Rating quality (40%)
- Rating volume (30%)
- Metadata quality (30%)
"""
# Rating quality score (0-40)
rating_score = (rating / 5.0) * 40
# Rating volume score (0-30)
volume_score = min((ratings_count / 10000) * 30, 30)
# Metadata quality score (0-30)
metadata_score = min((description_length / 2000) * 30, 30)
total_score = rating_score + volume_score + metadata_score
return round(total_score, 1)
def _identify_differentiators(self, description: str) -> List[str]:
"""Identify key differentiators from description."""
differentiator_keywords = [
'unique', 'only', 'first', 'best', 'leading', 'exclusive',
'revolutionary', 'innovative', 'patent', 'award'
]
differentiators = []
sentences = description.split('.')
for sentence in sentences:
sentence_lower = sentence.lower()
if any(keyword in sentence_lower for keyword in differentiator_keywords):
differentiators.append(sentence.strip())
return differentiators[:5]
def _find_common_keywords(self, all_keywords: List[str]) -> List[str]:
"""Find keywords used by multiple competitors."""
keyword_counts = Counter(all_keywords)
# Return keywords used by at least 2 competitors
common = [kw for kw, count in keyword_counts.items() if count >= 2]
return sorted(common, key=lambda x: keyword_counts[x], reverse=True)[:20]
    def _identify_keyword_gaps(self, analyses: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Identify keywords used by some competitors but not others.

        Returns:
            Up to 15 gap dicts ({'keyword', 'used_by', 'usage_percentage'}),
            highest usage first. Only keywords used by more than one app
            but fewer than all apps qualify.
        """
        # Map each app to the set of its primary keywords.
        # NOTE(review): duplicate app names would overwrite earlier entries.
        all_keywords_by_app = {}
        for analysis in analyses:
            app_name = analysis['app_name']
            keywords = analysis['keyword_strategy']['primary_keywords']
            all_keywords_by_app[app_name] = set(keywords)
        # Find keywords used by some but not all
        all_keywords_set = set()
        for keywords in all_keywords_by_app.values():
            all_keywords_set.update(keywords)
        gaps = []
        for keyword in all_keywords_set:
            using_apps = [
                app for app, keywords in all_keywords_by_app.items()
                if keyword in keywords
            ]
            # Strictly "some but not all": at least 2 users, fewer than every app.
            if 1 < len(using_apps) < len(analyses):
                gaps.append({
                    'keyword': keyword,
                    'used_by': using_apps,
                    'usage_percentage': round(len(using_apps) / len(analyses) * 100, 1)
                })
        return sorted(gaps, key=lambda x: x['usage_percentage'], reverse=True)[:15]
def _analyze_rating_distribution(self, analyses: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Analyze rating distribution across competitors."""
ratings = [a['rating_metrics']['rating'] for a in analyses]
ratings_counts = [a['rating_metrics']['ratings_count'] for a in analyses]
return {
'average_rating': round(sum(ratings) / len(ratings), 2),
'highest_rating': max(ratings),
'lowest_rating': min(ratings),
'average_ratings_count': int(sum(ratings_counts) / len(ratings_counts)),
'total_ratings_in_category': sum(ratings_counts)
}
def _identify_best_practices(self, ranked_competitors: List[Dict[str, Any]]) -> List[str]:
    """Identify best practices from top competitors.

    Only the highest-ranked competitor is inspected; up to five practices
    are derived from its title, description structure, and rating metrics.
    """
    if not ranked_competitors:
        return []
    top_competitor = ranked_competitors[0]
    practices = []
    # Title strategy
    title_analysis = top_competitor['title_analysis']
    if title_analysis['has_keywords']:
        practices.append(
            f"Title Strategy: Include primary keyword in title (e.g., '{title_analysis['title']}')"
        )
    # Description structure
    desc_analysis = top_competitor['description_analysis']
    if desc_analysis['structure']['has_bullet_points']:
        practices.append("Description: Use bullet points to highlight key features")
    if desc_analysis['structure']['has_sections']:
        practices.append("Description: Organize content with clear section headers")
    # Rating strategy
    rating_quality = top_competitor['rating_metrics']['rating_quality']
    if rating_quality in ['excellent', 'good']:
        # NOTE(review): the star glyph below was reconstructed from a mangled
        # source line -- confirm the original character against upstream.
        practices.append(
            f"Ratings: Maintain high rating quality ({top_competitor['rating_metrics']['rating']}⭐) "
            f"with significant volume ({top_competitor['rating_metrics']['ratings_count']} ratings)"
        )
    return practices[:5]
def _identify_opportunities(
self,
analyses: List[Dict[str, Any]],
common_keywords: List[str],
keyword_gaps: List[Dict[str, Any]]
) -> List[str]:
"""Identify ASO opportunities based on competitive analysis."""
opportunities = []
# Keyword opportunities from gaps
if keyword_gaps:
underutilized_keywords = [
gap['keyword'] for gap in keyword_gaps
if gap['usage_percentage'] < 50
]
if underutilized_keywords:
opportunities.append(
f"Target underutilized keywords: {', '.join(underutilized_keywords[:5])}"
)
# Rating opportunity
avg_rating = sum(a['rating_metrics']['rating'] for a in analyses) / len(analyses)
if avg_rating < 4.5:
opportunities.append(
f"Category average rating is {avg_rating:.1f} - opportunity to differentiate with higher ratings"
)
# Content depth opportunity
avg_desc_length = sum(
a['description_analysis']['length'] for a in analyses
) / len(analyses)
if avg_desc_length < 1500:
opportunities.append(
"Competitors have relatively short descriptions - opportunity to provide more comprehensive information"
)
return opportunities[:5]
def _extract_features(self, description: str) -> List[str]:
"""Extract feature mentions from description."""
# Look for bullet points or numbered lists
lines = description.split('\n')
features = []
for line in lines:
line = line.strip()
# Check if line starts with bullet or number
if line and (line[0] in ['β’', '*', '-', 'β'] or line[0].isdigit()):
# Clean the line
cleaned = re.sub(r'^[β’*\-β\d.)\s]+', '', line)
if cleaned:
features.append(cleaned)
return features[:10]
def _assess_keyword_focus(
self,
title_keywords: List[str],
description_keywords: List[str]
) -> str:
"""Assess keyword focus strategy."""
overlap = set(title_keywords) & set(description_keywords)
if len(overlap) >= 3:
return 'consistent_focus'
elif len(overlap) >= 1:
return 'moderate_focus'
else:
return 'broad_focus'
def _generate_keyword_recommendations(self, missing_keywords: set) -> List[str]:
"""Generate recommendations for missing keywords."""
if not missing_keywords:
return ["Your keyword coverage is comprehensive"]
recommendations = []
missing_list = list(missing_keywords)[:5]
recommendations.append(
f"Consider adding these competitor keywords: {', '.join(missing_list)}"
)
recommendations.append(
"Test keyword variations in subtitle/promotional text first"
)
recommendations.append(
"Monitor competitor keyword changes monthly"
)
return recommendations
def _generate_rating_improvement_actions(self, rating_gap: float) -> List[str]:
"""Generate actions to improve ratings."""
actions = []
if rating_gap > 0.5:
actions.append("CRITICAL: Significant rating gap - prioritize user satisfaction improvements")
actions.append("Analyze negative reviews to identify top issues")
actions.append("Implement in-app rating prompts after positive experiences")
actions.append("Respond to all negative reviews professionally")
elif rating_gap > 0.2:
actions.append("Focus on incremental improvements to close rating gap")
actions.append("Optimize timing of rating requests")
else:
actions.append("Ratings are competitive - maintain quality and continue improvements")
return actions
def _generate_content_recommendations(self, desc_length_gap: int) -> List[str]:
"""Generate content recommendations based on length gap."""
recommendations = []
if desc_length_gap > 500:
recommendations.append(
"Expand description to match competitor detail level"
)
recommendations.append(
"Add use case examples and success stories"
)
recommendations.append(
"Include more feature explanations and benefits"
)
elif desc_length_gap < -500:
recommendations.append(
"Consider condensing description for better readability"
)
recommendations.append(
"Focus on most important features first"
)
else:
recommendations.append(
"Description length is competitive"
)
return recommendations
def _assess_competitive_position(
self,
your_analysis: Dict[str, Any],
competitor_comparison: Dict[str, Any]
) -> str:
"""Assess your competitive position."""
your_strength = your_analysis['competitive_strength']
competitors = competitor_comparison['ranked_competitors']
if not competitors:
return "No comparison data available"
# Find where you'd rank
better_than_count = sum(
1 for comp in competitors
if your_strength > comp['competitive_strength']
)
position_percentage = (better_than_count / len(competitors)) * 100
if position_percentage >= 75:
return "Strong Position: Top quartile in competitive strength"
elif position_percentage >= 50:
return "Competitive Position: Above average, opportunities for improvement"
elif position_percentage >= 25:
return "Challenging Position: Below average, requires strategic improvements"
else:
return "Weak Position: Bottom quartile, major ASO overhaul needed"
def analyze_competitor_set(
    category: str,
    competitors_data: List[Dict[str, Any]],
    platform: str = 'apple'
) -> Dict[str, Any]:
    """
    Convenience function to analyze a set of competitors.

    Thin wrapper that constructs a CompetitorAnalyzer and delegates to its
    `compare_competitors` method.

    Args:
        category: App category
        competitors_data: List of competitor data
        platform: 'apple' or 'google'

    Returns:
        Complete competitive analysis
    """
    analyzer = CompetitorAnalyzer(category, platform)
    return analyzer.compare_competitors(competitors_data)
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/app-store-optimization/competitor_analyzer.py",
"license": "MIT License",
"lines": 484,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/mcp-builder/scripts/connections.py | """Lightweight connection handling for MCP servers."""
from abc import ABC, abstractmethod
from contextlib import AsyncExitStack
from typing import Any
from mcp import ClientSession, StdioServerParameters
from mcp.client.sse import sse_client
from mcp.client.stdio import stdio_client
from mcp.client.streamable_http import streamablehttp_client
class MCPConnection(ABC):
    """Base class for MCP server connections.

    Subclasses supply the transport-specific context via `_create_context`;
    this class owns session setup/teardown as an async context manager.
    """

    def __init__(self):
        # Active ClientSession once entered; None while disconnected.
        self.session = None
        # AsyncExitStack owning the transport and session contexts.
        self._stack = None

    @abstractmethod
    def _create_context(self):
        """Create the connection context based on connection type."""

    async def __aenter__(self):
        """Initialize MCP server connection."""
        self._stack = AsyncExitStack()
        await self._stack.__aenter__()
        try:
            ctx = self._create_context()
            result = await self._stack.enter_async_context(ctx)
            # Transport contexts yield either (read, write) or
            # (read, write, extra) -- the third element is unused here.
            if len(result) == 2:
                read, write = result
            elif len(result) == 3:
                read, write, _ = result
            else:
                raise ValueError(f"Unexpected context result: {result}")
            session_ctx = ClientSession(read, write)
            self.session = await self._stack.enter_async_context(session_ctx)
            await self.session.initialize()
            return self
        except BaseException:
            # Unwind any partially-entered contexts before propagating.
            await self._stack.__aexit__(None, None, None)
            raise

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Clean up MCP server connection resources."""
        if self._stack:
            await self._stack.__aexit__(exc_type, exc_val, exc_tb)
        self.session = None
        self._stack = None

    async def list_tools(self) -> list[dict[str, Any]]:
        """Retrieve available tools from the MCP server.

        Returns a list of dicts with "name", "description" and "input_schema"
        keys; must be called only while the connection is entered.
        """
        response = await self.session.list_tools()
        return [
            {
                "name": tool.name,
                "description": tool.description,
                "input_schema": tool.inputSchema,
            }
            for tool in response.tools
        ]

    async def call_tool(self, tool_name: str, arguments: dict[str, Any]) -> Any:
        """Call a tool on the MCP server with provided arguments."""
        result = await self.session.call_tool(tool_name, arguments=arguments)
        return result.content
class MCPConnectionStdio(MCPConnection):
    """MCP connection using standard input/output."""

    def __init__(self, command: str, args: list[str] = None, env: dict[str, str] = None):
        super().__init__()
        self.command = command
        self.args = args or []
        self.env = env

    def _create_context(self):
        # Spawn the server process; parameters mirror the constructor args.
        params = StdioServerParameters(
            command=self.command,
            args=self.args,
            env=self.env,
        )
        return stdio_client(params)
class MCPConnectionSSE(MCPConnection):
    """MCP connection using Server-Sent Events."""

    def __init__(self, url: str, headers: dict[str, str] = None):
        super().__init__()
        self.url = url
        self.headers = headers or {}

    def _create_context(self):
        # SSE transport: events stream over a long-lived HTTP response.
        return sse_client(url=self.url, headers=self.headers)
class MCPConnectionHTTP(MCPConnection):
    """MCP connection using Streamable HTTP."""

    def __init__(self, url: str, headers: dict[str, str] = None):
        super().__init__()
        self.url = url
        self.headers = headers or {}

    def _create_context(self):
        # Streamable HTTP transport (yields a 3-tuple; handled by the base).
        return streamablehttp_client(url=self.url, headers=self.headers)
def create_connection(
    transport: str,
    command: str = None,
    args: list[str] = None,
    env: dict[str, str] = None,
    url: str = None,
    headers: dict[str, str] = None,
) -> MCPConnection:
    """Factory function to create the appropriate MCP connection.

    Args:
        transport: Connection type ("stdio", "sse", or "http")
        command: Command to run (stdio only)
        args: Command arguments (stdio only)
        env: Environment variables (stdio only)
        url: Server URL (sse and http only)
        headers: HTTP headers (sse and http only)

    Returns:
        MCPConnection instance
    """
    kind = transport.lower()
    if kind == "stdio":
        if not command:
            raise ValueError("Command is required for stdio transport")
        return MCPConnectionStdio(command=command, args=args, env=env)
    if kind == "sse":
        if not url:
            raise ValueError("URL is required for sse transport")
        return MCPConnectionSSE(url=url, headers=headers)
    if kind in ("http", "streamable_http", "streamable-http"):
        if not url:
            raise ValueError("URL is required for http transport")
        return MCPConnectionHTTP(url=url, headers=headers)
    raise ValueError(f"Unsupported transport type: {transport}. Use 'stdio', 'sse', or 'http'")
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/mcp-builder/scripts/connections.py",
"license": "MIT License",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
sickn33/antigravity-awesome-skills:skills/mcp-builder/scripts/evaluation.py | """MCP Server Evaluation Harness
This script evaluates MCP servers by running test questions against them using Claude.
"""
import argparse
import asyncio
import json
import re
import sys
import time
import traceback
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Any
from anthropic import Anthropic
from connections import create_connection
# System prompt for every evaluation run: forces the model to use tools and to
# emit machine-parseable <summary>, <feedback> and <response> sections that
# `extract_xml_content` later pulls out of the final reply.
EVALUATION_PROMPT = """You are an AI assistant with access to tools.
When given a task, you MUST:
1. Use the available tools to complete the task
2. Provide summary of each step in your approach, wrapped in <summary> tags
3. Provide feedback on the tools provided, wrapped in <feedback> tags
4. Provide your final response, wrapped in <response> tags
Summary Requirements:
- In your <summary> tags, you must explain:
- The steps you took to complete the task
- Which tools you used, in what order, and why
- The inputs you provided to each tool
- The outputs you received from each tool
- A summary for how you arrived at the response
Feedback Requirements:
- In your <feedback> tags, provide constructive feedback on the tools:
- Comment on tool names: Are they clear and descriptive?
- Comment on input parameters: Are they well-documented? Are required vs optional parameters clear?
- Comment on descriptions: Do they accurately describe what the tool does?
- Comment on any errors encountered during tool usage: Did the tool fail to execute? Did the tool return too many tokens?
- Identify specific areas for improvement and explain WHY they would help
- Be specific and actionable in your suggestions
Response Requirements:
- Your response should be concise and directly address what was asked
- Always wrap your final response in <response> tags
- If you cannot solve the task return <response>NOT_FOUND</response>
- For numeric responses, provide just the number
- For IDs, provide just the ID
- For names or text, provide the exact text requested
- Your response should go last"""
def parse_evaluation_file(file_path: Path) -> list[dict[str, Any]]:
    """Parse XML evaluation file with qa_pair elements."""
    try:
        root = ET.parse(file_path).getroot()
        pairs: list[dict[str, Any]] = []
        for qa in root.findall(".//qa_pair"):
            question = qa.find("question")
            answer = qa.find("answer")
            # Skip pairs missing either element.
            if question is None or answer is None:
                continue
            pairs.append({
                "question": (question.text or "").strip(),
                "answer": (answer.text or "").strip(),
            })
        return pairs
    except Exception as e:
        # Best-effort CLI behavior: report and carry on with no tasks.
        print(f"Error parsing evaluation file {file_path}: {e}")
        return []
def extract_xml_content(text: str, tag: str) -> str | None:
"""Extract content from XML tags."""
pattern = rf"<{tag}>(.*?)</{tag}>"
matches = re.findall(pattern, text, re.DOTALL)
return matches[-1].strip() if matches else None
async def agent_loop(
    client: Anthropic,
    model: str,
    question: str,
    tools: list[dict[str, Any]],
    connection: Any,
) -> tuple[str, dict[str, Any]]:
    """Run the agent loop with MCP tools.

    Repeatedly calls Claude, executing each requested tool via `connection`
    and feeding results back, until the model stops asking for tools.

    Returns:
        Tuple of (final response text or None, per-tool metrics mapping
        tool name -> {"count": int, "durations": list[float]}).
    """
    messages = [{"role": "user", "content": question}]
    # The Anthropic client call is synchronous; run it in a thread so the
    # event loop is not blocked.
    response = await asyncio.to_thread(
        client.messages.create,
        model=model,
        max_tokens=4096,
        system=EVALUATION_PROMPT,
        messages=messages,
        tools=tools,
    )
    messages.append({"role": "assistant", "content": response.content})
    tool_metrics = {}
    while response.stop_reason == "tool_use":
        # NOTE(review): only the first tool_use block per turn is executed --
        # confirm parallel tool calls are not expected here.
        tool_use = next(block for block in response.content if block.type == "tool_use")
        tool_name = tool_use.name
        tool_input = tool_use.input
        tool_start_ts = time.time()
        try:
            tool_result = await connection.call_tool(tool_name, tool_input)
            tool_response = json.dumps(tool_result) if isinstance(tool_result, (dict, list)) else str(tool_result)
        except Exception as e:
            # Surface tool failures to the model instead of aborting the loop.
            tool_response = f"Error executing tool {tool_name}: {str(e)}\n"
            tool_response += traceback.format_exc()
        tool_duration = time.time() - tool_start_ts
        if tool_name not in tool_metrics:
            tool_metrics[tool_name] = {"count": 0, "durations": []}
        tool_metrics[tool_name]["count"] += 1
        tool_metrics[tool_name]["durations"].append(tool_duration)
        messages.append({
            "role": "user",
            "content": [{
                "type": "tool_result",
                "tool_use_id": tool_use.id,
                "content": tool_response,
            }]
        })
        response = await asyncio.to_thread(
            client.messages.create,
            model=model,
            max_tokens=4096,
            system=EVALUATION_PROMPT,
            messages=messages,
            tools=tools,
        )
        messages.append({"role": "assistant", "content": response.content})
    # First text block of the final assistant turn, or None if absent.
    response_text = next(
        (block.text for block in response.content if hasattr(block, "text")),
        None,
    )
    return response_text, tool_metrics
async def evaluate_single_task(
    client: Anthropic,
    model: str,
    qa_pair: dict[str, Any],
    tools: list[dict[str, Any]],
    connection: Any,
    task_index: int,
) -> dict[str, Any]:
    """Evaluate a single QA pair with the given tools.

    Runs the agent loop for the question, extracts the structured
    <response>/<summary>/<feedback> sections, and scores the answer by
    exact string match against the expected value.
    """
    start_time = time.time()
    print(f"Task {task_index + 1}: Running task with question: {qa_pair['question']}")
    response, tool_metrics = await agent_loop(client, model, qa_pair["question"], tools, connection)
    response_value = extract_xml_content(response, "response")
    summary = extract_xml_content(response, "summary")
    feedback = extract_xml_content(response, "feedback")
    duration_seconds = time.time() - start_time
    return {
        "question": qa_pair["question"],
        "expected": qa_pair["answer"],
        "actual": response_value,
        # Exact-match scoring: 1 only when the extracted response equals the
        # expected string; a missing <response> section scores 0.
        "score": int(response_value == qa_pair["answer"]) if response_value else 0,
        "total_duration": duration_seconds,
        "tool_calls": tool_metrics,
        "num_tool_calls": sum(len(metrics["durations"]) for metrics in tool_metrics.values()),
        "summary": summary,
        "feedback": feedback,
    }
# Markdown skeleton for the report's aggregate summary section; filled by
# `run_evaluation` via str.format.
REPORT_HEADER = """
# Evaluation Report
## Summary
- **Accuracy**: {correct}/{total} ({accuracy:.1f}%)
- **Average Task Duration**: {average_duration_s:.2f}s
- **Average Tool Calls per Task**: {average_tool_calls:.2f}
- **Total Tool Calls**: {total_tool_calls}
---
"""

# Markdown skeleton for one per-task section of the report.
TASK_TEMPLATE = """
### Task {task_num}
**Question**: {question}
**Ground Truth Answer**: `{expected_answer}`
**Actual Answer**: `{actual_answer}`
**Correct**: {correct_indicator}
**Duration**: {total_duration:.2f}s
**Tool Calls**: {tool_calls}
**Summary**
{summary}
**Feedback**
{feedback}
---
"""
async def run_evaluation(
    eval_path: Path,
    connection: Any,
    model: str = "claude-3-7-sonnet-20250219",
) -> str:
    """Run evaluation with MCP server tools.

    Arguments:
        eval_path: Path to the XML evaluation file of qa_pair elements.
        connection: Entered MCP connection exposing list_tools/call_tool.
        model: Claude model identifier used for all tasks.

    Returns:
        The full Markdown evaluation report as a string.
    """
    print("π Starting Evaluation")
    client = Anthropic()
    tools = await connection.list_tools()
    print(f"π Loaded {len(tools)} tools from MCP server")
    qa_pairs = parse_evaluation_file(eval_path)
    print(f"π Loaded {len(qa_pairs)} evaluation tasks")
    # Tasks run sequentially so per-task tool metrics stay attributable.
    results = []
    for i, qa_pair in enumerate(qa_pairs):
        print(f"Processing task {i + 1}/{len(qa_pairs)}")
        result = await evaluate_single_task(client, model, qa_pair, tools, connection, i)
        results.append(result)
    # Aggregate metrics; guards avoid ZeroDivisionError on an empty task list.
    correct = sum(r["score"] for r in results)
    accuracy = (correct / len(results)) * 100 if results else 0
    average_duration_s = sum(r["total_duration"] for r in results) / len(results) if results else 0
    average_tool_calls = sum(r["num_tool_calls"] for r in results) / len(results) if results else 0
    total_tool_calls = sum(r["num_tool_calls"] for r in results)
    report = REPORT_HEADER.format(
        correct=correct,
        total=len(results),
        accuracy=accuracy,
        average_duration_s=average_duration_s,
        average_tool_calls=average_tool_calls,
        total_tool_calls=total_tool_calls,
    )
    report += "".join([
        TASK_TEMPLATE.format(
            task_num=i + 1,
            question=qa_pair["question"],
            expected_answer=qa_pair["answer"],
            actual_answer=result["actual"] or "N/A",
            # NOTE(review): indicator emoji reconstructed from a mangled
            # source line -- confirm original glyphs against upstream file.
            correct_indicator="✅" if result["score"] else "❌",
            total_duration=result["total_duration"],
            tool_calls=json.dumps(result["tool_calls"], indent=2),
            summary=result["summary"] or "N/A",
            feedback=result["feedback"] or "N/A",
        )
        for i, (qa_pair, result) in enumerate(zip(qa_pairs, results))
    ])
    return report
def parse_headers(header_list: list[str]) -> dict[str, str]:
    """Parse header strings in format 'Key: Value' into a dictionary."""
    parsed: dict[str, str] = {}
    for raw in header_list or []:
        # Split on the first colon only; values may contain colons.
        key, sep, value = raw.partition(":")
        if sep:
            parsed[key.strip()] = value.strip()
        else:
            print(f"Warning: Ignoring malformed header: {raw}")
    return parsed
def parse_env_vars(env_list: list[str]) -> dict[str, str]:
    """Parse environment variable strings in format 'KEY=VALUE' into a dictionary."""
    parsed: dict[str, str] = {}
    for raw in env_list or []:
        # Split on the first '=' only; values may contain '='.
        key, sep, value = raw.partition("=")
        if not sep:
            print(f"Warning: Ignoring malformed environment variable: {raw}")
            continue
        parsed[key.strip()] = value.strip()
    return parsed
async def main():
    """Parse CLI arguments, connect to the MCP server, and run the evaluation."""
    parser = argparse.ArgumentParser(
        description="Evaluate MCP servers using test questions",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Evaluate a local stdio MCP server
python evaluation.py -t stdio -c python -a my_server.py eval.xml
# Evaluate an SSE MCP server
python evaluation.py -t sse -u https://example.com/mcp -H "Authorization: Bearer token" eval.xml
# Evaluate an HTTP MCP server with custom model
python evaluation.py -t http -u https://example.com/mcp -m claude-3-5-sonnet-20241022 eval.xml
""",
    )
    parser.add_argument("eval_file", type=Path, help="Path to evaluation XML file")
    parser.add_argument("-t", "--transport", choices=["stdio", "sse", "http"], default="stdio", help="Transport type (default: stdio)")
    parser.add_argument("-m", "--model", default="claude-3-7-sonnet-20250219", help="Claude model to use (default: claude-3-7-sonnet-20250219)")
    stdio_group = parser.add_argument_group("stdio options")
    stdio_group.add_argument("-c", "--command", help="Command to run MCP server (stdio only)")
    stdio_group.add_argument("-a", "--args", nargs="+", help="Arguments for the command (stdio only)")
    stdio_group.add_argument("-e", "--env", nargs="+", help="Environment variables in KEY=VALUE format (stdio only)")
    remote_group = parser.add_argument_group("sse/http options")
    remote_group.add_argument("-u", "--url", help="MCP server URL (sse/http only)")
    remote_group.add_argument("-H", "--header", nargs="+", dest="headers", help="HTTP headers in 'Key: Value' format (sse/http only)")
    parser.add_argument("-o", "--output", type=Path, help="Output file for evaluation report (default: stdout)")
    args = parser.parse_args()
    if not args.eval_file.exists():
        print(f"Error: Evaluation file not found: {args.eval_file}")
        sys.exit(1)
    headers = parse_headers(args.headers) if args.headers else None
    env_vars = parse_env_vars(args.env) if args.env else None
    try:
        connection = create_connection(
            transport=args.transport,
            command=args.command,
            args=args.args,
            env=env_vars,
            url=args.url,
            headers=headers,
        )
    except ValueError as e:
        print(f"Error: {e}")
        sys.exit(1)
    print(f"π Connecting to MCP server via {args.transport}...")
    async with connection:
        # NOTE(review): status emoji below reconstructed from mangled source
        # lines -- confirm the original glyphs against the upstream file.
        print("✅ Connected successfully")
        report = await run_evaluation(args.eval_file, connection, args.model)
        if args.output:
            args.output.write_text(report)
            print(f"\n✅ Report saved to {args.output}")
        else:
            print("\n" + report)
# Script entry point: drive the async CLI through the asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "sickn33/antigravity-awesome-skills",
"file_path": "skills/mcp-builder/scripts/evaluation.py",
"license": "MIT License",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
squidfunk/mkdocs-material:material/extensions/preview.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import annotations
import logging
from material.utilities.filter import FileFilter, FilterConfig
from mkdocs.structure.pages import _RelativePathTreeprocessor
from markdown import Extension, Markdown
from markdown.treeprocessors import Treeprocessor
from mkdocs.exceptions import ConfigurationError
from urllib.parse import urlparse
from xml.etree.ElementTree import Element
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class PreviewProcessor(Treeprocessor):
    """
    A Markdown treeprocessor to enable instant previews on links.

    Note that this treeprocessor is dependent on the `relpath` treeprocessor
    registered programmatically by MkDocs before rendering a page.
    """

    def __init__(self, md: Markdown, config: dict):
        """
        Initialize the treeprocessor.

        Arguments:
            md: The Markdown instance.
            config: The configuration.
        """
        super().__init__(md)
        self.config = config

    def run(self, root: Element):
        """
        Run the treeprocessor.

        Arguments:
            root: The root element of the parsed Markdown document.
        """
        at = self.md.treeprocessors.get_index_for_name("relpath")

        # Hack: Python Markdown has no notion of where it is, i.e., which file
        # is being processed. This seems to be a deliberate design decision, as
        # it is not possible to access the file path of the current page, but
        # it might also be an oversight that is now impossible to fix. However,
        # since this extension is only useful in the context of Material for
        # MkDocs, we can assume that the _RelativePathTreeprocessor is always
        # present, telling us the file path of the current page. If that ever
        # changes, we would need to wrap this extension in a plugin, but for
        # the time being we are sneaky and will probably get away with it.
        processor = self.md.treeprocessors[at]
        if not isinstance(processor, _RelativePathTreeprocessor):
            raise TypeError("Relative path processor not registered")

        # Normalize configurations - build a fresh list instead of appending
        # to the shared configuration list: `run` is invoked once per page, so
        # mutating `self.config["configurations"]` would add one duplicate
        # entry per rendered page and grow without bound over a build.
        configurations = [
            *self.config["configurations"],
            {
                "sources": self.config.get("sources"),
                "targets": self.config.get("targets")
            }
        ]

        # Walk through all configurations - @todo refactor so that we don't
        # iterate multiple times over the same elements
        for configuration in configurations:

            # Skip, if the configuration defines nothing - we could also fix
            # this in the file filter, but we first fix it here and check if
            # it generalizes well enough to other inclusion/exclusion sites,
            # because here, it would hinder the ability to automatically
            # include all sources, while excluding specific targets.
            if (
                not configuration.get("sources") and
                not configuration.get("targets")
            ):
                continue

            # Skip if page should not be considered
            source_filter = get_filter(configuration, "sources")
            if not source_filter(processor.file):
                continue

            # Walk through all links and add preview attributes
            target_filter = get_filter(configuration, "targets")
            for el in root.iter("a"):
                href = el.get("href")
                if not href:
                    continue

                # Skip footnotes
                if "footnote-ref" in el.get("class", ""):
                    continue

                # Skip external links
                url = urlparse(href)
                if url.scheme or url.netloc:
                    continue

                # Add preview attribute to internal links
                for path in processor._possible_target_uris(
                    processor.file, url.path,
                    processor.config.use_directory_urls
                ):
                    target = processor.files.get_file_from_path(path)
                    if not target:
                        continue

                    # Include, if filter matches
                    if target_filter(target):
                        el.set("data-preview", "")
# -----------------------------------------------------------------------------
class PreviewExtension(Extension):
    """
    A Markdown extension to enable instant previews on links.

    This extension allows to automatically add the `data-preview` attribute to
    internal links matching specific criteria, so Material for MkDocs renders a
    nice preview on hover as part of a tooltip. It is the recommended way to
    add previews to links in a programmatic way.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize the extension with its default configuration schema.
        """
        # Each entry maps an option name to [default, help text], as expected
        # by Python Markdown's Extension configuration machinery.
        self.config = {
            "configurations": [[], "Filter configurations"],
            "sources": [{}, "Link sources"],
            "targets": [{}, "Link targets"]
        }
        super().__init__(*args, **kwargs)

    def extendMarkdown(self, md: Markdown):
        """
        Register Markdown extension.

        Arguments:
            md: The Markdown instance.
        """
        md.registerExtension(self)

        # Create and register treeprocessor - we use the same priority as the
        # `relpath` treeprocessor, the latter of which is guaranteed to run
        # after our treeprocessor, so we can check the original Markdown URIs
        # before they are resolved to URLs.
        processor = PreviewProcessor(md, self.getConfigs())
        md.treeprocessors.register(processor, "preview", 0)
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def get_filter(settings: dict, key: str):
    """
    Get file filter from settings.

    Arguments:
        settings: The settings.
        key: The key in the settings.

    Returns:
        The file filter.
    """
    config = FilterConfig()
    config.load_dict(settings.get(key) or {})

    # Validate filter configuration - warnings are logged, errors abort the
    # build with a configuration error.
    errors, warnings = config.validate()
    for _, warning in warnings:
        log.warning(f"Error reading filter configuration in '{key}':\n{warning}")
    for _, error in errors:
        raise ConfigurationError(f"Error reading filter configuration in '{key}':\n{error}")

    # Return file filter
    return FileFilter(config = config) # type: ignore
def makeExtension(**kwargs):
    """
    Register Markdown extension.

    Arguments:
        **kwargs: Configuration options.

    Returns:
        The Markdown extension.
    """
    extension = PreviewExtension(**kwargs)
    return extension
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------

# Set up logging - module-level logger used by `get_filter` for validation
# warnings.
log = logging.getLogger("mkdocs.material.extensions.preview")
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/extensions/preview.py",
"license": "MIT License",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
squidfunk/mkdocs-material:material/plugins/optimize/config.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from mkdocs.config.base import Config
from mkdocs.config.config_options import ListOfItems, Type
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# Optimize plugin configuration
class OptimizeConfig(Config):
    enabled = Type(bool, default = True)
    # Worker threads for image optimization. `os.cpu_count()` may return None
    # when the CPU count cannot be determined; fall back to a single worker
    # instead of raising a TypeError at import time.
    concurrency = Type(int, default = max(1, (os.cpu_count() or 1) - 1))

    # Settings for caching
    cache = Type(bool, default = True)
    cache_dir = Type(str, default = ".cache/plugin/optimize")

    # Settings for optimization
    optimize = Type(bool, default = True)
    optimize_png = Type(bool, default = True)
    optimize_png_speed = Type(int, default = 3)
    optimize_png_strip = Type(bool, default = True)
    optimize_jpg = Type(bool, default = True)
    optimize_jpg_quality = Type(int, default = 60)
    optimize_jpg_progressive = Type(bool, default = True)
    optimize_include = ListOfItems(Type(str), default = [])
    optimize_exclude = ListOfItems(Type(str), default = [])

    # Settings for reporting
    print_gain = Type(bool, default = True)
    print_gain_summary = Type(bool, default = True)
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/plugins/optimize/config.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
squidfunk/mkdocs-material:material/plugins/optimize/plugin.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import annotations
import functools
import json
import logging
import os
import subprocess
import sys
from fnmatch import fnmatch
from colorama import Fore, Style
from concurrent.futures import Future
from concurrent.futures.thread import ThreadPoolExecutor
from hashlib import sha1
from mkdocs import utils
from mkdocs.config.defaults import MkDocsConfig
from mkdocs.exceptions import PluginError
from mkdocs.plugins import BasePlugin
from mkdocs.structure.files import File
from shutil import which
try:
from PIL import Image
except ImportError:
pass
from .config import OptimizeConfig
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# Optimize plugin
class OptimizePlugin(BasePlugin[OptimizeConfig]):
    """Plugin that optimizes PNG and JPG media files during the build.

    Optimized images are written to a cache directory and tracked in a
    manifest keyed by content hash, so unchanged images are served from
    the cache instead of being re-optimized on every build.
    """
    supports_multiple_instances = True

    # Manifest, associating file URLs with content hashes
    manifest: dict[str, str] = {}

    # Initialize plugin
    def __init__(self, *args, **kwargs):
        """Initialize plugin state for incremental builds."""
        super().__init__(*args, **kwargs)

        # Initialize incremental builds
        self.is_serve = False

    # Determine whether we're serving the site
    def on_startup(self, *, command, dirty):
        """Remember serve mode and initialize the thread pool."""
        self.is_serve = command == "serve"

        # Initialize thread pool and job registry, mapping source paths to
        # the futures of scheduled optimization jobs
        self.pool = ThreadPoolExecutor(self.config.concurrency)
        self.pool_jobs: dict[str, Future] = {}

    # Resolve and load manifest
    def on_config(self, config):
        """Resolve the cache directory and load the manifest, if present."""
        if not self.config.enabled:
            return

        # Resolve cache directory (once) - this is necessary, so the cache is
        # always relative to the configuration file, and thus project, and not
        # relative to the current working directory, or it would not work with
        # the projects plugin.
        path = os.path.abspath(self.config.cache_dir)
        if path != self.config.cache_dir:
            self.config.cache_dir = os.path.join(
                os.path.dirname(config.config_file_path),
                os.path.normpath(self.config.cache_dir)
            )

        # Ensure cache directory exists
        os.makedirs(self.config.cache_dir, exist_ok = True)

        # Initialize manifest
        self.manifest_file = os.path.join(
            self.config.cache_dir, "manifest.json"
        )

        # Load manifest if it exists and the cache should be used - loading is
        # best-effort, so only swallow I/O and decoding errors instead of
        # hiding arbitrary bugs behind a bare except
        if os.path.isfile(self.manifest_file) and self.config.cache:
            try:
                with open(self.manifest_file) as f:
                    self.manifest = json.load(f)
            except (OSError, ValueError):
                pass

    # Initialize optimization pipeline
    def on_env(self, env, *, config, files):
        """Schedule a concurrent optimization job for each media file."""
        if not self.config.enabled:
            return

        # Skip if media files should not be optimized
        if not self.config.optimize:
            return

        # Filter all optimizable media files and steal responsibility from
        # MkDocs by removing them from the files collection. Then, start a
        # concurrent job that checks if an image was already optimized and can
        # be returned from the cache, or optimize it accordingly.
        for file in files.media_files():
            if self._is_excluded(file):
                continue

            # Spawn concurrent job to optimize the given image and add future
            # to job dictionary, as it returns the file we need to copy later
            path = os.path.join(self.config.cache_dir, file.src_path)
            self.pool_jobs[file.abs_src_path] = self.pool.submit(
                self._optimize_image, file, path, config
            )

            # Steal responsibility from MkDocs
            files.remove(file)

    # Finish optimization pipeline
    def on_post_build(self, *, config):
        """Reconcile all jobs, save the manifest and print the gain summary."""
        if not self.config.enabled:
            return

        # Skip if media files should not be optimized
        if not self.config.optimize:
            return

        # Reconcile concurrent jobs - we need to wait for all jobs to finish
        # before we can copy the optimized files to the output directory. If an
        # exception occurred in one of the jobs, we raise it here, so the build
        # fails and the author can fix the issue.
        for path, future in self.pool_jobs.items():
            # Fetch the exception once instead of calling 'exception()' twice
            error = future.exception()
            if error:
                raise error

            # Copy optimized file from cache to the output directory
            file: File = future.result()
            file.copy_file()

        # Save manifest if cache should be used
        if self.config.cache:
            with open(self.manifest_file, "w") as f:
                f.write(json.dumps(self.manifest, indent = 2, sort_keys = True))

        # Compute and print gains through optimization
        if self.config.print_gain_summary:
            print(Style.NORMAL)
            print("  Optimizations:")

            # Print summary for file extension
            for seek in [".png", ".jpg"]:
                size = size_opt = 0
                for path, future in self.pool_jobs.items():
                    file: File = future.result()

                    # Skip files that are not of the given type
                    _, extension = os.path.splitext(path)
                    extension = ".jpg" if extension == ".jpeg" else extension
                    if extension != seek:
                        continue

                    # Compute size before and after optimization
                    size += os.path.getsize(path)
                    size_opt += os.path.getsize(file.abs_dest_path)

                # Compute absolute and relative gain
                if size and size_opt:
                    gain_abs = size - size_opt
                    gain_rel = (1 - size_opt / size) * 100

                    # Print summary for files
                    print(
                        f"  *{seek} {Fore.GREEN}{_size(size_opt)}"
                        f"{Fore.WHITE}{Style.DIM} β "
                        f"{_size(gain_abs)} [{gain_rel:3.1f}%]"
                        f"{Style.RESET_ALL}"
                    )

            # Reset all styles
            print(Style.RESET_ALL)

    # Save manifest on shutdown
    def on_shutdown(self):
        """Shut down the thread pool and persist the manifest."""
        if not self.config.enabled:
            return

        # Shutdown thread pool - if we're on Python 3.9 and above, cancel all
        # pending futures that have not yet been scheduled
        if sys.version_info >= (3, 9):
            self.pool.shutdown(cancel_futures = True)
        else:
            self.pool.shutdown()

        # Save manifest if cache should be used
        if self.manifest and self.config.cache:
            with open(self.manifest_file, "w") as f:
                f.write(json.dumps(self.manifest, indent = 2, sort_keys = True))

    # -------------------------------------------------------------------------

    # Check if a file can be optimized
    def _is_optimizable(self, file: File):
        """Return whether the plugin knows how to optimize the given file."""

        # Check if PNG images should be optimized
        if file.url.endswith((".png")):
            return self.config.optimize_png

        # Check if JPG images should be optimized
        if file.url.endswith((".jpg", ".jpeg")):
            return self.config.optimize_jpg

        # File can not be optimized by the plugin
        return False

    # Check if the given file is excluded
    def _is_excluded(self, file: File):
        """Return whether the file is excluded by type or pattern."""
        if not self._is_optimizable(file):
            return True

        # Check if file matches one of the inclusion patterns
        path = file.src_path
        if self.config.optimize_include:
            for pattern in self.config.optimize_include:
                if fnmatch(file.src_uri, pattern):
                    return False

            # File is not included
            log.debug(f"Excluding file '{path}' due to inclusion patterns")
            return True

        # Check if file matches one of the exclusion patterns
        for pattern in self.config.optimize_exclude:
            if fnmatch(file.src_uri, pattern):
                log.debug(f"Excluding file '{path}' due to exclusion patterns")
                return True

        # File is not excluded
        return False

    # Optimize image and write to cache
    def _optimize_image(self, file: File, path: str, config: MkDocsConfig):
        """Optimize the image into the cache, reusing the cached result
        when the content hash matches the manifest entry."""
        with open(file.abs_src_path, "rb") as f:
            data = f.read()
            # 'digest' rather than 'hash', which would shadow the builtin
            digest = sha1(data).hexdigest()

        # Check if file hash changed, so we need to optimize again
        prev = self.manifest.get(file.url, "")
        if digest != prev or not os.path.isfile(path):
            os.makedirs(os.path.dirname(path), exist_ok = True)

            # Optimize PNG image using pngquant
            if file.url.endswith((".png")):
                self._optimize_image_png(file, path, config)

            # Optimize JPG image using pillow
            if file.url.endswith((".jpg", ".jpeg")):
                self._optimize_image_jpg(file, path, config)

            # Compute size before and after optimization
            size = len(data)
            size_opt = os.path.getsize(path)

            # Compute absolute and relative gain
            gain_abs = size - size_opt
            gain_rel = (1 - size_opt / size) * 100

            # Print how much we gained, if we did and desired
            gain = ""
            if gain_abs and self.config.print_gain:
                gain += " β "
                gain += " ".join([_size(gain_abs), f"[{gain_rel:3.1f}%]"])

            # Print summary for file
            log.info(
                f"Optimized media file: {file.src_uri} "
                f"{Fore.GREEN}{_size(size_opt)}"
                f"{Fore.WHITE}{Style.DIM}{gain}"
                f"{Style.RESET_ALL}"
            )

            # Update manifest by associating file with hash
            self.manifest[file.url] = digest

        # Compute project root
        root = os.path.dirname(config.config_file_path)

        # Compute source file system path
        file.abs_src_path = path
        file.src_path = os.path.relpath(path, root)

        # Return file to be copied from cache
        return file

    # Optimize PNG image - we first tried to use libimagequant, but encountered
    # the occasional segmentation fault, which means it's probably not a good
    # choice. Instead, we just rely on pngquant which seems much more stable.
    def _optimize_image_png(self, file: File, path: str, config: MkDocsConfig):
        """Optimize a PNG image with pngquant, copying the original if
        pngquant decides not to write a (larger) output file."""

        # Check if the required dependencies for optimizing are available, which
        # is, at the absolute minimum, the 'pngquant' binary, and raise an error
        # to the caller, so he can decide what to do with the error. The caller
        # can treat this as a warning or an error to abort the build.
        if not which("pngquant"):
            docs = os.path.relpath(config.docs_dir)
            path = os.path.relpath(file.abs_src_path, docs)
            raise PluginError(
                f"Couldn't optimize image '{path}' in '{docs}': 'pngquant' "
                f"not found. Make sure 'pngquant' is installed and in your path"
            )

        # Build command line arguments
        args = ["pngquant",
            "--force", "--skip-if-larger",
            "--output", path,
            "--speed", f"{self.config.optimize_png_speed}"
        ]

        # Add flag to remove optional metadata
        if self.config.optimize_png_strip:
            args.append("--strip")

        # Set input file and run, then check if pngquant actually wrote a file,
        # as we instruct it not to if the size of the optimized file is larger.
        # This can happen if files are already compressed and optimized by
        # the author. In that case, just copy the original file.
        subprocess.run([*args, file.abs_src_path])
        if not os.path.isfile(path):
            utils.copy_file(file.abs_src_path, path)

    # Optimize JPG image
    def _optimize_image_jpg(self, file: File, path: str, config: MkDocsConfig):
        """Optimize a JPG image with Pillow."""

        # Check if the required dependencies for optimizing are available, which
        # is, at the absolute minimum, the 'pillow' package, and raise an error
        # to the caller, so he can decide what to do with the error. The caller
        # can treat this as a warning or an error to abort the build.
        if not _supports("Image"):
            docs = os.path.relpath(config.docs_dir)
            path = os.path.relpath(file.abs_src_path, docs)
            raise PluginError(
                f"Couldn't optimize image '{path}' in '{docs}': install "
                f"required dependencies β pip install 'mkdocs-material[imaging]'"
            )

        # Open and save optimized image
        image = Image.open(file.abs_src_path)
        image.save(path, "jpeg",
            quality = self.config.optimize_jpg_quality,
            progressive = self.config.optimize_jpg_progressive
        )
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
# Check for presence of optional imports
@functools.lru_cache(maxsize = None)
def _supports(name: str):
return name in globals()
# -----------------------------------------------------------------------------
# Print human-readable size
def _size(value):
for unit in ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB"]:
if abs(value) < 1000.0:
return f"{value:3.1f} {unit}"
value /= 1000.0
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs.material.optimize")
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/plugins/optimize/plugin.py",
"license": "MIT License",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
squidfunk/mkdocs-material:material/plugins/projects/builder/log.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import annotations
import logging
from click import style
from logging import Filter
from material.plugins.projects.structure import Project
from mkdocs.__main__ import ColorFormatter
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# Dirty build warning filter
class ProjectsFilter(Filter):
    """Log filter that drops MkDocs' dirty build warning.

    When projects are built, the warning would otherwise be emitted once
    per project - a single occurrence from MkDocs itself is plenty.
    """

    # Keep every record except the dirty build warning
    def filter(self, record):
        return not record.getMessage().startswith("A 'dirty' build")
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# Retrieve logger for project
def get_log_for(project: Project):
    """Return a dedicated, non-propagating logger for the given project."""
    name = f"mkdocs.material.projects{project.slug}"
    project_log = logging.getLogger(name)

    # Disable propagation so messages aren't printed twice via the parent
    # logger, and attach a handler with a color formatter exactly once
    project_log.propagate = False
    if not project_log.hasHandlers():
        project_log.addHandler(get_log_handler(project))
        project_log.setLevel(get_log_level_for(project))

    # Return logger
    return project_log
# Retrieve log level for project
def get_log_level_for(project: Project):
    """Determine the effective log level for the given project.

    Mirrors how MkDocs itself was invoked: `--quiet` (or disabling logging
    in the plugin configuration) forces `ERROR`, otherwise the level
    configured in the plugin is returned (as an uppercase level name).
    """
    # MkDocs encodes --quiet/--verbose in the level of its handler, so read
    # it back from the first handler of the 'mkdocs' logger, default INFO
    mkdocs_log = logging.getLogger("mkdocs")
    handler_level = next((h.level for h in mkdocs_log.handlers), logging.INFO)

    # When invoked with --quiet, or logging is disabled for the projects
    # plugin, suppress everything below ERROR
    if handler_level == logging.ERROR or not project.plugin.log:
        return logging.ERROR

    # Return the configured level name
    return project.plugin.log_level.upper()
# -----------------------------------------------------------------------------
# Retrieve log handler for project
def get_log_handler(project: Project):
    """Create a stream handler with color formatting for the given project."""
    stream = logging.StreamHandler()
    stream.setFormatter(get_log_formatter(project))

    # Suppress the dirty build warning, or we'd see one per project being
    # built - a single warning is surely enough
    stream.addFilter(ProjectsFilter())
    return stream
# Retrieve log formatter for project
def get_log_formatter(project: Project):
    """Create a color formatter prefixing messages with the project slug."""
    label = style(f"project://{project.slug}", underline = True)
    return ColorFormatter(f"[{label}] %(message)s")
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/plugins/projects/builder/log.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
squidfunk/mkdocs-material:material/plugins/projects/builder/watcher/handler.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import annotations
import logging
import os
from collections.abc import Callable
from material.plugins.projects.structure import Project
from watchdog.events import FileSystemEvent, FileSystemEventHandler
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# Project changed
class ProjectChanged(FileSystemEventHandler):
    """Watchdog handler that schedules a rebuild of a project whenever
    one of its files changes."""

    # Initialize event handler with the project and the callable invoked
    # to schedule a build for it
    def __init__(self, project: Project, handler: Callable):
        self.project = project
        self.handler = handler

    # Delegate every file system event to the internal handler
    def on_any_event(self, event: FileSystemEvent):
        self._handle(event)

    # -------------------------------------------------------------------------

    # Invoke file event handler
    def _handle(self, event: FileSystemEvent):
        config = self.project.config

        # Resolve the project's docs directory, root and the changed path
        base = os.path.dirname(config.config_file_path)
        docs = os.path.join(base, config.docs_dir)
        root = os.path.relpath(base)
        path = os.path.relpath(event.src_path, root)

        # Ignore deletion of mkdocs.yml or the docs directory itself
        if event.event_type == "deleted":
            if event.src_path in (docs, config.config_file_path):
                return

        # Invoke handler and print message that we're scheduling a build
        log.info(f"Schedule build due to '{path}' in '{root}'")
        self.handler(self.project)
# -----------------------------------------------------------------------------
# Project added or removed
class ProjectAddedOrRemoved(FileSystemEventHandler):
    """Watchdog handler reacting to projects being created or deleted."""

    # Initialize event handler with the project and the callable invoked
    # when a project appears or disappears
    def __init__(self, project: Project, handler: Callable):
        self.project = project
        self.handler = handler

    # Handle file creation event
    def on_created(self, event: FileSystemEvent):
        self._handle(event)

    # Handle file deletion event
    def on_deleted(self, event: FileSystemEvent):
        self._handle(event)

    # ------------------------------------------------------------------------

    # Invoke file event handler
    def _handle(self, event: FileSystemEvent):
        config = self.project.config

        # Touch mkdocs.yml (if still present) to trigger a rebuild
        path = config.config_file_path
        if os.path.isfile(path):
            os.utime(path, None)

        # Invoke handler
        self.handler(self.project)
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs")
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/plugins/projects/builder/watcher/handler.py",
"license": "MIT License",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
squidfunk/mkdocs-material:material/plugins/projects/config.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from collections.abc import Callable
from mkdocs.config.config_options import Choice, Optional, Type
from mkdocs.config.base import Config
# -----------------------------------------------------------------------------
# Options
# -----------------------------------------------------------------------------
# Options for log level - accepted values for the plugin's 'log_level'
# setting; uppercased elsewhere before being handed to Python's logging module
LogLevel = (
    "error",
    "warn",
    "info",
    "debug"
)
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# Projects plugin configuration
# Projects plugin configuration
class ProjectsConfig(Config):
    """Configuration scheme for the projects plugin."""
    enabled = Type(bool, default = True)

    # Number of concurrent project builds - os.cpu_count() may return None
    # when the CPU count can't be determined, so fall back to a single worker
    # instead of raising a TypeError at import time
    concurrency = Type(int, default = max(1, (os.cpu_count() or 1) - 1))

    # Settings for caching
    cache = Type(bool, default = True)
    cache_dir = Type(str, default = ".cache/plugin/projects")

    # Settings for logging
    log = Type(bool, default = True)
    log_level = Choice(LogLevel, default = "info")

    # Settings for projects
    projects = Type(bool, default = True)
    projects_dir = Type(str, default = "projects")
    projects_config_files = Type(str, default = "*/mkdocs.yml")
    projects_config_transform = Optional(Type(Callable))
    projects_root_dir = Optional(Type(str))

    # Settings for hoisting
    hoisting = Type(bool, default = True)
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/plugins/projects/config.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
squidfunk/mkdocs-material:material/plugins/projects/plugin.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import annotations
import json
import os
import posixpath
from jinja2 import pass_context
from jinja2.runtime import Context
from mkdocs.config.defaults import MkDocsConfig
from mkdocs.exceptions import PluginError
from mkdocs import utils
from mkdocs.plugins import BasePlugin, event_priority
from mkdocs.structure import StructureItem
from mkdocs.structure.files import Files
from mkdocs.structure.nav import Link, Section
from mkdocs.utils import get_theme_dir
from urllib.parse import ParseResult as URL, urlparse
from .builder import ProjectsBuilder
from .config import ProjectsConfig
from .structure import Project, ProjectLink
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# Projects plugin
class ProjectsPlugin(BasePlugin[ProjectsConfig]):
    """Plugin that builds nested projects and interlinks them with the
    top-level project."""

    # Projects builder - only ever created for the top-level project
    builder: ProjectsBuilder = None

    # Initialize plugin
    def __init__(self, *args, **kwargs):
        """Initialize plugin state for incremental builds."""
        super().__init__(*args, **kwargs)

        # Initialize incremental builds
        self.is_serve = False
        self.is_dirty = False

        # Hack: Since we're building in topological order, we cannot let MkDocs
        # clean the directory, because it means that nested projects are always
        # deleted before a project is built. We also don't need to restore this
        # functionality, because it's only used once in the process.
        utils.clean_directory = lambda _: _

    # Determine whether we're serving the site
    def on_startup(self, *, command, dirty):
        self.is_serve = command == "serve"
        self.is_dirty = dirty

    # Resolve projects - compared to our other concurrent plugins, this plugin
    # is forced to use a process pool in order to guarantee proper isolation, as
    # MkDocs itself is not thread-safe. Additionally, all project configurations
    # are resolved and written to the cache (if enabled), as it's sufficient to
    # resolve them once on the top-level before projects are built. We might
    # need adjacent project configurations for interlinking projects.
    def on_config(self, config):
        """Resolve all projects and persist the manifest for nested builds."""
        if not self.config.enabled:
            return

        # Skip if projects should not be built - we can only exit here if we're
        # at the top-level, but not when building a nested project
        root = self.config.projects_root_dir is None
        if root and not self.config.projects:
            return

        # Set projects root directory to the top-level project
        if not self.config.projects_root_dir:
            self.config.projects_root_dir = os.path.dirname(
                config.config_file_path
            )

        # Initialize manifest
        self.manifest: dict[str, str] = {}
        self.manifest_file = os.path.join(
            self.config.projects_root_dir,
            self.config.cache_dir,
            "manifest.json"
        )

        # Load manifest if it exists - loading is best-effort, so only swallow
        # I/O and decoding errors instead of hiding bugs behind a bare except
        if os.path.isfile(self.manifest_file):
            try:
                with open(self.manifest_file) as f:
                    self.manifest = json.load(f)
            except (OSError, ValueError):
                pass

        # Building the top-level project, we must resolve and load all project
        # configurations, as we need all information upfront to build them in
        # the correct order, and to resolve links between projects. Furthermore,
        # the author might influence a project's path by setting the site URL.
        if root:
            if not self.builder:
                self.builder = ProjectsBuilder(config, self.config)

            # @todo: detach project resolution from build
            self.manifest = { ".": os.path.relpath(config.config_file_path) }
            for job in self.builder.root.jobs():
                path = os.path.relpath(job.project.config.config_file_path)
                self.manifest[job.project.slug] = path

            # Save manifest, as we need it in nested projects
            os.makedirs(os.path.dirname(self.manifest_file), exist_ok = True)
            with open(self.manifest_file, "w") as f:
                f.write(json.dumps(self.manifest, indent = 2, sort_keys = True))

    # Schedule projects for building - the general case is that all projects
    # can be considered independent of each other, so we build them in parallel
    def on_pre_build(self, config):
        """Build all nested projects before the top-level project."""
        if not self.config.enabled:
            return

        # Skip if projects should not be built or we're not at the top-level
        if not self.config.projects or not self.builder:
            return

        # Build projects
        self.builder.build(self.is_serve, self.is_dirty)

    # Patch environment to allow for hoisting of media files provided by the
    # theme itself, which will also work for other themes, not only this one
    def on_env(self, env, *, config, files):
        """Hoist theme media files of nested projects to the top level."""
        if not self.config.enabled:
            return

        # Skip if projects should not be built or we're at the top-level
        if not self.config.projects or self.builder:
            return

        # If hoisting is enabled and we're building a project, remove all media
        # files that are provided by the theme and hoist them to the top
        if self.config.hoisting:
            theme = get_theme_dir(config.theme.name)
            hoist = Files([])

            # Retrieve top-level project and check if the current project uses
            # the same theme as the top-level project - if not, don't hoist
            root = Project("mkdocs.yml", self.config)
            if config.theme.name != root.config.theme["name"]:
                return

            # Remove all media files that are provided by the theme
            for file in files.media_files():
                if file.abs_src_path.startswith(theme):
                    files.remove(file)
                    hoist.append(file)

            # Resolve source and target project from the manifest
            source: Project | None = None
            target: Project | None = None
            for ref, file in self.manifest.items():
                base = os.path.join(self.config.projects_root_dir, file)
                if file == os.path.relpath(
                    config.config_file_path, self.config.projects_root_dir
                ):
                    source = Project(base, self.config, ref)
                if "." == ref:
                    target = Project(base, self.config, ref)

            # Compute path for slug from source and target project
            path = target.path(source)

            # Fetch URL template filter from environment - the filter might
            # be overridden by other plugins, so we must retrieve and wrap it
            url_filter = env.filters["url"]

            # Patch URL template filter to add support for correctly resolving
            # media files that were hoisted to the top-level project
            @pass_context
            def url_filter_with_hoisting(context: Context, url: str | None):
                if url and hoist.get_file_from_path(url):
                    return posixpath.join(path, url_filter(context, url))
                else:
                    return url_filter(context, url)

            # Register custom template filters
            env.filters["url"] = url_filter_with_hoisting

    # Adjust project navigation in page (run latest) - as always, allow
    # other plugins to alter the navigation before we process it here
    @event_priority(-100)
    def on_page_context(self, context, *, page, config, nav):
        """Replace project URLs in the page navigation."""
        if not self.config.enabled:
            return

        # Skip if projects should not be built
        if not self.config.projects:
            return

        # Replace project URLs in navigation
        self._replace(nav.items, config)

    # Adjust project navigation in template (run latest) - as always, allow
    # other plugins to alter the navigation before we process it here
    @event_priority(-100)
    def on_template_context(self, context, *, template_name, config):
        """Replace project URLs in the template navigation."""
        if not self.config.enabled:
            return

        # Skip if projects should not be built
        if not self.config.projects:
            return

        # Replace project URLs in navigation
        self._replace(context["nav"].items, config)

    # Serve projects
    def on_serve(self, server, *, config, builder):
        """Attach project watchers to the development server."""
        if not self.config.enabled:
            return

        # Skip if projects should not be built or we're not at the top-level -
        # the builder is only created for the top-level project, so guard
        # against 'None' instead of crashing when projects are disabled
        if not self.config.projects or not self.builder:
            return

        # Serve projects
        self.builder.serve(server, self.is_dirty)

    # -------------------------------------------------------------------------

    # Replace project links in the given list of navigation items
    def _replace(self, items: list[StructureItem], config: MkDocsConfig):
        """Recursively replace 'project://' links with resolved links."""
        for index, item in enumerate(items):

            # Handle section
            if isinstance(item, Section):
                self._replace(item.children, config)

            # Handle link
            if isinstance(item, Link):
                url = urlparse(item.url)
                if url.scheme == "project":
                    project, url = self._resolve_project_url(url, config)

                    # Append file name if directory URLs are disabled
                    if not project.config.use_directory_urls:
                        url += "index.html"

                    # Replace link with project link
                    items[index] = ProjectLink(
                        item.title or project.config.site_name,
                        url
                    )

    # Resolve project URL and slug
    def _resolve_project_url(self, url: URL, config: MkDocsConfig):
        """Resolve a 'project://' URL to the target project and its path.

        Raises a PluginError if the URL contains a path or the slug doesn't
        match a known project.
        """

        # Abort if the project URL contains a path, as we first need to collect
        # use cases for when, how and whether we need and want to support this
        if url.path != "":
            raise PluginError(
                f"Couldn't resolve project URL: paths currently not supported\n"
                f"Please only use 'project://{url.hostname}'"
            )

        # Compute slug from host name and convert to dot notation
        slug = url.hostname
        slug = slug if slug.startswith(".") else f".{slug}"

        # Resolve source and target project
        source: Project | None = None
        target: Project | None = None
        for ref, file in self.manifest.items():
            base = os.path.join(self.config.projects_root_dir, file)
            if file == os.path.relpath(
                config.config_file_path, self.config.projects_root_dir
            ):
                source = Project(base, self.config, ref)
            if slug == ref:
                target = Project(base, self.config, ref)

        # Abort if slug doesn't match a known project
        if not target:
            raise PluginError(f"Couldn't find project '{slug}'")

        # Return project slug and path
        return target, target.path(source)
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/plugins/projects/plugin.py",
"license": "MIT License",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
squidfunk/mkdocs-material:material/plugins/social/layout.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import annotations
import re
from mkdocs.config.base import Config
from mkdocs.config.config_options import (
Choice, DictOfItems, ListOfItems, SubConfig, Type
)
try:
from PIL.Image import Image as _Image
except ImportError:
pass
# -----------------------------------------------------------------------------
# Options
# -----------------------------------------------------------------------------
# Options for origin - valid anchor combinations of a horizontal component
# (start/center/end) and a vertical component (top/center/bottom); see
# get_offset below for how each anchor affects layer placement
Origin = (
    "start top", "center top", "end top",
    "start center", "center", "end center",
    "start bottom", "center bottom", "end bottom",
    "start", "end"
)
# Options for overflow - used by Typography.overflow for text that doesn't fit
Overflow = (
    "truncate",
    "shrink"
)
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# Size - horizontal and vertical extent of a layer or layout (0 = unset)
class Size(Config):
    width = Type(int, default = 0)
    height = Type(int, default = 0)
# Offset - horizontal (x) and vertical (y) displacement of a layer
class Offset(Config):
    x = Type(int, default = 0)
    y = Type(int, default = 0)
# -----------------------------------------------------------------------------
# Background - color and/or image of a layer (empty string = none)
class Background(Config):
    color = Type(str, default = "")
    image = Type(str, default = "")
# -----------------------------------------------------------------------------
# Icon - icon reference and color (empty string = none)
class Icon(Config):
    value = Type(str, default = "")
    color = Type(str, default = "")
# -----------------------------------------------------------------------------
# Line - amount and height of lines for typography (both may be fractional)
class Line(Config):
    amount = Type((int, float), default = 1)
    height = Type((int, float), default = 1)
# Font - font family, variant and style used for typography
class Font(Config):
    family = Type(str, default = "Roboto")
    variant = Type(str, default = "")
    style = Type(str, default = "Regular")
# Typography - text content of a layer and how it is rendered
class Typography(Config):
    content = Type(str, default = "")                 # text to render
    align = Choice(Origin, default = "start top")     # alignment anchor
    overflow = Choice(Overflow, default = "truncate") # behavior when text doesn't fit
    color = Type(str, default = "")
    line = SubConfig(Line)
    font = SubConfig(Font)
# -----------------------------------------------------------------------------
# Layer - a single renderable layer: its size, placement (offset + origin),
# and optional background, icon and typography
class Layer(Config):
    size = SubConfig(Size)
    offset = SubConfig(Offset)
    origin = Choice(Origin, default = "start top")
    background = SubConfig(Background)
    icon = SubConfig(Icon)
    typography = SubConfig(Typography)
# -----------------------------------------------------------------------------
# Layout - overall layout: reusable definitions and tags, the canvas size,
# and the ordered list of layers to compose
class Layout(Config):
    definitions = ListOfItems(Type(str), default = [])
    tags = DictOfItems(Type(str), default = {})
    size = SubConfig(Size)
    layers = ListOfItems(SubConfig(Layer), default = [])
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# Get layer or layout size as tuple
def get_size(layer: Layer | Layout):
    """Return the (width, height) of the given layer or layout."""
    size = layer.size
    return size.width, size.height
# Get layer offset as tuple
def get_offset(layer: Layer, image: _Image):
    """Return the effective (x, y) offset of a layer within an image.

    For the default origin "start top" the configured offset is used as-is.
    Any other origin re-anchors the offset relative to image and layer size,
    which allows for flexible positioning of layers.
    """
    x, y = layer.offset.x, layer.offset.y
    # Re-anchor the offset if a non-default origin is configured
    if layer.origin != "start top":
        anchors = re.split(r"\s+", layer.origin)
        w, h = get_size(layer)
        # Horizontal anchor - "start" keeps x as-is, "end" mirrors it from
        # the right edge, "center" shifts it toward the horizontal center
        if "start" in anchors:
            pass
        elif "end" in anchors:
            x += (image.width - w) - 2 * x
        elif "center" in anchors:
            x += (image.width - w) >> 1
        # Vertical anchor - "top" keeps y as-is, "bottom" mirrors it from
        # the bottom edge, "center" shifts it toward the vertical center
        if "top" in anchors:
            pass
        elif "bottom" in anchors:
            y += (image.height - h) - 2 * y
        elif "center" in anchors:
            y += (image.height - h) >> 1
    # Return offset
    return x, y
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/plugins/social/layout.py",
"license": "MIT License",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
squidfunk/mkdocs-material:material/plugins/typeset/config.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from mkdocs.config.config_options import Type
from mkdocs.config.base import Config
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# Typeset plugin configuration
class TypesetConfig(Config):
    # Whether the plugin is enabled - all hooks return early when False
    enabled = Type(bool, default = True)
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/plugins/typeset/config.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
squidfunk/mkdocs-material:material/plugins/typeset/plugin.py | # Copyright (c) 2016-2025 Martin Donath <martin.donath@squidfunk.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import re
from mkdocs.plugins import BasePlugin
from .config import TypesetConfig
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
# Typeset plugin
class TypesetPlugin(BasePlugin[TypesetConfig]):
    """Capture the rendered HTML of headlines and attach it to anchors.

    The plugin records which pages had their titles set explicitly (via
    configuration or metadata) and, for all other pages, mirrors the HTML of
    the first top-level headline onto the page, so that templates can render
    titles including their original markup.
    """
    # Initialize plugin
    def on_config(self, config):
        """Reset the per-build map of explicitly titled pages."""
        if not self.config.enabled:
            return
        # Initialize titles - maps page src_uri to the title's source
        # ("config" or "meta"); pages present here are never overwritten
        self.title_map: dict[str, str] = {}
    # Extract source of page title before it's lost
    def on_pre_page(self, page, *, config, files):
        """Record pages whose title was already set via configuration."""
        if not self.config.enabled:
            return
        # Check if page title was set in configuration
        if page.title:
            path = page.file.src_uri
            self.title_map[path] = "config"
    # Extract typeset content for headlines
    def on_page_content(self, html, *, page, config, files):
        """Extract headline HTML, attach it to anchors and the page title."""
        if not self.config.enabled:
            return
        # Check if page title was set in metadata
        path = page.file.src_uri
        if path not in self.title_map:
            if "title" in page.meta:
                self.title_map[path] = "meta"
        # Flatten anchors and map to headlines - scan the rendered HTML for
        # heading tags whose ids correspond to known table-of-contents anchors
        anchors = _flatten(page.toc.items)
        for (level, id, title) in re.findall(
            r"<h(\d)[^>]+id=\"([^\"]+)[^>]*>(.*?)</h\1>",
            html, flags = re.I | re.M
        ):
            if id not in anchors:
                continue
            # If the author uses `data-toc-label` to override a heading (which
            # doesn't support adding of HTML tags), we can abort here, since
            # the headline will be rendered as-is. It's more or less a hack, so
            # we should check if we can improve it in the future.
            label = re.escape(anchors[id].title)
            if re.search(rf"data-toc-label=['\"]{label}", page.markdown):
                continue
            # Remove anchor links from headlines – we need to do that, or we
            # end up with anchor links inside anchor links, which is invalid
            # HTML5. There are two cases we need to account for here:
            #
            # 1. If toc.anchorlink is enabled, the entire headline is wrapped
            #    in an anchor link, so we unpack its contents
            #
            # 2. If toc.permalink is enabled, an anchor link is appended to the
            #    contents of the headline, so we just remove it
            #
            # Albeit it doesn't make much sense, both options can be used at
            # the same time, so we need to account for both cases. This problem
            # was first reported in https://bit.ly/456AjUm
            title = re.sub(r"^<a\s+[^>]+>(.*?)</a>", r"\1", title)
            title = re.sub(r"<a\s+[^>]+>[^<]+?</a>$", "", title)
            # Remove author-provided ids - see https://bit.ly/3ngiZea
            title = re.sub(r"id=\"?[^\">]+\"?", "", title)
            # Assign headline content to anchor
            anchors[id].typeset = { "title": title }
            # Only pages without an explicit (config/meta) title are updated
            if path not in self.title_map:
                # Assign first top-level headline to page
                if not hasattr(page, "typeset") and int(level) == 1:
                    page.typeset = anchors[id].typeset
                    page.title = re.sub(r"<[^>]+>", "", title)
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
# Flatten a tree of anchors
def _flatten(items):
anchors = {}
for item in items:
anchors[item.id] = item
# Recursively expand children
if item.children:
anchors.update(_flatten(item.children))
# Return anchors
return anchors
| {
"repo_id": "squidfunk/mkdocs-material",
"file_path": "material/plugins/typeset/plugin.py",
"license": "MIT License",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
stanfordnlp/dspy:tests/teleprompt/test_bettertogether.py | """BetterTogether optimizer tests.
Most of the code in this test file was LLM-generated but has been verified
to correctly test the BetterTogether optimizer functionality.
"""
from unittest.mock import Mock, patch
import pytest
import dspy
from dspy import Example
from dspy.predict import Predict
from dspy.teleprompt import BetterTogether, BootstrapFewShotWithRandomSearch, BootstrapFinetune
from dspy.teleprompt.teleprompt import Teleprompter
from dspy.utils.dummies import DummyLM
# Define a simple metric function for testing
def simple_metric(example, prediction, trace=None):
    """Exact-match metric: 1.0 when outputs are identical, 0.0 otherwise."""
    return float(example.output == prediction.output)
# Tiny QA dataset shared by the tests: first two examples form the trainset,
# the last one the valset
examples = [
    Example(input="What is the oldest known human-made monument?", output="GΓΆbekli Tepe in southeastern Turkiye, dating back to around 9600 BCE").with_inputs("input"),
    Example(input="Why can't fish fall in love?", output="Because love is in the air").with_inputs("input"),
    Example(input="What would bring world peace?", output="8 billion people meeting for a tea party in my backyard").with_inputs("input"),
]
trainset = examples[:2]
valset = [examples[2]]
class SimpleModule(dspy.Module):
    """Minimal dspy module wrapping a single Predict for the given signature."""
    def __init__(self, signature):
        super().__init__()
        self.predictor = Predict(signature)
    def forward(self, **kwargs):
        # Delegate directly to the wrapped predictor
        return self.predictor(**kwargs)
# ============================================================================
# Reusable Mock Optimizers
# ============================================================================
class SimpleOptimizer(Teleprompter):
    """No-op optimizer: compile hands the student back untouched."""
    def compile(self, student, **kwargs):
        # Nothing to optimize - pass the program straight through
        return student
class MarkedOptimizer(Teleprompter):
    """Optimizer whose compiled program carries an identifying marker."""
    def __init__(self, marker):
        # Remember the marker to stamp onto every compiled program
        self.marker = marker
    def compile(self, student, **kwargs):
        # Build a fresh program and tag it so tests can tell who produced it
        compiled = SimpleModule("input -> output")
        compiled.marker = self.marker
        return compiled
class CapturingOptimizer(Teleprompter):
    """Optimizer that records every keyword argument passed to compile()."""
    def __init__(self):
        self.received_kwargs = {}
    def compile(self, student, trainset=None, valset=None, teacher=None,
                num_trials=None, max_bootstrapped_demos=None, **kwargs):
        # Snapshot all named arguments (plus any extras) for later inspection
        captured = dict(
            trainset=trainset,
            valset=valset,
            teacher=teacher,
            num_trials=num_trials,
            max_bootstrapped_demos=max_bootstrapped_demos,
        )
        captured.update(kwargs)
        self.received_kwargs = captured
        return student
# ============================================================================
# Pytest Fixtures
# ============================================================================
@pytest.fixture
def student_with_lm():
    """Provide a SimpleModule wired to a DummyLM with one canned output."""
    module = SimpleModule("input -> output")
    module.set_lm(DummyLM([{"output": "test"}]))
    return module
@pytest.fixture
def mock_bt_dependencies():
    """Patch BetterTogether's eval/launch/kill helpers; yield the three mocks."""
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as eval_mock:
        with patch("dspy.teleprompt.bettertogether.launch_lms") as launch_mock:
            with patch("dspy.teleprompt.bettertogether.kill_lms") as kill_mock:
                # Default to a successful evaluation unless a test overrides it
                eval_mock.return_value = Mock(score=0.8)
                yield eval_mock, launch_mock, kill_mock
# ============================================================================
# Tests
# ============================================================================
def test_bettertogether_import():
    """Sanity check: Test that BetterTogether can be imported."""
    # The import itself happens at module load; this only guards the symbol
    assert BetterTogether is not None, "Failed to import BetterTogether"
def test_bettertogether_initialization_default():
    """Test BetterTogether initialization with default optimizers."""
    optimizer = BetterTogether(metric=simple_metric)
    assert optimizer.metric == simple_metric, "Metric not correctly initialized"
    # Both default slots must be populated: 'p' (prompt) and 'w' (weights)
    assert "p" in optimizer.optimizers, "Default 'p' optimizer not created"
    assert "w" in optimizer.optimizers, "Default 'w' optimizer not created"
    assert isinstance(optimizer.optimizers["p"], BootstrapFewShotWithRandomSearch), \
        "Default 'p' should be BootstrapFewShotWithRandomSearch"
    assert isinstance(optimizer.optimizers["w"], BootstrapFinetune), \
        "Default 'w' should be BootstrapFinetune"
def test_bettertogether_initialization_custom():
    """Test BetterTogether initialization with custom optimizers."""
    custom_p = BootstrapFewShotWithRandomSearch(metric=simple_metric)
    custom_w = BootstrapFinetune(metric=simple_metric)
    optimizer = BetterTogether(
        metric=simple_metric,
        p=custom_p,
        w=custom_w
    )
    # Identity (not equality) checks: the exact instances must be kept
    assert optimizer.optimizers["p"] is custom_p, "Custom 'p' optimizer not set"
    assert optimizer.optimizers["w"] is custom_w, "Custom 'w' optimizer not set"
def test_bettertogether_initialization_invalid_optimizer():
    """Test that BetterTogether rejects non-Teleprompter optimizers."""
    # Use pytest.raises (consistent with test_strategy_validation) instead of
    # the try/except/assert-False anti-pattern; constructing with a
    # non-Teleprompter 'p' must fail loudly
    with pytest.raises(TypeError, match="must be a Teleprompter"):
        BetterTogether(
            metric=simple_metric,
            p="not_a_teleprompter"  # Invalid type
        )
def test_strategy_validation():
    """Test strategy validation: valid, invalid, and empty strategies."""
    # NOTE: exercises the private _prepare_strategy parser directly
    optimizer = BetterTogether(metric=simple_metric)
    # Valid strategies should parse without errors
    valid_strategies = ["p", "w", "p -> w", "w -> p", "p -> w -> p"]
    for strategy in valid_strategies:
        parsed = optimizer._prepare_strategy(strategy)
        assert parsed is not None, f"Failed to parse valid strategy: {strategy}"
    # Invalid strategies should raise ValueError
    with pytest.raises(ValueError, match="invalid optimizer keys"):
        optimizer._prepare_strategy("p -> x -> w")
    with pytest.raises(ValueError, match="cannot be empty"):
        optimizer._prepare_strategy("")
def test_compile_basic():
    """Test basic compilation with mocked optimizers.

    Verifies that compile() runs a single-step strategy, invokes the
    configured optimizer, and attaches BetterTogether's result metadata.
    """
    # Teleprompter is already imported at module level; the previous local
    # re-import was redundant and has been removed
    student = SimpleModule("input -> output")
    lm = DummyLM([{"output": "blue"}, {"output": "4"}])
    student.set_lm(lm)
    # Create a mock Teleprompter that records whether compile() was invoked
    class MockTeleprompter(Teleprompter):
        def __init__(self):
            self.compile_called = False
        def compile(self, student, **kwargs):
            self.compile_called = True
            return student
    mock_p = MockTeleprompter()
    optimizer = BetterTogether(metric=simple_metric, p=mock_p)
    # Mock evaluation to avoid actually running the metric
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as mock_eval:
        mock_eval.return_value = Mock(score=0.8)
        with patch("dspy.teleprompt.bettertogether.launch_lms"):
            with patch("dspy.teleprompt.bettertogether.kill_lms"):
                compiled = optimizer.compile(
                    student,
                    trainset=trainset,
                    valset=valset,
                    strategy="p"
                )
    assert compiled is not None, "Compilation returned None"
    assert hasattr(compiled, "candidate_programs"), "Missing candidate_programs attribute"
    assert hasattr(compiled, "flag_compilation_error_occurred"), "Missing flag_compilation_error_occurred attribute"
    assert mock_p.compile_called, "Mock optimizer compile was not called"
def test_trainset_validation():
    """Test that empty trainset is rejected."""
    optimizer = BetterTogether(metric=simple_metric)
    student = SimpleModule("input -> output")
    lm = DummyLM([{"output": "test"}])
    student.set_lm(lm)
    # pytest.raises replaces the try/except/assert-False anti-pattern; the
    # (?i) flag preserves the original case-insensitive message check
    with pytest.raises(ValueError, match=r"(?i)cannot be empty"):
        optimizer.compile(student, trainset=[], valset=valset)
def test_valset_ratio_validation():
    """Test that invalid valset_ratio is rejected."""
    optimizer = BetterTogether(metric=simple_metric)
    student = SimpleModule("input -> output")
    lm = DummyLM([{"output": "test"}])
    student.set_lm(lm)
    # Both out-of-range directions must fail: ratio >= 1 and ratio < 0.
    # pytest.raises replaces the duplicated try/except/assert-False blocks;
    # the brackets in the expected message are escaped for the regex match
    for bad_ratio in (1.0, -0.1):
        with pytest.raises(ValueError, match=r"must be in range \[0, 1\)"):
            optimizer.compile(student, trainset=trainset, valset_ratio=bad_ratio)
def test_optimizer_compile_args_validation():
    """Test that optimizer_compile_args is validated correctly."""
    optimizer = BetterTogether(metric=simple_metric)
    # Unknown optimizer keys in compile args must be rejected; pytest.raises
    # replaces the try/except/assert-False anti-pattern and (?i) preserves
    # the original case-insensitive message check
    with pytest.raises(ValueError, match=r"(?i)invalid optimizer key"):
        optimizer._prepare_optimizer_compile_args(
            {"invalid_key": {"num_trials": 10}},
            teacher=None
        )
def test_student_in_optimizer_compile_args():
    """Test that 'student' in optimizer_compile_args is rejected."""
    optimizer = BetterTogether(metric=simple_metric)
    # Passing 'student' through compile_args would silently replace the
    # program under optimization, so it must be rejected outright
    with pytest.raises(ValueError) as excinfo:
        optimizer._validate_compile_args(
            optimizer.optimizers["p"],
            "p",
            {"student": SimpleModule("input -> output")}
        )
    # Preserve the original case-insensitive two-substring message check
    message = str(excinfo.value).lower()
    assert "student" in message
    assert "not allowed" in message
def test_compile_args_passed_to_optimizer(student_with_lm, mock_bt_dependencies):
    """Test that optimizer_compile_args are correctly passed to optimizers."""
    # Fixtures supply the student and the patched eval/launch/kill helpers
    mock_eval, _, _ = mock_bt_dependencies
    mock_eval.return_value = Mock(score=0.9)
    mock_p = CapturingOptimizer()
    optimizer = BetterTogether(metric=simple_metric, p=mock_p)
    # Define custom compile args for optimizer 'p'
    custom_args = {"num_trials": 20, "max_bootstrapped_demos": 8}
    optimizer.compile(
        student_with_lm,
        trainset=trainset,
        valset=valset,
        strategy="p",
        optimizer_compile_args={"p": custom_args}
    )
    # Verify the custom args were passed to the optimizer
    assert mock_p.received_kwargs is not None, "Optimizer compile was not called"
    assert "num_trials" in mock_p.received_kwargs, "num_trials not passed to optimizer"
    assert mock_p.received_kwargs["num_trials"] == 20, "num_trials value incorrect"
    assert "max_bootstrapped_demos" in mock_p.received_kwargs, "max_bootstrapped_demos not passed"
    assert mock_p.received_kwargs["max_bootstrapped_demos"] == 8, "max_bootstrapped_demos value incorrect"
def test_compile_args_multi_optimizer_strategy():
    """Test that different optimizers in a strategy receive their respective compile_args."""
    # Teleprompter is already imported at module level; the previous local
    # re-import was redundant and has been removed
    student = SimpleModule("input -> output")
    lm = DummyLM([{"output": "test"}])
    student.set_lm(lm)
    # Create mock Teleprompters that capture their compile kwargs
    class PromptOptimizer(Teleprompter):
        def __init__(self):
            self.received_kwargs = {}
        def compile(self, student, trainset=None, num_trials=None, **kwargs):
            self.received_kwargs = {
                "trainset": trainset,
                "num_trials": num_trials,
                **kwargs
            }
            return student
    class WeightOptimizer(Teleprompter):
        def __init__(self):
            self.received_kwargs = {}
        def compile(self, student, trainset=None, num_batches=None, **kwargs):
            self.received_kwargs = {
                "trainset": trainset,
                "num_batches": num_batches,
                **kwargs
            }
            return student
    mock_p = PromptOptimizer()
    mock_w = WeightOptimizer()
    optimizer = BetterTogether(metric=simple_metric, p=mock_p, w=mock_w)
    # Define different compile args for each optimizer
    compile_args = {
        "p": {"num_trials": 10},
        "w": {"num_batches": 5}
    }
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as mock_eval:
        mock_eval.return_value = Mock(score=0.85)
        with patch("dspy.teleprompt.bettertogether.launch_lms"):
            with patch("dspy.teleprompt.bettertogether.kill_lms"):
                with patch.object(optimizer, "_models_changed", return_value=False):
                    optimizer.compile(
                        student,
                        trainset=trainset,
                        valset=valset,
                        strategy="p -> w",
                        optimizer_compile_args=compile_args
                    )
    # Verify each optimizer received its specific args
    assert mock_p.received_kwargs is not None, "Optimizer 'p' compile was not called"
    assert "num_trials" in mock_p.received_kwargs, "num_trials not passed to optimizer 'p'"
    assert mock_p.received_kwargs["num_trials"] == 10, "num_trials value incorrect for 'p'"
    assert mock_p.received_kwargs.get("num_batches") is None, "Optimizer 'p' should not receive 'w' args"
    assert mock_w.received_kwargs is not None, "Optimizer 'w' compile was not called"
    assert "num_batches" in mock_w.received_kwargs, "num_batches not passed to optimizer 'w'"
    assert mock_w.received_kwargs["num_batches"] == 5, "num_batches value incorrect for 'w'"
    assert mock_w.received_kwargs.get("num_trials") is None, "Optimizer 'w' should not receive 'p' args"
def test_compile_args_override_global_params():
    """Test that optimizer_compile_args override global trainset/valset/teacher parameters."""
    # Teleprompter is already imported at module level; the previous local
    # re-import was redundant and has been removed
    student = SimpleModule("input -> output")
    lm = DummyLM([{"output": "test"}])
    student.set_lm(lm)
    # Create a mock Teleprompter that captures compile kwargs
    class CapturingTeleprompter(Teleprompter):
        def __init__(self):
            self.received_kwargs = {}
        def compile(self, student, trainset=None, valset=None, teacher=None, **kwargs):
            self.received_kwargs = {
                "trainset": trainset,
                "valset": valset,
                "teacher": teacher,
                **kwargs
            }
            return student
    mock_p = CapturingTeleprompter()
    optimizer = BetterTogether(metric=simple_metric, p=mock_p)
    # Create override values
    override_trainset = [examples[2]]  # Different from global trainset
    override_valset = [examples[0]]  # Different from global valset
    override_teacher = SimpleModule("input -> output")
    # Pass global values to compile, but override them in optimizer_compile_args
    compile_args = {
        "p": {
            "trainset": override_trainset,
            "valset": override_valset,
            "teacher": override_teacher,
        }
    }
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as mock_eval:
        mock_eval.return_value = Mock(score=0.9)
        with patch("dspy.teleprompt.bettertogether.launch_lms"):
            with patch("dspy.teleprompt.bettertogether.kill_lms"):
                optimizer.compile(
                    student,
                    trainset=trainset,  # Global trainset (examples[:2])
                    valset=valset,  # Global valset (examples[2])
                    teacher=None,  # Global teacher (None)
                    strategy="p",
                    optimizer_compile_args=compile_args
                )
    # Verify the optimizer received the override values, not the global ones
    assert mock_p.received_kwargs["trainset"] == override_trainset, \
        "Optimizer should receive override trainset from compile_args"
    assert mock_p.received_kwargs["valset"] == override_valset, \
        "Optimizer should receive override valset from compile_args"
    assert mock_p.received_kwargs["teacher"] is override_teacher, \
        "Optimizer should receive override teacher from compile_args"
    # Verify they're different from the global values
    assert mock_p.received_kwargs["trainset"] != trainset, \
        "Override trainset should differ from global trainset"
    assert mock_p.received_kwargs["valset"] != valset, \
        "Override valset should differ from global valset"
def test_trainset_shuffling_between_steps():
    """Test that trainset is shuffled between steps when shuffle_trainset_between_steps=True."""
    # Teleprompter is already imported at module level; the previous local
    # re-import was redundant and has been removed
    student = SimpleModule("input -> output")
    lm = DummyLM([{"output": "test"}])
    student.set_lm(lm)
    # Create mock optimizers that capture the trainset they receive
    trainsets_received = []
    class TrainsetCapturingOptimizer(Teleprompter):
        def compile(self, student, trainset=None, **kwargs):
            trainsets_received.append(trainset)
            return student
    mock_p = TrainsetCapturingOptimizer()
    mock_w = TrainsetCapturingOptimizer()
    optimizer = BetterTogether(metric=simple_metric, p=mock_p, w=mock_w)
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as mock_eval:
        mock_eval.return_value = Mock(score=0.8)
        with patch("dspy.teleprompt.bettertogether.launch_lms"):
            with patch("dspy.teleprompt.bettertogether.kill_lms"):
                with patch.object(optimizer, "_models_changed", return_value=False):
                    optimizer.compile(
                        student,
                        trainset=trainset,
                        valset=valset,
                        strategy="p -> w",
                        shuffle_trainset_between_steps=True
                    )
    # Verify trainset was shuffled between steps
    assert len(trainsets_received) == 2, "Should have received trainset twice (for p and w)"
    trainset_p = trainsets_received[0]
    trainset_w = trainsets_received[1]
    # Both should have same examples but potentially different order
    assert len(trainset_p) == len(trainset_w), "Trainsets should have same length"
    # With shuffling enabled and only 2 examples, there's a 50% chance they're in different order
    # We can't reliably test order difference with small dataset, but we can verify they contain same examples
    assert set(id(ex) for ex in trainset_p) == set(id(ex) for ex in trainset_w), \
        "Trainsets should contain the same example objects"
def test_strategy_execution_order():
    """Test that strategy steps are executed in order and programs are passed correctly."""
    # Teleprompter is already imported at module level; the previous local
    # re-import was redundant and has been removed
    student = SimpleModule("input -> output")
    lm = DummyLM([{"output": "test"}])
    student.set_lm(lm)
    # Track execution order and what program each optimizer receives
    execution_log = []
    class LoggingOptimizer(Teleprompter):
        def __init__(self, name):
            self.name = name
        def compile(self, student, **kwargs):
            # Create a new student with a marker to track the optimization path
            optimized = SimpleModule("input -> output")
            if not hasattr(student, "optimization_path"):
                optimized.optimization_path = [self.name]
            else:
                optimized.optimization_path = student.optimization_path + [self.name]
            execution_log.append((self.name, optimized.optimization_path.copy()))
            return optimized
    mock_p = LoggingOptimizer("p")
    mock_w = LoggingOptimizer("w")
    optimizer = BetterTogether(metric=simple_metric, p=mock_p, w=mock_w)
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as mock_eval:
        mock_eval.return_value = Mock(score=0.85)
        with patch("dspy.teleprompt.bettertogether.launch_lms"):
            with patch("dspy.teleprompt.bettertogether.kill_lms"):
                with patch.object(optimizer, "_models_changed", return_value=False):
                    # The return value was never inspected; the execution log
                    # below carries all assertions, so the unused local is gone
                    optimizer.compile(
                        student,
                        trainset=trainset,
                        valset=valset,
                        strategy="p -> w -> p"
                    )
    # Verify execution order
    assert len(execution_log) == 3, "Should have executed 3 optimization steps"
    assert execution_log[0] == ("p", ["p"]), "First step should be 'p'"
    assert execution_log[1] == ("w", ["p", "w"]), "Second step should be 'w' receiving output from 'p'"
    assert execution_log[2] == ("p", ["p", "w", "p"]), "Third step should be 'p' receiving output from 'w'"
def test_lm_lifecycle_management():
    """Test that launch_lms and kill_lms are called appropriately between steps."""
    # The redundant local Teleprompter import and the local duplicate of the
    # module-level SimpleOptimizer class have been removed; the module-level
    # no-op SimpleOptimizer has identical behavior
    student = SimpleModule("input -> output")
    lm = DummyLM([{"output": "test"}])
    student.set_lm(lm)
    mock_p = SimpleOptimizer()
    mock_w = SimpleOptimizer()
    optimizer = BetterTogether(metric=simple_metric, p=mock_p, w=mock_w)
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as mock_eval:
        mock_eval.return_value = Mock(score=0.8)
        with patch("dspy.teleprompt.bettertogether.launch_lms") as mock_launch:
            with patch("dspy.teleprompt.bettertogether.kill_lms") as mock_kill:
                with patch.object(optimizer, "_models_changed", return_value=True):
                    optimizer.compile(
                        student,
                        trainset=trainset,
                        valset=valset,
                        strategy="p -> w"
                    )
    # Verify launch and kill were called
    # When models change (which we mocked to return True), launch should be called
    assert mock_launch.called, "launch_lms should be called when models change"
    assert mock_kill.called, "kill_lms should be called when models change"
def test_error_handling_returns_best_program():
    """Test that if a step fails, the best program found so far is still returned."""
    # Teleprompter is already imported at module level; the previous local
    # re-import was redundant and has been removed
    student = SimpleModule("input -> output")
    lm = DummyLM([{"output": "test"}])
    student.set_lm(lm)
    # Create optimizers where the second one will fail
    class SuccessfulOptimizer(Teleprompter):
        def compile(self, student, **kwargs):
            optimized = SimpleModule("input -> output")
            optimized.step_name = "p_success"
            return optimized
    class FailingOptimizer(Teleprompter):
        def compile(self, student, **kwargs):
            raise RuntimeError("Intentional failure for testing")
    mock_p = SuccessfulOptimizer()
    mock_w = FailingOptimizer()
    optimizer = BetterTogether(metric=simple_metric, p=mock_p, w=mock_w)
    # First call succeeds with score 0.7, second call (to failing optimizer) fails
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as mock_eval:
        mock_eval.side_effect = [
            Mock(score=0.5),  # Baseline
            Mock(score=0.7),  # After p (success)
        ]
        with patch("dspy.teleprompt.bettertogether.launch_lms"):
            with patch("dspy.teleprompt.bettertogether.kill_lms"):
                with patch.object(optimizer, "_models_changed", return_value=False):
                    result = optimizer.compile(
                        student,
                        trainset=trainset,
                        valset=valset,
                        strategy="p -> w"
                    )
    # Verify a program was returned despite the failure
    assert result is not None, "Should return a program even if a step fails"
    assert hasattr(result, "flag_compilation_error_occurred"), "Should have error flag"
    assert result.flag_compilation_error_occurred is True, "Error flag should be True"
    assert hasattr(result, "candidate_programs"), "Should have candidate_programs"
    assert len(result.candidate_programs) > 0, "Should have at least one candidate program"
@pytest.mark.parametrize("test_valset,expected_marker,test_description", [
    (valset, "p_optimized", "With valset: returns best score (p), not latest (w)"),
    (None, "w_optimized", "Without valset: returns latest program (w)"),
])
def test_program_selection(student_with_lm, test_valset, expected_marker, test_description):
    """Test program selection logic with and without validation set."""
    mock_p = MarkedOptimizer("p_optimized")
    mock_w = MarkedOptimizer("w_optimized")
    optimizer = BetterTogether(metric=simple_metric, p=mock_p, w=mock_w)
    # Set up scores: baseline=0.5, p=0.9 (best), w=0.7
    # When test_valset is provided, best score wins; when None, latest wins
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as mock_eval:
        # No side_effect is configured for the valset=None case - evaluation
        # is presumably skipped entirely then, so no scores are needed
        if test_valset is not None:
            mock_eval.side_effect = [
                Mock(score=0.5), # Baseline
                Mock(score=0.9), # After p (best score)
                Mock(score=0.7), # After w (lower than p)
            ]
        with patch("dspy.teleprompt.bettertogether.launch_lms"):
            with patch("dspy.teleprompt.bettertogether.kill_lms"):
                with patch.object(optimizer, "_models_changed", return_value=False):
                    result = optimizer.compile(
                        student_with_lm,
                        trainset=trainset,
                        valset=test_valset,
                        strategy="p -> w"
                    )
    # Verify the correct program was returned based on valset presence
    assert hasattr(result, "marker"), "Result should have marker"
    assert result.marker == expected_marker, test_description
def test_candidate_programs_structure(student_with_lm):
    """Test that candidate_programs has the correct structure and content."""
    together = BetterTogether(metric=simple_metric, p=MarkedOptimizer("p"), w=MarkedOptimizer("w"))
    # Score sequence: baseline=0.5, p=0.8, w=0.9 (best).
    with patch("dspy.teleprompt.bettertogether.eval_candidate_program") as mock_eval, \
         patch("dspy.teleprompt.bettertogether.launch_lms"), \
         patch("dspy.teleprompt.bettertogether.kill_lms"), \
         patch.object(together, "_models_changed", return_value=False):
        mock_eval.side_effect = [Mock(score=0.5), Mock(score=0.8), Mock(score=0.9)]
        result = together.compile(
            student_with_lm,
            trainset=trainset,
            valset=valset,
            strategy="p -> w"
        )
    # candidate_programs must exist and contain baseline + one entry per strategy step.
    assert hasattr(result, "candidate_programs"), "Result should have candidate_programs attribute"
    candidates = result.candidate_programs
    assert len(candidates) == 3, f"Should have 3 candidates, got {len(candidates)}"
    # Every candidate carries a numeric score, a Module, and a strategy label.
    for i, candidate in enumerate(candidates):
        assert "score" in candidate, f"Candidate {i} missing 'score' key"
        assert "program" in candidate, f"Candidate {i} missing 'program' key"
        assert "strategy" in candidate, f"Candidate {i} missing 'strategy' key"
        assert isinstance(candidate["score"], (int, float)), f"Candidate {i} score should be numeric"
        assert isinstance(candidate["program"], dspy.Module), f"Candidate {i} program should be a Module"
        assert isinstance(candidate["strategy"], (str, type(None))), f"Candidate {i} strategy should be str or None"
    # Ordering: best score first.
    scores = [entry["score"] for entry in candidates]
    assert scores == sorted(scores, reverse=True), "Candidates should be sorted by score (descending)"
    assert candidates[0]["score"] == 0.9, "Best candidate should have score 0.9"
    assert candidates[0]["program"].marker == "w", "Best candidate should be from optimizer 'w'"
    # Exactly one baseline entry (empty/None strategy) with the baseline score.
    baseline = [entry for entry in candidates if entry["strategy"] is None or entry["strategy"] == ""]
    assert len(baseline) == 1, "Should have exactly one baseline candidate"
    assert baseline[0]["score"] == 0.5, "Baseline should have score 0.5"
def test_empty_valset_handling(student_with_lm):
    """Test behavior when valset is an empty list vs None."""

    def run_compile(program, opt, chosen_valset):
        # Shared harness: stub LM lifecycle and the model-change check, then compile.
        with patch("dspy.teleprompt.bettertogether.launch_lms"), \
             patch("dspy.teleprompt.bettertogether.kill_lms"), \
             patch.object(opt, "_models_changed", return_value=False):
            return opt.compile(program, trainset=trainset, valset=chosen_valset, strategy="p")

    # Empty list [] should behave like None: the latest program is returned.
    optimizer = BetterTogether(metric=simple_metric, p=MarkedOptimizer("optimized"))
    result = run_compile(student_with_lm, optimizer, [])
    assert hasattr(result, "marker"), "Result should have marker"
    assert result.marker == "optimized", "Should return the latest program when valset is empty list"
    assert hasattr(result, "candidate_programs"), "Should have candidate_programs"

    # Explicit None, using a fresh student/optimizer pair.
    student2 = SimpleModule("input -> output")
    student2.set_lm(DummyLM([{"output": "test"}]))
    optimizer2 = BetterTogether(metric=simple_metric, p=MarkedOptimizer("optimized"))
    result2 = run_compile(student2, optimizer2, None)
    assert hasattr(result2, "marker"), "Result2 should have marker"
    assert result2.marker == "optimized", "Should return the latest program when valset is None"
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/teleprompt/test_bettertogether.py",
"license": "MIT License",
"lines": 595,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/retrievers/test_colbertv2.py | from unittest.mock import MagicMock, patch
import pytest
from dspy.dsp.colbertv2 import colbertv2_get_request_v2, colbertv2_post_request_v2
def test_get_request_raises_on_server_error():
    """A GET response whose payload flags an error must raise ValueError with the message."""
    fake_response = MagicMock()
    fake_response.json.return_value = {"error": True, "message": "connection failed"}
    with patch("dspy.dsp.colbertv2.requests.get", return_value=fake_response), \
         pytest.raises(ValueError, match="connection failed"):
        colbertv2_get_request_v2("http://test", "query", k=3)
def test_post_request_raises_on_server_error():
    """A POST response whose payload flags an error must raise ValueError with the message."""
    fake_response = MagicMock()
    fake_response.json.return_value = {"error": True, "message": "server error"}
    with patch("dspy.dsp.colbertv2.requests.post", return_value=fake_response), \
         pytest.raises(ValueError, match="server error"):
        colbertv2_post_request_v2("http://test2", "query", k=3)
def test_get_request_success():
    """Successful GET results expose each passage under the long_text key."""
    fake_response = MagicMock()
    fake_response.json.return_value = {"topk": [{"text": "doc1", "score": 0.9}]}
    with patch("dspy.dsp.colbertv2.requests.get", return_value=fake_response):
        docs = colbertv2_get_request_v2("http://test3", "query", k=3)
    assert docs[0]["long_text"] == "doc1"
def test_post_request_success():
    """Successful POST results expose each passage under the text key."""
    fake_response = MagicMock()
    fake_response.json.return_value = {"topk": [{"text": "doc1", "score": 0.9}]}
    with patch("dspy.dsp.colbertv2.requests.post", return_value=fake_response):
        docs = colbertv2_post_request_v2("http://test4", "query", k=3)
    assert docs[0]["text"] == "doc1"
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/retrievers/test_colbertv2.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/adapters/test_reasoning.py | import pytest
import dspy
def test_reasoning_basic_operations():
    """Reasoning should behave like a string for the core protocol operations."""
    reasoning = dspy.Reasoning(content="Hello World")

    # String conversion and repr
    assert str(reasoning) == "Hello World"
    assert repr(reasoning) == "'Hello World'"

    # Equality against plain strings and other Reasoning objects (case-sensitive)
    assert reasoning == "Hello World"
    assert reasoning == dspy.Reasoning(content="Hello World")
    assert reasoning != "hello world"
    assert reasoning != dspy.Reasoning(content="hello world")

    # Length, indexing, and slicing
    assert len(reasoning) == 11
    assert reasoning[0] == "H"
    assert reasoning[-1] == "d"
    assert reasoning[0:5] == "Hello"

    # Membership
    assert "World" in reasoning
    assert "xyz" not in reasoning

    # Iteration yields individual characters
    chars = list(reasoning)
    assert len(chars) == 11
    assert chars[0] == "H"
def test_reasoning_concatenation():
    """Concatenation with str yields str; Reasoning + Reasoning stays Reasoning."""
    reasoning = dspy.Reasoning(content="Hello")

    # Left-hand concatenation with a plain string produces a str
    appended = reasoning + " World"
    assert appended == "Hello World"
    assert isinstance(appended, str)

    # Right-hand concatenation also produces a str
    prefixed = "Prefix: " + reasoning
    assert prefixed == "Prefix: Hello"
    assert isinstance(prefixed, str)

    # Combining two Reasoning objects preserves the Reasoning type
    combined = reasoning + dspy.Reasoning(content=" World")
    assert isinstance(combined, dspy.Reasoning)
    assert combined.content == "Hello World"
def test_reasoning_string_methods():
    """Reasoning should delegate the common str methods to its content."""
    reasoning = dspy.Reasoning(content=" Hello World ")
    # Test strip
    assert reasoning.strip() == "Hello World"
    # Test lower/upper
    assert reasoning.lower() == " hello world "
    assert reasoning.upper() == " HELLO WORLD "
    # Test split
    assert reasoning.strip().split() == ["Hello", "World"]
    assert reasoning.strip().split(" ") == ["Hello", "World"]
    # Test replace
    assert reasoning.replace("World", "Python") == " Hello Python "
    # Test startswith/endswith
    assert reasoning.strip().startswith("Hello")
    assert reasoning.strip().endswith("World")
    assert not reasoning.strip().startswith("World")
    # Test find: "World" starts at index 7 of " Hello World "
    # (one leading space + "Hello " = 7 characters before the "W").
    assert reasoning.find("World") == 7
    assert reasoning.find("xyz") == -1
    # Test count
    assert reasoning.count("l") == 3
    # Test join
    assert reasoning.strip().join(["a", "b", "c"]) == "aHello WorldbHello Worldc"
def test_reasoning_with_chain_of_thought():
    """ChainOfThought predictions expose a reasoning field usable as a str."""
    from dspy.utils import DummyLM

    dspy.configure(lm=DummyLM([{"reasoning": "Let me think step by step", "answer": "42"}]))
    prediction = dspy.ChainOfThought("question -> answer")(question="What is the answer?")

    # The reasoning field supports ordinary string operations
    assert isinstance(prediction.reasoning, str)
    assert prediction.reasoning.strip() == "Let me think step by step"
    assert prediction.reasoning.lower() == "let me think step by step"
    assert "step by step" in prediction.reasoning
    assert len(prediction.reasoning) == 25
def test_reasoning_error_message():
    """Accessing an unknown attribute should raise a descriptive AttributeError."""
    with pytest.raises(AttributeError, match="`Reasoning` object has no attribute 'nonexistent_method'"):
        dspy.Reasoning(content="Hello").nonexistent_method
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/adapters/test_reasoning.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/predict/rlm.py | """
Recursive Language Model (RLM) module for DSPy.
RLMs are an inference strategy where LLMs treat long contexts as part of an external
environment rather than feeding them directly to the model. The LLM writes Python code
to programmatically examine, decompose, and recursively call sub-LLMs over snippets.
Reference: "Recursive Language Models" (Zhang, Kraska, Khattab, 2025)
"""
from __future__ import annotations
import logging
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Callable, Iterator
import pydantic
import dspy
from dspy.adapters.types.tool import Tool
from dspy.adapters.utils import parse_value, translate_field_type
from dspy.primitives.code_interpreter import SIMPLE_TYPES, CodeInterpreter, CodeInterpreterError, FinalOutput
from dspy.primitives.module import Module
from dspy.primitives.prediction import Prediction
from dspy.primitives.python_interpreter import PythonInterpreter
from dspy.primitives.repl_types import REPLEntry, REPLHistory, REPLVariable
from dspy.signatures.signature import ensure_signature
from dspy.utils.annotation import experimental
if TYPE_CHECKING:
from dspy.signatures.signature import Signature
logger = logging.getLogger(__name__)
# TODO: Optimize this prompt across a diverse benchmark
# System-prompt template for the REPL action loop. Placeholders are filled by
# _build_signatures(): {inputs} (backtick-quoted input variable names),
# {output_fields} (rendered output-field descriptions), {final_output_names}
# (names to pass to SUBMIT), and {max_llm_calls} (sub-LLM call budget).
ACTION_INSTRUCTIONS_TEMPLATE = """You are tasked with producing the following outputs given the inputs {inputs}:
{output_fields}
You have access to a Python REPL environment. Write Python code and it will be executed. You will see the output, then write more code based on what you learned. This is an iterative process.
Available:
- Variables: {inputs} (your input data)
- `llm_query(prompt)` - query a sub-LLM (~500K char capacity) for semantic analysis
- `llm_query_batched(prompts)` - query multiple prompts concurrently (much faster for multiple queries)
- `print()` - ALWAYS print to see results
- `SUBMIT({final_output_names})` - submit final output when done
- Standard libraries: re, json, collections, math, etc.
IMPORTANT: This is ITERATIVE. Each code block you write will execute, you'll see the output, then you decide what to do next. Do NOT try to solve everything in one step.
1. EXPLORE FIRST - Look at your data before processing it. Print samples, check types/lengths, understand the structure.
2. ITERATE - Write small code snippets, observe outputs, then decide next steps. State persists between iterations.
3. VERIFY BEFORE SUBMITTING - If results seem wrong (zeros, empty, unexpected), reconsider your approach.
4. USE llm_query FOR SEMANTICS - String matching finds WHERE things are; llm_query understands WHAT things mean.
5. MINIMIZE RETYPING (INPUTS & OUTPUTS) - When values are long, precise, or error-prone (IDs, numbers, code, quotes), re-access them via variables and parse/compute in code instead of retyping. Use small, targeted prints to sanity-check, but avoid manual copying when variables can carry the exact value.
6. SUBMIT ONLY AFTER SEEING OUTPUTS - SUBMIT ends the current run immediately. If you need to inspect printed output, run it in one step, review the result, then call SUBMIT in a later step.
You have max {max_llm_calls} sub-LLM calls. When done, call SUBMIT() with your output."""
_PYTHON_FENCE_LANGS = {"python", "py", "python3", "py3", ""}
def _strip_code_fences(code: str) -> str:
"""Extract Python code from markdown fences, or return as-is if no fences."""
code = code.strip()
if "```" not in code:
return code
# Strip outer decorative fence pairs (e.g. ```\n```python\n...\n```\n```)
lines = code.splitlines()
while len(lines) >= 2 and lines[0].strip() == "```" and lines[-1].strip() == "```":
lines.pop(0)
lines.pop()
code = "\n".join(lines).strip()
if "```" not in code:
return code
# Find the first opening fence (skip any text before it)
fence_start = code.find("```")
lang_line, separator, remainder = code[fence_start + 3:].partition("\n")
if not separator:
return code
# Accept python-labeled fences or bare ``` fences; reject explicit non-Python tags
lang = (lang_line.strip().split(maxsplit=1)[0] if lang_line.strip() else "").lower()
if lang not in _PYTHON_FENCE_LANGS:
raise SyntaxError(f"Expected Python code but got ```{lang} fence. Write Python code, not {lang}.")
# Find closing fence
block_end = remainder.find("```")
if block_end == -1:
return remainder.strip()
return remainder[:block_end].strip()
@experimental
class RLM(Module):
"""Recursive Language Model module.
Uses a sandboxed REPL to let the LLM programmatically explore large contexts
through code execution. The LLM writes Python code to examine data, call
sub-LLMs for semantic analysis, and build up answers iteratively.
The default interpreter is PythonInterpreter (Deno/Pyodide/WASM), but you
can provide any CodeInterpreter implementation (e.g., MockInterpreter, or write a custom one using E2B or Modal).
Note: RLM instances are not thread-safe when using a custom interpreter.
Create separate RLM instances for concurrent use, or use the default
PythonInterpreter which creates a fresh instance per forward() call.
Example:
```python
# Basic usage
rlm = dspy.RLM("context, query -> output", max_iterations=10)
result = rlm(context="...very long text...", query="What is the magic number?")
print(result.output)
```
"""
def __init__(
self,
signature: type[Signature] | str,
max_iterations: int = 20,
max_llm_calls: int = 50,
max_output_chars: int = 10_000,
verbose: bool = False,
tools: list[Callable] | None = None,
sub_lm: dspy.LM | None = None,
interpreter: CodeInterpreter | None = None,
):
"""
Args:
signature: Defines inputs and outputs. String like "context, query -> answer"
or a Signature class.
max_iterations: Maximum REPL interaction iterations.
max_llm_calls: Maximum sub-LLM calls (llm_query/llm_query_batched) per execution.
max_output_chars: Maximum characters to include from REPL output.
verbose: Whether to log detailed execution info.
tools: List of tool functions or dspy.Tool objects callable from interpreter code.
Built-in tools: llm_query(prompt), llm_query_batched(prompts).
sub_lm: LM for llm_query/llm_query_batched. Defaults to dspy.settings.lm.
Allows using a different (e.g., cheaper) model for sub-queries.
interpreter: CodeInterpreter implementation to use. Defaults to PythonInterpreter.
"""
super().__init__()
self.signature = ensure_signature(signature)
self.max_iterations = max_iterations
self.max_llm_calls = max_llm_calls
self.max_output_chars = max_output_chars
self.verbose = verbose
self.sub_lm = sub_lm
self._interpreter = interpreter
self._user_tools = self._normalize_tools(tools)
self._validate_tools(self._user_tools)
# Build the action and extract signatures
action_sig, extract_sig = self._build_signatures()
self.generate_action = dspy.Predict(action_sig)
self.extract = dspy.Predict(extract_sig)
# =========================================================================
# Tool Creation and Validation
# =========================================================================
# Reserved tool names that conflict with built-in sandbox functions
_RESERVED_TOOL_NAMES = frozenset({"llm_query", "llm_query_batched", "SUBMIT", "print"})
def _normalize_tools(self, tools: list[Callable] | None) -> dict[str, Tool]:
"""Normalize tools list to a dict of Tool objects keyed by name."""
if not tools:
return {}
if isinstance(tools, dict):
raise TypeError(
"tools must be a list, not a dict. "
"Change tools={'name': func} to tools=[func] "
"(tool names are inferred from function names, or use dspy.Tool(func, name='custom_name'))"
)
def to_tool(func: Callable | Tool) -> Tool:
if isinstance(func, Tool):
return func
if not callable(func):
raise TypeError(f"Tool {func!r} must be callable, got {type(func).__name__}")
return Tool(func)
# List of callables/Tools -> normalize to Tool objects
tool_list = [to_tool(t) for t in tools]
return {tool.name: tool for tool in tool_list}
def _validate_tools(self, tools: dict[str, Tool]) -> None:
"""Validate user-provided tools have valid names."""
for name, tool in tools.items():
if not name.isidentifier():
raise ValueError(f"Invalid tool name '{name}': must be a valid Python identifier")
if name in self._RESERVED_TOOL_NAMES:
raise ValueError(f"Tool name '{name}' conflicts with built-in sandbox function")
def _format_tool_docs(self, tools: dict[str, Tool]) -> str:
"""Format user-provided tools for inclusion in instructions."""
if not tools:
return ""
lines = ["\nAdditional tools available (use these instead of standard library equivalents):"]
for tool in tools.values():
# Build signature string from Tool's args
params = []
for arg_name, arg_schema in (tool.args or {}).items():
arg_type = arg_schema.get("type", "Any")
params.append(f"{arg_name}: {arg_type}")
params_str = ", ".join(params)
sig_str = f"{tool.name}({params_str})"
# Get description with newlines escaped
desc = (tool.desc or "No description").replace("\n", " ")
lines.append(f"- `{sig_str}` - {desc}")
return "\n".join(lines)
    def _make_llm_tools(self, max_workers: int = 8) -> dict[str, Callable]:
        """Create llm_query and llm_query_batched tools with a fresh call counter.

        Args:
            max_workers: Thread-pool size used by llm_query_batched.

        Returns:
            Dict with "llm_query" and "llm_query_batched" callables that share a
            single budget of ``self.max_llm_calls`` calls.
        """
        # Mutable counter shared by both closures; guarded by `lock` because
        # llm_query_batched fans work out across threads.
        state = {"call_count": 0}
        lock = threading.Lock()
        # Capture the configured sub-LM once; falls back to dspy.settings.lm at call time.
        lm = self.sub_lm
        def _check_and_increment(n: int = 1) -> None:
            # Reserve `n` calls atomically, or reject the request up front.
            with lock:
                if state["call_count"] + n > self.max_llm_calls:
                    raise RuntimeError(
                        f"LLM call limit exceeded: {state['call_count']} + {n} > {self.max_llm_calls}. "
                        f"Use Python code for aggregation instead of making more LLM calls."
                    )
                state["call_count"] += n
        def _query_lm(prompt: str) -> str:
            target_lm = lm if lm is not None else dspy.settings.lm
            if target_lm is None:
                raise RuntimeError("No LM configured. Use dspy.configure(lm=...) or pass sub_lm to RLM.")
            response = target_lm(prompt)
            # LM calls may return a list of completions: unwrap the first one,
            # preferring its "text" field when the item is a dict.
            if isinstance(response, list) and response:
                item = response[0]
                if isinstance(item, dict) and "text" in item:
                    return item["text"]
                return item
            return str(response)
        def llm_query(prompt: str) -> str:
            """Query the LLM with a prompt string."""
            if not prompt:
                raise ValueError("prompt cannot be empty")
            _check_and_increment(1)
            return _query_lm(prompt)
        def llm_query_batched(prompts: list[str]) -> list[str]:
            """Query the LLM with multiple prompts concurrently."""
            if not prompts:
                return []
            # Reserve budget for the entire batch before submitting any work.
            _check_and_increment(len(prompts))
            results: dict[int, str] = {}
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                future_to_idx = {executor.submit(_query_lm, p): i for i, p in enumerate(prompts)}
                for future in as_completed(future_to_idx):
                    idx = future_to_idx[future]
                    try:
                        results[idx] = future.result()
                    except Exception as e:
                        # Per-prompt failures are reported inline rather than aborting the batch.
                        results[idx] = f"[ERROR] {e}"
            # Restore the original prompt ordering.
            return [results[i] for i in range(len(prompts))]
        return {"llm_query": llm_query, "llm_query_batched": llm_query_batched}
@property
def tools(self) -> dict[str, Tool]:
"""User-provided tools (excludes internal llm_query/llm_query_batched)."""
return dict(self._user_tools)
# =========================================================================
# Signature Building
# =========================================================================
    def _build_signatures(self) -> tuple[Signature, Signature]:
        """Build the action and extract signatures from templates.

        Returns:
            A pair ``(action_sig, extract_sig)``: the former drives the iterative
            code-writing loop, the latter recovers final outputs from the REPL
            trajectory when SUBMIT was never called.
        """
        # Render "`name`, `name`" for the {inputs} template placeholder.
        inputs_str = ", ".join(f"`{n}`" for n in self.signature.input_fields)
        # Simple names for SUBMIT() examples
        final_output_names = ", ".join(self.signature.output_fields.keys())
        # One "- <rendered field>" bullet per output field.
        output_fields = "\n".join(
            f"- {translate_field_type(n, f)}"
            for n, f in self.signature.output_fields.items()
        )
        # Include original signature instructions (docstring) if present
        task_instructions = f"{self.signature.instructions}\n\n" if self.signature.instructions else ""
        # Format tool documentation for user-provided tools
        tool_docs = self._format_tool_docs(self._user_tools)
        action_sig = (
            dspy.Signature({}, task_instructions + ACTION_INSTRUCTIONS_TEMPLATE.format(
                inputs=inputs_str, final_output_names=final_output_names, output_fields=output_fields,
                max_llm_calls=self.max_llm_calls,
            ) + tool_docs)
            .append("variables_info", dspy.InputField(desc="Metadata about the variables available in the REPL"), type_=str)
            .append("repl_history", dspy.InputField(desc="Previous REPL code executions and their outputs"), type_=REPLHistory)
            .append("iteration", dspy.InputField(desc="Current iteration number (1-indexed) out of max_iterations"), type_=str)
            .append("reasoning", dspy.OutputField(desc="Think step-by-step: what do you know? What remains? Plan your next action."), type_=str)
            .append("code", dspy.OutputField(desc="Python code to execute. Use markdown code block format: ```python\\n<code>\\n```"), type_=str)
        )
        # Extract signature: includes the original signature's output fields and task instructions.
        extract_instructions = """Based on the REPL trajectory, extract the final outputs now.
Review your trajectory to see what information you gathered and what values you computed, then provide the final outputs."""
        # Prepend original task instructions to extract instructions so the LLM knows what task to extract for
        extended_task_instructions = ""
        if task_instructions:
            extended_task_instructions = "The trajectory was generated with the following objective: \n" + task_instructions + "\n"
        full_extract_instructions = extended_task_instructions + extract_instructions
        extract_sig = dspy.Signature(
            {**self.signature.output_fields},
            full_extract_instructions,
        )
        # Each prepend() puts the new input ahead of the previous fields, so the
        # final field order is (variables_info, repl_history, <output fields>).
        extract_sig = extract_sig.prepend("repl_history", dspy.InputField(desc="Your REPL interactions so far"), type_=REPLHistory)
        extract_sig = extract_sig.prepend("variables_info", dspy.InputField(desc="Metadata about the variables available in the REPL"), type_=str)
        return action_sig, extract_sig
# =========================================================================
# Input/Output Processing
# =========================================================================
def _get_output_fields_info(self) -> list[dict]:
"""Get output field info for sandbox registration."""
fields = []
for name, field in self.signature.output_fields.items():
annotation = getattr(field, "annotation", str)
field_info = {"name": name}
# Only include type for simple types that work in function signatures
# Complex types like Literal, Union, etc. are not included
if annotation in SIMPLE_TYPES:
field_info["type"] = annotation.__name__
fields.append(field_info)
return fields
def _build_variables(self, **input_args: Any) -> list[REPLVariable]:
"""Build REPLVariable list from input arguments with field metadata."""
variables = []
for name, value in input_args.items():
field_info = self.signature.input_fields.get(name)
variables.append(REPLVariable.from_value(name, value, field_info=field_info))
return variables
def _format_output(self, output: str) -> str:
if not output:
return "(no output - did you forget to print?)"
return output
def _validate_inputs(self, input_args: dict[str, Any]) -> None:
"""Raise ValueError if required input fields are missing."""
missing = set(self.signature.input_fields.keys()) - set(input_args.keys())
if missing:
raise ValueError(f"Missing required inputs: {sorted(missing)}")
# =========================================================================
# CodeInterpreter Lifecycle
# =========================================================================
def _prepare_execution_tools(self) -> dict[str, Callable]:
"""Create fresh LLM tools and merge with user-provided tools."""
execution_tools = self._make_llm_tools()
# Extract underlying functions from Tool objects for the interpreter
execution_tools.update({name: tool.func for name, tool in self._user_tools.items()})
return execution_tools
def _inject_execution_context(self, interpreter: CodeInterpreter, execution_tools: dict[str, Callable]) -> None:
"""Inject execution tools and output fields into an interpreter.
This ensures llm_query, llm_query_batched, and typed FINAL signatures are available,
even for user-provided interpreters. Each forward() call gets fresh tools with a
fresh call counter, so we must inject on every execution.
"""
interpreter.tools.update(execution_tools)
if hasattr(interpreter, "output_fields"):
interpreter.output_fields = self._get_output_fields_info()
# Reset registration flag to force re-registration with fresh tools
if hasattr(interpreter, "_tools_registered"):
interpreter._tools_registered = False
@contextmanager
def _interpreter_context(self, execution_tools: dict[str, Callable]) -> Iterator[CodeInterpreter]:
"""Yield interpreter, creating PythonInterpreter if none provided at init."""
if self._interpreter is not None:
self._inject_execution_context(self._interpreter, execution_tools)
yield self._interpreter
else:
repl = PythonInterpreter(
tools=execution_tools,
output_fields=self._get_output_fields_info(),
)
try:
yield repl
finally:
repl.shutdown()
# =========================================================================
# Execution Core
# =========================================================================
def _extract_fallback(
self,
variables: list[REPLVariable],
history: REPLHistory,
output_field_names: list[str],
) -> Prediction:
"""Use extract module to get final output when max iterations reached."""
logger.warning("RLM reached max iterations, using extract to get final output")
variables_info = [variable.format() for variable in variables]
extract_pred = self.extract(
variables_info=variables_info,
repl_history=history,
)
return Prediction(
trajectory=[e.model_dump() for e in history],
final_reasoning="Extract forced final output",
**{name: getattr(extract_pred, name) for name in output_field_names},
)
    def _process_final_output(
        self,
        result: FinalOutput,
        output_field_names: list[str],
    ) -> tuple[dict[str, Any] | None, str | None]:
        """Validate and parse FinalOutput. Returns (parsed_outputs, None) or (None, error).

        The error string (second element) is phrased for the LLM: it is appended
        to the REPL history so the model can correct its SUBMIT call next iteration.
        """
        raw_output = result.output
        # Validate raw_output is a dict
        if not isinstance(raw_output, dict):
            return None, f"[Error] FINAL returned {type(raw_output).__name__}, expected dict with fields: {output_field_names}"
        # Validate all required output fields are present
        missing = set(output_field_names) - set(raw_output.keys())
        if missing:
            return None, f"[Error] Missing output fields: {sorted(missing)}. Use SUBMIT({', '.join(output_field_names)})"
        # Parse and validate each output field
        parsed_outputs = {}
        type_errors = []
        for name in output_field_names:
            field = self.signature.output_fields[name]
            annotation = getattr(field, "annotation", str)
            try:
                # parse_value coerces the raw value to the field's annotated type.
                parsed_outputs[name] = parse_value(raw_output[name], annotation)
            except (ValueError, pydantic.ValidationError) as e:
                # Collect every failing field so the model sees all problems at once.
                type_errors.append(
                    f"{name}: expected {annotation.__name__ if hasattr(annotation, '__name__') else annotation}, "
                    f"got {type(raw_output[name]).__name__}: {e}"
                )
        if type_errors:
            return None, "[Type Error] " + "; ".join(type_errors)
        return parsed_outputs, None
    def _process_execution_result(
        self,
        pred: Prediction,
        code: str,
        result: Any,
        history: REPLHistory,
        output_field_names: list[str],
    ) -> Prediction | REPLHistory:
        """Process interpreter result, returning Prediction if final, else updated history.
        This shared helper reduces duplication between sync and async execution paths.
        Args:
            pred: The prediction containing reasoning and code attributes
            code: Code to record in history (already stripped when possible)
            result: Result from interpreter.execute() - FinalOutput, list, str, or error string
            history: Current REPL history
            output_field_names: List of expected output field names
        Returns:
            Prediction if FINAL was called successfully, else updated REPLHistory
        """
        # Handle error strings from caught exceptions
        if isinstance(result, str) and result.startswith("[Error]"):
            output = self._format_output(result)
            return history.append(reasoning=pred.reasoning, code=code, output=output)
        # Handle FINAL output
        if isinstance(result, FinalOutput):
            parsed_outputs, error = self._process_final_output(result, output_field_names)
            if error:
                # Feed the validation error back so the LLM can retry SUBMIT.
                return history.append(reasoning=pred.reasoning, code=code, output=error)
            # Record the successful SUBMIT in the trajectory before returning.
            final_history = history.append(
                reasoning=pred.reasoning, code=code, output=f"FINAL: {parsed_outputs}"
            )
            return Prediction(
                **parsed_outputs,
                trajectory=[e.model_dump() for e in final_history],
                final_reasoning=pred.reasoning,
            )
        # Format non-final result as output
        if isinstance(result, list):
            # A list result is rendered one element per line.
            output = "\n".join(map(str, result))
        else:
            output = str(result) if result else ""
        output = self._format_output(output)
        if self.verbose:
            logger.info(REPLEntry.format_output(output, self.max_output_chars))
        return history.append(reasoning=pred.reasoning, code=code, output=output)
def _execute_code(
self,
repl: CodeInterpreter,
code: str,
input_args: dict[str, Any],
) -> Any:
"""Execute code in the interpreter, returning the result or an error string."""
try:
return repl.execute(code, variables=dict(input_args))
except (CodeInterpreterError, SyntaxError) as e:
return f"[Error] {e}"
    def _execute_iteration(
        self,
        repl: CodeInterpreter,
        variables: list[REPLVariable],
        history: REPLHistory,
        iteration: int,
        input_args: dict[str, Any],
        output_field_names: list[str],
    ) -> Prediction | REPLHistory:
        """Execute one iteration. Returns Prediction if done, else updated REPLHistory."""
        variables_info = [variable.format() for variable in variables]
        # Ask the LLM for its next reasoning step and code block.
        action = self.generate_action(
            variables_info=variables_info,
            repl_history=history,
            iteration=f"{iteration + 1}/{self.max_iterations}",
        )
        if self.verbose:
            logger.info(
                f"RLM iteration {iteration + 1}/{self.max_iterations}\n"
                f"Reasoning: {action.reasoning}\nCode:\n{action.code}"
            )
        try:
            code = _strip_code_fences(action.code)
        except SyntaxError as e:
            # Non-Python fence: record the error in history instead of executing.
            code = action.code
            result = f"[Error] {e}"
            return self._process_execution_result(action, code, result, history, output_field_names)
        result = self._execute_code(repl, code, input_args)
        return self._process_execution_result(action, code, result, history, output_field_names)
# =========================================================================
# Public Interface
# =========================================================================
def forward(self, **input_args) -> Prediction:
"""Execute RLM to produce outputs from the given inputs.
Args:
**input_args: Input values matching the signature's input fields
Returns:
Prediction with output field(s) from the signature and 'trajectory' for debugging
Raises:
ValueError: If required input fields are missing
"""
self._validate_inputs(input_args)
output_field_names = list(self.signature.output_fields.keys())
execution_tools = self._prepare_execution_tools()
variables = self._build_variables(**input_args)
with self._interpreter_context(execution_tools) as repl:
history: REPLHistory = REPLHistory(max_output_chars=self.max_output_chars)
for iteration in range(self.max_iterations):
result: Prediction | REPLHistory = self._execute_iteration(
repl, variables, history, iteration, input_args, output_field_names
)
if isinstance(result, Prediction):
return result
history = result
# Max iterations reached - use extract fallback
return self._extract_fallback(variables, history, output_field_names)
async def _aextract_fallback(
    self,
    variables: list[REPLVariable],
    history: REPLHistory,
    output_field_names: list[str],
) -> Prediction:
    """Async counterpart of _extract_fallback: force a final answer via extract."""
    logger.warning("RLM reached max iterations, using extract to get final output")
    formatted_vars = [v.format() for v in variables]
    extracted = await self.extract.acall(
        variables_info=formatted_vars,
        repl_history=history,
    )
    # Copy each declared output field off the extract prediction.
    outputs = {field: getattr(extracted, field) for field in output_field_names}
    return Prediction(
        trajectory=[entry.model_dump() for entry in history],
        final_reasoning="Extract forced final output",
        **outputs,
    )
async def _aexecute_iteration(
    self,
    repl: CodeInterpreter,
    variables: list[REPLVariable],
    history: REPLHistory,
    iteration: int,
    input_args: dict[str, Any],
    output_field_names: list[str],
) -> Prediction | REPLHistory:
    """Async counterpart of _execute_iteration: run one reason/code/execute step."""
    step_label = f"{iteration + 1}/{self.max_iterations}"
    action = await self.generate_action.acall(
        variables_info=[v.format() for v in variables],
        repl_history=history,
        iteration=step_label,
    )
    if self.verbose:
        logger.info(
            f"RLM iteration {iteration + 1}/{self.max_iterations}\n"
            f"Reasoning: {action.reasoning}\nCode:\n{action.code}"
        )
    try:
        code = _strip_code_fences(action.code)
    except SyntaxError as e:
        # Bad fencing: record the raw code plus the error instead of executing it.
        return self._process_execution_result(
            action, action.code, f"[Error] {e}", history, output_field_names
        )
    result = self._execute_code(repl, code, input_args)
    return self._process_execution_result(action, code, result, history, output_field_names)
async def aforward(self, **input_args) -> Prediction:
    """Async version of forward(). Execute RLM to produce outputs.

    Args:
        **input_args: Values for each of the signature's input fields.

    Returns:
        Prediction carrying the signature's output field(s) plus a
        'trajectory' entry useful for debugging.

    Raises:
        ValueError: When any required input field is absent.
    """
    self._validate_inputs(input_args)
    out_fields = list(self.signature.output_fields.keys())
    exec_tools = self._prepare_execution_tools()
    repl_vars = self._build_variables(**input_args)
    with self._interpreter_context(exec_tools) as repl:
        trace = REPLHistory(max_output_chars=self.max_output_chars)
        for step in range(self.max_iterations):
            outcome = await self._aexecute_iteration(
                repl, repl_vars, trace, step, input_args, out_fields
            )
            if isinstance(outcome, Prediction):
                return outcome
            trace = outcome
        # Iteration budget exhausted without SUBMIT - force an answer via extract.
        return await self._aextract_fallback(repl_vars, trace, out_fields)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/predict/rlm.py",
"license": "MIT License",
"lines": 573,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:dspy/primitives/code_interpreter.py | """
Abstract interpreter interface for code execution environments.
This module defines the CodeInterpreter protocol that allows RLM and other
code-executing modules to work with different interpreter implementations:
- PythonInterpreter: Local Deno/Pyodide WASM interpreter
- MockInterpreter: Scriptable responses for testing
"""
from typing import Any, Callable, Protocol, runtime_checkable
# Types that can be used directly in Python function signatures for SUBMIT()
# NOTE(review): presumably the JSON-friendly builtins the sandbox can marshal
# across the host boundary - confirm against the interpreter implementations.
SIMPLE_TYPES = (str, int, float, bool, list, dict, type(None))
class CodeInterpreterError(RuntimeError):
    """Raised when code interpretation fails.

    Two distinct failure modes are folded into this one exception:

    1. **Execution errors** - the sandbox ran user code that failed:
       NameError, TypeError, ValueError and friends, or a tool call that
       blew up (unknown tool, tool raised an exception). These are
       ordinary user-code errors.
    2. **Protocol errors** - host/sandbox communication broke down:
       malformed JSON from the sandbox, a crashed or unresponsive sandbox
       process, or an invalid JSON-RPC message. These may mean the
       sandbox is corrupted and needs a restart.

    The message usually embeds the original error type (e.g. "NameError: ...")
    which helps tell the two modes apart.

    Note: invalid Python syntax raises SyntaxError directly; it is never
    wrapped in this exception.
    """
class FinalOutput:
    """Sentinel result produced by interpreter.execute() when SUBMIT() runs.

    Receiving this value tells the execution loop to stop iterating and
    hand the wrapped output back to the caller.
    """

    def __init__(self, output: Any):
        self.output = output

    def __repr__(self) -> str:
        return f"FinalOutput({self.output!r})"

    def __eq__(self, other: object) -> bool:
        # Delegate to Python's fallback machinery for non-FinalOutput operands.
        if isinstance(other, FinalOutput):
            return self.output == other.output
        return NotImplemented
@runtime_checkable
class CodeInterpreter(Protocol):
"""Protocol for code execution environments (interpreters).
Implementations must provide:
- start(): Initialize the interpreter (optional, can be lazy)
- execute(): Run code and return results
- shutdown(): Clean up resources
The interpreter maintains state across execute() calls within a session,
allowing variables defined in one call to be used in subsequent calls.
Lifecycle:
1. Create instance (config only, no resources allocated)
2. start() - Initialize interpreter (explicit) or let execute() do it (lazy)
3. execute() - Run code (can be called many times)
4. shutdown() - Release resources
Example implementations:
- LocalInterpreter: Deno/Pyodide WASM interpreter (local)
- MockInterpreter: Scriptable responses for testing
Pooling:
For interpreter pooling, call start() to pre-warm instances, then
distribute execute() calls across the pool.
"""
@property
def tools(self) -> dict[str, Callable[..., str]]:
"""Tools available for interpreter code to call.
Tools are host-side functions that can be invoked from within the
interpreter. Each tool accepts keyword arguments and returns a string.
Implementations should accept tools via constructor and expose them
through this property.
"""
...
def start(self) -> None:
"""Initialize the interpreter and allocate resources.
This method prepares the interpreter for code execution. It can be called
explicitly to pre-warm the interpreter, or implementations may call it
lazily on first execute().
For pooling scenarios, call start() on multiple instances to have
them ready before distributing work.
Calling start() multiple times should be safe (idempotent).
"""
...
def execute(
self,
code: str,
variables: dict[str, Any] | None = None,
) -> Any:
"""Execute Python code and return the result.
Args:
code: Python code to execute
variables: Variables to inject into the namespace before execution.
These are available as top-level variables in the code.
Returns:
One of:
- FinalOutput: If SUBMIT() was called in code
- str: Captured stdout from print() statements
- list: Multiple output lines
- None: If no output was produced
Raises:
CodeInterpreterError: On runtime errors (undefined vars, tool failures, etc.)
SyntaxError: On invalid Python syntax
Note:
State persists across calls. Variables defined in one execute()
call are available in subsequent calls until shutdown().
If start() was not called, implementations should call it lazily.
"""
...
def shutdown(self) -> None:
"""Release resources and terminate the interpreter session.
After shutdown(), the interpreter should not be used again.
A new instance should be created for a fresh session.
"""
...
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/primitives/code_interpreter.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
stanfordnlp/dspy:dspy/primitives/repl_types.py | """
REPL data types for RLM and interpreter interactions.
These types represent the state and history of REPL-based execution:
- REPLVariable: Metadata about variables available in the REPL
- REPLEntry: A single interaction (reasoning, code, output)
- REPLHistory: Container for the full interaction history
"""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Iterator
import pydantic
from pydantic import Field
from dspy.adapters.utils import serialize_for_json
if TYPE_CHECKING:
from pydantic.fields import FieldInfo
__all__ = ["REPLVariable", "REPLEntry", "REPLHistory"]
class REPLVariable(pydantic.BaseModel):
    """Metadata about a variable available in the REPL environment."""

    name: str
    type_name: str
    desc: str = ""
    constraints: str = ""
    total_length: int
    preview: str

    model_config = pydantic.ConfigDict(frozen=True)

    @classmethod
    def from_value(
        cls,
        name: str,
        value: Any,
        field_info: FieldInfo | None = None,
        preview_chars: int = 1000,
    ) -> REPLVariable:
        """Build a REPLVariable from a live value plus optional field metadata.

        Args:
            name: Variable name.
            value: The actual value.
            field_info: Optional pydantic FieldInfo carrying desc/constraints.
            preview_chars: Upper bound on preview size.
        """
        jsonable = serialize_for_json(value)
        rendered = (
            json.dumps(jsonable, indent=2)
            if isinstance(jsonable, (dict, list))
            else str(jsonable)
        )
        if len(rendered) > preview_chars:
            # Head-and-tail preview so both ends of a long value stay visible.
            half = preview_chars // 2
            snippet = rendered[:half] + "..." + rendered[-half:]
        else:
            snippet = rendered
        # Pull desc/constraints out of field metadata when available.
        desc, constraints = "", ""
        extra = getattr(field_info, "json_schema_extra", None) if field_info else None
        if extra:
            candidate = extra.get("desc", "")
            # Placeholder descriptions like "${name}" carry no information.
            if candidate and not candidate.startswith("${"):
                desc = candidate
            constraints = extra.get("constraints", "")
        return cls(
            name=name,
            type_name=type(value).__name__,
            desc=desc,
            constraints=constraints,
            total_length=len(rendered),
            preview=snippet,
        )

    def format(self) -> str:
        """Render this variable's metadata for prompt inclusion."""
        parts = [
            f"Variable: `{self.name}` (access it in your code)",
            f"Type: {self.type_name}",
        ]
        if self.desc:
            parts.append(f"Description: {self.desc}")
        if self.constraints:
            parts.append(f"Constraints: {self.constraints}")
        parts.append(f"Total length: {self.total_length:,} characters")
        parts.append(f"Preview:\n```\n{self.preview}\n```")
        return "\n".join(parts)

    @pydantic.model_serializer()
    def serialize_model(self) -> str:
        return self.format()
class REPLEntry(pydantic.BaseModel):
    """A single REPL interaction entry containing reasoning, code, and output."""

    reasoning: str = ""
    code: str
    output: str

    model_config = pydantic.ConfigDict(frozen=True)

    @staticmethod
    def format_output(output: str, max_output_chars: int = 10_000) -> str:
        """Truncate output head+tail style while reporting its true length."""
        original_len = len(output)
        if original_len > max_output_chars:
            keep = max_output_chars // 2
            dropped = original_len - max_output_chars
            output = (
                output[:keep]
                + f"\n\n... ({dropped:,} characters omitted) ...\n\n"
                + output[-keep:]
            )
        return f"Output ({original_len:,} chars):\n{output}"

    def format(self, index: int, max_output_chars: int = 10_000) -> str:
        """Render this entry for inclusion in prompts."""
        header = f"=== Step {index + 1} ==="
        reasoning_line = f"Reasoning: {self.reasoning}\n" if self.reasoning else ""
        code_block = f"```python\n{self.code}\n```"
        body = self.format_output(self.output, max_output_chars)
        return f"{header}\n{reasoning_line}Code:\n{code_block}\n{body}"
class REPLHistory(pydantic.BaseModel):
    """Container for REPL interaction history.

    Immutable: append() returns a new instance with the entry added.
    """

    entries: list[REPLEntry] = Field(default_factory=list)
    max_output_chars: int = 10_000

    model_config = pydantic.ConfigDict(frozen=True)

    def format(self) -> str:
        if not self.entries:
            return "You have not interacted with the REPL environment yet."
        rendered = [
            entry.format(index=i, max_output_chars=self.max_output_chars)
            for i, entry in enumerate(self.entries)
        ]
        return "\n".join(rendered)

    @pydantic.model_serializer()
    def serialize_model(self) -> str:
        return self.format()

    def append(self, *, reasoning: str = "", code: str, output: str) -> REPLHistory:
        """Return a new REPLHistory with the entry appended."""
        entry = REPLEntry(reasoning=reasoning, code=code, output=output)
        return REPLHistory(
            entries=[*self.entries, entry],
            max_output_chars=self.max_output_chars,
        )

    def __len__(self) -> int:
        return len(self.entries)

    def __iter__(self) -> Iterator[REPLEntry]:
        return iter(self.entries)

    def __bool__(self) -> bool:
        return bool(self.entries)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/primitives/repl_types.py",
"license": "MIT License",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/mock_interpreter.py | """
Mock interpreter for testing RLM and other code-executing modules.
This interpreter doesn't actually execute code - it returns scripted responses
or uses a custom function to generate responses. Useful for:
- Unit testing without Deno/Pyodide dependencies
- Testing specific execution paths (errors, SUBMIT, etc.)
- Recording what code was submitted for execution
"""
from typing import Any, Callable
from dspy.primitives.code_interpreter import CodeInterpreterError, FinalOutput
__all__ = ["MockInterpreter"]
class MockInterpreter:
    """Interpreter double that returns scripted responses instead of running code.

    Satisfies the Interpreter protocol for testing purposes.

    Example usage:
    ```python
    # Script specific responses
    mock = MockInterpreter(responses=[
        "data explored",
        FinalOutput("42"),
    ])
    result1 = mock.execute("print(len(context))")  # Returns "data explored"
    result2 = mock.execute("SUBMIT('42')")  # Returns FinalOutput("42")

    # Use custom execution function
    def custom_exec(code, variables):
        if "SUBMIT" in code:
            return FinalOutput("done")
        return f"executed: {code[:20]}..."

    mock = MockInterpreter(execute_fn=custom_exec)
    ```
    """

    def __init__(
        self,
        responses: list[str | FinalOutput | Exception] | None = None,
        execute_fn: Callable[[str, dict[str, Any]], Any] | None = None,
        tools: dict[str, Callable[..., str]] | None = None,
    ):
        """Set up the mock interpreter.

        Args:
            responses: Responses handed out in order, one per execute() call.
                An Exception in the list is raised instead of returned.
            execute_fn: Custom (code, variables) -> result function; takes
                precedence over the responses list when provided.
            tools: Name-to-callable mapping. Unused by the mock itself but
                stored to satisfy the interpreter protocol.
        """
        self.responses = list(responses) if responses else []
        self.execute_fn = execute_fn
        self.tools = tools or {}
        self.call_count = 0
        self.call_history: list[tuple[str, dict[str, Any]]] = []
        self._shutdown = False

    def start(self) -> None:
        pass

    def execute(
        self,
        code: str,
        variables: dict[str, Any] | None = None,
    ) -> Any:
        """Record the call and hand back the next scripted response.

        Args:
            code: The code that would be executed (logged to call_history).
            variables: Variables that would be injected (also logged).

        Returns:
            The execute_fn result when one is configured, otherwise the
            next queued response (or "" once the queue is exhausted).

        Raises:
            CodeInterpreterError: When the mock was already shut down, or
                when the next queued response is an Exception.
        """
        if self._shutdown:
            raise CodeInterpreterError("MockInterpreter has been shutdown")
        injected = variables or {}
        self.call_history.append((code, injected))
        self.call_count += 1
        # A custom execution function takes precedence over scripted responses.
        if self.execute_fn is not None:
            return self.execute_fn(code, injected)
        if not self.responses:
            return ""
        next_response = self.responses.pop(0)
        if isinstance(next_response, Exception):
            raise next_response
        return next_response

    def shutdown(self) -> None:
        self._shutdown = True

    def reset(self) -> None:
        """Clear recorded state so the instance can be reused in tests."""
        self.call_count = 0
        self.call_history = []
        self._shutdown = False

    # Context manager support
    def __enter__(self) -> "MockInterpreter":
        return self

    def __exit__(self, *args: Any) -> None:
        self.shutdown()

    def __call__(
        self,
        code: str,
        variables: dict[str, Any] | None = None,
    ) -> Any:
        """Shorthand for execute()."""
        return self.execute(code, variables)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/mock_interpreter.py",
"license": "MIT License",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/predict/test_rlm.py | """
Tests for the RLM (Recursive Language Model) module.
Test organization:
- Unit tests (no Deno required): MockInterpreter, RLM formatting, signatures
- Integration tests (@pytest.mark.deno): PythonInterpreter with Deno
"""
from contextlib import contextmanager
import pytest
from dspy.adapters.types.tool import Tool
from dspy.predict.rlm import RLM, _strip_code_fences
from dspy.primitives.code_interpreter import CodeInterpreterError, FinalOutput
from dspy.primitives.prediction import Prediction
from dspy.primitives.python_interpreter import PythonInterpreter
from dspy.primitives.repl_types import REPLEntry, REPLHistory, REPLVariable
from tests.mock_interpreter import MockInterpreter
# ============================================================================
# Test Helpers and Factories
# ============================================================================
def make_mock_predictor(responses: list[dict], async_mode: bool = False):
    """Factory for mock predictors that replay scripted responses cyclically.

    Args:
        responses: Dicts of prediction fields (e.g. 'reasoning', 'code').
        async_mode: Accepted for call-site symmetry; the returned predictor
            always supports both __call__() and acall().
    """

    class MockPredictor:
        def __init__(self):
            self.idx = 0

        def _next_response(self):
            # Cycle through the scripted responses.
            scripted = responses[self.idx % len(responses)]
            self.idx += 1
            return Prediction(**scripted)

        def __call__(self, **kwargs):
            return self._next_response()

        async def acall(self, **kwargs):
            return self._next_response()

    return MockPredictor()
@contextmanager
def dummy_lm_context(responses: list[dict]):
    """Yield a DummyLM installed as the active dspy LM for the duration."""
    import dspy
    from dspy.utils.dummies import DummyLM

    dummy = DummyLM(responses)
    with dspy.context(lm=dummy):
        yield dummy
# Common test tools
def echo_tool(text: str = "") -> str:
    """Echo the input text."""
    return "Echo: " + text
def add_tool(a: int = 0, b: int = 0) -> str:
    """Add two numbers."""
    total = a + b
    return f"{total}"
def multiply_tool(a: int = 0, b: int = 0) -> str:
    """Multiply two numbers."""
    return f"{a * b}"
# ============================================================================
# Unit Tests: MockInterpreter
# ============================================================================
class TestMockInterpreter:
    """Unit tests for MockInterpreter."""

    def test_scripted_responses(self):
        """MockInterpreter hands back scripted responses in order."""
        interp = MockInterpreter(responses=["first", "second", "third"])
        results = [interp.execute(c) for c in ("code1", "code2", "code3")]
        assert results == ["first", "second", "third"]

    def test_returns_final_output_result(self):
        """MockInterpreter can yield a FinalOutput sentinel."""
        interp = MockInterpreter(responses=["exploring", FinalOutput("42")])
        assert interp.execute("print(len(data))") == "exploring"
        final = interp.execute("SUBMIT('42')")
        assert isinstance(final, FinalOutput)
        assert final.output == "42"

    def test_raises_exception_from_responses(self):
        """An Exception placed in responses is raised, not returned."""
        interp = MockInterpreter(responses=["ok", CodeInterpreterError("undefined variable")])
        assert interp.execute("code1") == "ok"
        with pytest.raises(CodeInterpreterError, match="undefined variable"):
            interp.execute("code2")

    def test_records_call_history(self):
        """Every execute() call is logged for later test assertions."""
        interp = MockInterpreter(responses=["resp"])
        interp.execute("print(1)", variables={"x": 10})
        assert interp.call_history == [("print(1)", {"x": 10})]
# ============================================================================
# Unit Tests: RLM Module (no interpreter needed)
# ============================================================================
class TestRLMInitialization:
    """Tests for RLM module initialization."""

    def test_basic_initialization(self):
        """Test RLM module initializes correctly with signature."""
        rlm = RLM("context, query -> answer", max_iterations=5)
        assert rlm.max_iterations == 5
        assert rlm.generate_action is not None
        assert rlm.extract is not None
        assert rlm.tools == {}  # No user tools provided
        assert "context" in rlm.signature.input_fields
        assert "query" in rlm.signature.input_fields
        assert "answer" in rlm.signature.output_fields

    def test_custom_signature(self):
        """Test RLM with custom signature."""
        rlm = RLM("document, question -> summary, key_facts", max_iterations=5)
        assert "document" in rlm.signature.input_fields
        assert "question" in rlm.signature.input_fields
        assert "summary" in rlm.signature.output_fields
        assert "key_facts" in rlm.signature.output_fields

    def test_custom_tools(self):
        """Test RLM with custom tools."""
        def custom_tool(x: str = "") -> str:
            return x.upper()

        rlm = RLM("context -> answer", max_iterations=5, tools=[custom_tool])
        assert "custom_tool" in rlm.tools
        assert len(rlm.tools) == 1  # Only user tools, not internal llm_query/llm_query_batched

    @pytest.mark.parametrize("tool_name", ["invalid-name", "123start"])
    def test_tool_validation_invalid_identifier(self, tool_name):
        """Test RLM rejects tool names that aren't valid Python identifiers."""
        def my_tool() -> str:
            return "result"

        tool = Tool(my_tool, name=tool_name)
        with pytest.raises(ValueError, match="must be a valid Python identifier"):
            RLM("context -> answer", tools=[tool])

    @pytest.mark.parametrize("tool_name", ["llm_query", "SUBMIT", "print"])
    def test_tool_validation_reserved_names(self, tool_name):
        """Test RLM rejects tool names that conflict with built-in functions."""
        def my_tool() -> str:
            return "result"

        tool = Tool(my_tool, name=tool_name)
        with pytest.raises(ValueError, match="conflicts with built-in"):
            RLM("context -> answer", tools=[tool])

    @pytest.mark.parametrize("invalid_value", ["not a function", 123])
    def test_tool_validation_not_callable(self, invalid_value):
        """Test RLM rejects tools that aren't callable."""
        with pytest.raises(TypeError, match="must be callable"):
            RLM("context -> answer", tools=[invalid_value])

    def test_tools_dict_rejected(self):
        """Test RLM rejects dict format for tools with helpful error."""
        def my_tool() -> str:
            return "result"

        with pytest.raises(TypeError, match="tools must be a list, not a dict"):
            RLM("context -> answer", tools={"my_tool": my_tool})

    def test_optional_parameters(self):
        """Test RLM optional parameters and their defaults."""
        import dspy

        # Test defaults
        rlm = RLM("context -> answer")
        assert rlm.max_llm_calls == 50
        assert rlm.sub_lm is None
        assert rlm._interpreter is None
        # Test custom values
        mock = MockInterpreter()
        mock_lm = dspy.LM("openai/gpt-4o-mini")
        rlm = RLM("context -> answer", max_llm_calls=100, sub_lm=mock_lm, interpreter=mock)
        assert rlm.max_llm_calls == 100
        assert rlm.sub_lm is mock_lm
        assert rlm._interpreter is mock

    def test_forward_validates_required_inputs(self):
        """Test that forward() raises ValueError for missing required inputs."""
        mock = MockInterpreter(responses=["result"])
        # Single missing input
        rlm = RLM("context, query -> answer", max_iterations=3, interpreter=mock)
        with pytest.raises(ValueError, match="Missing required input"):
            rlm.forward(context="some context")  # Missing 'query'
        # Multiple missing inputs - all should be reported
        rlm = RLM("a, b, c -> answer", max_iterations=3, interpreter=mock)
        with pytest.raises(ValueError) as exc_info:
            rlm.forward(a="only a")  # Missing 'b' and 'c'
        assert "b" in str(exc_info.value)
        assert "c" in str(exc_info.value)

    def test_batched_query_errors_have_clear_markers(self):
        """Test that errors in llm_query_batched are prefixed with [ERROR]."""
        from unittest.mock import MagicMock

        # Sub-LM that always fails; the batched tool should surface this per-prompt.
        mock_lm = MagicMock()
        mock_lm.side_effect = RuntimeError("LM failed")
        rlm = RLM("context -> answer", max_llm_calls=10, sub_lm=mock_lm)
        tools = rlm._make_llm_tools()
        results = tools["llm_query_batched"](prompts=["test prompt"])
        assert len(results) == 1
        assert results[0].startswith("[ERROR]")
        assert "LM failed" in results[0]

    def test_tools_call_counter_is_thread_safe(self):
        """Test that the LLM call counter is thread-safe for concurrent llm_query_batched calls.

        The call counter must be protected by a lock since llm_query_batched uses
        ThreadPoolExecutor for concurrent execution.
        """
        from concurrent.futures import ThreadPoolExecutor
        from unittest.mock import MagicMock

        mock_lm = MagicMock()
        mock_lm.return_value = ["response"]
        # Budget of exactly 10 calls; 10 concurrent calls must all succeed.
        rlm = RLM("context -> answer", max_llm_calls=10, sub_lm=mock_lm)
        tools = rlm._make_llm_tools()
        call_count = [0]
        errors = []

        def make_call():
            try:
                tools["llm_query"](prompt="test")
                call_count[0] += 1
            except RuntimeError as e:
                errors.append(e)

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = [executor.submit(make_call) for _ in range(10)]
            for f in futures:
                f.result()
        assert call_count[0] == 10, f"Expected 10 successful calls, got {call_count[0]}"
        assert len(errors) == 0, f"Unexpected errors: {errors}"
        # The 11th call exceeds the budget and must raise.
        with pytest.raises(RuntimeError, match="LLM call limit exceeded"):
            tools["llm_query"](prompt="one more")
class TestRLMCodeFenceParsing:
    """Tests for robust fenced-code extraction."""

    @pytest.mark.parametrize(
        ("fenced", "cleaned"),
        [
            # Plain python-tagged fence
            ("```python\nprint(1)\n```", "print(1)"),
            ("```py\nx = 1\nprint(x)\n```", "x = 1\nprint(x)"),
            # Fence without a language tag
            ("```\nprint('no lang')\n```", "print('no lang')"),
            # No fence markers at all
            ("not fenced code", "not fenced code"),
            # Preamble before the fence is skipped
            ("I'll inspect first.\n```python\nprint('hello')\n```\nThen I will submit.", "print('hello')"),
            # Trailing text after the closing fence is ignored
            ("```python\nprint(1)\n```\nsome trailing text", "print(1)"),
            # A fence that never closes still yields its body
            ("```python\nprint('oops')", "print('oops')"),
            # Decorative outer fences wrapped around the real one
            ("```\n```python\nprint(1)\n```\n```", "print(1)"),
            ("```\n```\nprint(2)\n```\n```", "print(2)"),
        ],
    )
    def test_strip_code_fences(self, fenced, cleaned):
        assert _strip_code_fences(fenced) == cleaned

    def test_strip_code_fences_rejects_non_python_lang(self):
        with pytest.raises(SyntaxError, match="json"):
            _strip_code_fences('```json\n{"a": 1}\n```')
class TestRLMFormatting:
    """Tests for RLM formatting helpers."""

    def test_format_history(self):
        """REPLHistory.format() renders steps with reasoning and code."""
        hist = REPLHistory()
        hist = hist.append(reasoning="Need to check the data", code="print(1)", output="1")
        hist = hist.append(reasoning="Now calculate", code="x = 2", output="")
        rendered = hist.format()
        for fragment in ("Step 1", "Step 2", "print(1)", "Need to check"):
            assert fragment in rendered

    def test_format_history_empty(self):
        """An empty history formats to a 'not interacted' message."""
        assert "have not interacted with the REPL" in REPLHistory().format()

    def test_action_signature_has_iteration_field(self):
        """The action signature exposes an 'iteration' input field."""
        rlm = RLM("context -> answer")
        assert "iteration" in rlm.generate_action.signature.input_fields

    def test_format_output(self):
        """Non-empty output passes through _format_output."""
        rlm = RLM("context -> answer")
        assert "output text" in rlm._format_output("output text")

    def test_format_output_empty(self):
        """Empty output is reported as 'no output'."""
        rlm = RLM("context -> answer")
        assert "no output" in rlm._format_output("").lower()

    def test_format_output_passthrough(self):
        """_format_output never truncates non-empty output."""
        rlm = RLM("context -> answer", max_output_chars=100)
        long_text = "a" * 200
        assert rlm._format_output(long_text) == long_text

    def test_format_variable_info_string(self):
        """REPLVariable formatting for a string value."""
        rendered = REPLVariable.from_value("context", "Hello world", preview_chars=5).format()
        assert "Variable: `context`" in rendered
        assert "Type: str" in rendered
        assert "11" in rendered  # total length
        assert "He" in rendered  # preview head
        assert "ld" in rendered  # preview tail
        assert "..." in rendered  # truncation marker

    def test_format_variable_info_dict(self):
        """REPLVariable formatting for a dict value."""
        rendered = REPLVariable.from_value("data", {"key": "value"}).format()
        assert "Variable: `data`" in rendered
        assert "Type: dict" in rendered
        assert "key" in rendered

    def test_build_variables_multiple(self):
        """_build_variables creates one entry per input field."""
        rlm = RLM("context, query -> answer")
        built = rlm._build_variables(
            context="Hello world",
            query="What is this?"
        )
        assert len(built) == 2
        joined = "\n\n".join(v.format() for v in built)
        assert "Variable: `context`" in joined
        assert "Variable: `query`" in joined
        assert "Hello world" in joined
        assert "What is this?" in joined
class TestREPLTypes:
    """Tests for the REPL type classes."""

    def test_repl_history_immutability(self):
        """Test that REPLHistory.append() returns new instance."""
        h1 = REPLHistory()
        h2 = h1.append(code="print(1)", output="1")
        assert len(h1) == 0  # Original unchanged
        assert len(h2) == 1  # New has entry

    def test_repl_history_len_iter_bool(self):
        """Test REPLHistory list-like interface."""
        h = REPLHistory()
        assert len(h) == 0
        assert not bool(h)
        h = h.append(code="x = 1", output="")
        h = h.append(code="x = 2", output="")
        assert len(h) == 2
        assert bool(h)
        # Iteration yields entries in insertion order.
        codes = [e.code for e in h]
        assert codes == ["x = 1", "x = 2"]

    def test_repl_entry_format(self):
        """Test REPLEntry formatting."""
        entry = REPLEntry(reasoning="test reason", code="print(1)", output="1")
        formatted = entry.format(index=0)
        assert "Step 1" in formatted
        assert "test reason" in formatted
        assert "print(1)" in formatted
        assert "1" in formatted

    def test_repl_entry_format_truncation(self):
        """Test REPLEntry.format() truncates with head+tail and shows true length."""
        # 200 chars against a 100-char budget: 50-char head + 50-char tail survive.
        output = "a" * 100 + "b" * 100
        entry = REPLEntry(code="print('a' + 'b')", output=output)
        formatted = entry.format(index=0, max_output_chars=100)
        # Head and tail preserved
        assert "a" * 50 in formatted
        assert "b" * 50 in formatted
        assert "100 characters omitted" in formatted
        # True original length shown in header
        assert "200 chars" in formatted

    def test_repl_entry_format_no_truncation(self):
        """Test REPLEntry.format() passes short output through without truncation."""
        output = "a" * 50
        entry = REPLEntry(code="print('a')", output=output)
        formatted = entry.format(index=0, max_output_chars=100)
        assert output in formatted
        assert "omitted" not in formatted

    def test_repl_history_threads_max_output_chars(self):
        """Test REPLHistory carries max_output_chars through append()."""
        h = REPLHistory(max_output_chars=50)
        h2 = h.append(code="print(1)", output="a" * 100)
        assert h2.max_output_chars == 50
        # Formatting should truncate at 50 chars
        formatted = h2.format()
        assert "50 characters omitted" in formatted

    def test_repl_variable_from_value(self):
        """Test REPLVariable.from_value() factory."""
        var = REPLVariable.from_value("test", "hello world")
        assert var.name == "test"
        assert var.type_name == "str"
        assert var.total_length == 11
        assert "hello world" in var.preview

    def test_repl_variable_truncation(self):
        """Test REPLVariable preview shows head and tail."""
        # preview_chars=50 means a 25-char head and 25-char tail around "...".
        var = REPLVariable.from_value("big", "a" * 500 + "b" * 500, preview_chars=50)
        assert var.preview.startswith("a" * 25)
        assert var.preview.endswith("b" * 25)
        assert "..." in var.preview

    def test_repl_variable_with_field_info(self):
        """Test REPLVariable includes desc and constraints from field_info."""
        import dspy

        # Create a field with description and constraints
        field = dspy.InputField(desc="The user's question", ge=0, le=100)
        var = REPLVariable.from_value("query", "What is 2+2?", field_info=field)
        assert var.desc == "The user's question"
        assert "greater than or equal to" in var.constraints
        # Verify format includes the metadata
        formatted = var.format()
        assert "Description: The user's question" in formatted
        assert "Constraints:" in formatted

    def test_repl_variable_without_field_info(self):
        """Test REPLVariable works without field_info."""
        var = REPLVariable.from_value("data", [1, 2, 3])
        assert var.desc == ""
        assert var.constraints == ""
        # Format should not include empty desc/constraints lines
        formatted = var.format()
        assert "Description:" not in formatted
        assert "Constraints:" not in formatted

    def test_build_variables_includes_field_metadata(self):
        """Test _build_variables passes field_info to REPLVariable."""
        import dspy

        class QASig(dspy.Signature):
            """Answer questions."""

            context: str = dspy.InputField(desc="Background information")
            question: str = dspy.InputField(desc="The question to answer")
            answer: str = dspy.OutputField()

        rlm = RLM(QASig, max_iterations=3)
        variables = rlm._build_variables(context="Some text", question="What?")
        # Find the context variable
        context_var = next(v for v in variables if v.name == "context")
        assert context_var.desc == "Background information"
        question_var = next(v for v in variables if v.name == "question")
        assert question_var.desc == "The question to answer"
class TestRLMCallMethod:
"""Tests for RLM __call__ method."""
def test_call_is_alias_for_forward(self):
"""Test that __call__ is an alias for forward()."""
mock = MockInterpreter(responses=[FinalOutput({"answer": "42"})])
rlm = RLM("query -> answer", max_iterations=3, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Return answer", "code": 'SUBMIT("42")'},
])
result = rlm(query="What is the answer?")
assert result.answer == "42"
class TestRLMMaxIterationsFallback:
"""Tests for max_iterations reached and extract fallback."""
def test_max_iterations_triggers_extract(self):
"""Test that reaching max_iterations uses extract fallback."""
mock = MockInterpreter(responses=[
"exploring...",
"still exploring...",
"more exploring...",
])
rlm = RLM("query -> answer", max_iterations=3, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Explore 1", "code": "print('exploring')"},
{"reasoning": "Explore 2", "code": "print('exploring')"},
{"reasoning": "Explore 3", "code": "print('exploring')"},
])
# Mock the extract predictor to return a value
rlm.extract = make_mock_predictor([
{"answer": "extracted_answer"},
])
result = rlm.forward(query="test")
assert result.answer == "extracted_answer"
assert result.final_reasoning == "Extract forced final output"
class TestRLMToolExceptions:
"""Tests for tool exception handling."""
def test_tool_exception_returns_error_in_output(self):
"""Test that tool exceptions are caught and returned as errors."""
def failing_tool() -> str:
raise RuntimeError("Tool failed!")
mock = MockInterpreter(responses=[
CodeInterpreterError("RuntimeError: Tool failed!"),
FinalOutput({"answer": "recovered"}),
])
rlm = RLM("query -> answer", max_iterations=5, interpreter=mock, tools=[failing_tool])
rlm.generate_action = make_mock_predictor([
{"reasoning": "Call tool", "code": "failing_tool()"},
{"reasoning": "Recover", "code": 'SUBMIT("recovered")'},
])
result = rlm.forward(query="test")
assert result.answer == "recovered"
def test_runtime_error_history_uses_stripped_code(self):
"""Runtime execution failures should preserve stripped code in history."""
mock = MockInterpreter(responses=[
CodeInterpreterError("NameError: name 'x' is not defined"),
FinalOutput({"answer": "recovered"}),
])
rlm = RLM("query -> answer", max_iterations=5, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Will fail", "code": "```python\nprint(x)\n```"},
{"reasoning": "Recover", "code": 'SUBMIT("recovered")'},
])
result = rlm.forward(query="test")
assert result.answer == "recovered"
first_step = result.trajectory[0]
assert first_step["code"] == "print(x)"
def test_syntax_error_from_execute_is_recoverable(self):
"""SyntaxError from interpreter.execute should be surfaced as an iteration error."""
mock = MockInterpreter(responses=[
SyntaxError("invalid syntax"),
FinalOutput({"answer": "recovered"}),
])
rlm = RLM("query -> answer", max_iterations=5, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Bad code", "code": "```python\ndef incomplete(\n```"},
{"reasoning": "Recover", "code": 'SUBMIT("recovered")'},
])
result = rlm.forward(query="test")
assert result.answer == "recovered"
assert result.trajectory[0]["output"].startswith("[Error] invalid syntax")
def test_syntax_error_from_strip_code_fences_is_recoverable(self):
"""SyntaxError raised by _strip_code_fences (e.g. non-Python fence tag) should be recoverable."""
mock = MockInterpreter(responses=[
FinalOutput({"answer": "recovered"}),
])
rlm = RLM("query -> answer", max_iterations=5, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Wrong language", "code": "```bash\nls -la\n```"},
{"reasoning": "Recover", "code": 'SUBMIT("recovered")'},
])
result = rlm.forward(query="test")
assert result.answer == "recovered"
assert result.trajectory[0]["output"].startswith("[Error]")
class TestRLMDynamicSignature:
"""Tests for the dynamically built RLM signatures."""
def test_action_signature_structure(self):
"""Test action signature has required fields and instructions."""
rlm = RLM("document, question -> summary, answer")
action_sig = rlm.generate_action.signature
# Required input/output fields
assert "variables_info" in action_sig.input_fields
assert "repl_history" in action_sig.input_fields
assert "reasoning" in action_sig.output_fields
assert "code" in action_sig.output_fields
# Instructions mention key tools and variables
instructions = action_sig.instructions
assert "llm_query" in instructions
assert "llm_query_batched" in instructions
assert "SUBMIT" in instructions
assert "`document`" in instructions
assert "`question`" in instructions
assert "`summary`" in instructions
assert "`answer`" in instructions
def test_extract_signature_structure(self):
"""Test extract signature has required fields for all outputs."""
rlm = RLM("document, question -> summary, key_facts, confidence")
extract_sig = rlm.extract.signature
assert "variables_info" in extract_sig.input_fields
assert "repl_history" in extract_sig.input_fields
assert "summary" in extract_sig.output_fields
assert "key_facts" in extract_sig.output_fields
assert "confidence" in extract_sig.output_fields
# ============================================================================
# Integration Tests: PythonInterpreter (require Deno)
# ============================================================================
@pytest.mark.deno
class TestPythonInterpreter:
"""Integration tests for the secure sandbox with tool support."""
def test_start_prewarms_sandbox(self):
"""Test that start() pre-warms the sandbox."""
interp = PythonInterpreter()
try:
# Before start, deno_process should be None
assert interp.deno_process is None
# After start, it should be running
interp.start()
assert interp.deno_process is not None
assert interp.deno_process.poll() is None # Still running
# Execute should work
result = interp.execute("print(42)")
assert "42" in result
finally:
interp.shutdown()
def test_start_is_idempotent(self):
"""Test that start() can be called multiple times safely."""
interp = PythonInterpreter()
try:
interp.start()
first_process = interp.deno_process
interp.start() # Second call - should be idempotent
assert interp.deno_process is first_process # Same process
finally:
interp.shutdown()
def test_basic_execution(self):
"""Test basic code execution."""
with PythonInterpreter() as interp:
result = interp.execute("print(1 + 1)")
assert "2" in result
def test_variable_injection(self):
"""Test variable injection."""
with PythonInterpreter(tools={}) as interp:
result = interp.execute(
"print(x + y)",
variables={"x": 10, "y": 5}
)
assert "15" in result
def test_variable_injection_with_none_values(self):
"""Test variable injection with None values in dicts/lists (JSON null -> Python None)."""
with PythonInterpreter(tools={}) as interp:
# Test None in dict
result = interp.execute(
"print(data['key'] is None)",
variables={"data": {"key": None, "other": "value"}}
)
assert "True" in result
# Test None in list
result = interp.execute(
"print(items[1] is None)",
variables={"items": [1, None, 3]}
)
assert "True" in result
# Test nested None
result = interp.execute(
"print(nested['inner']['value'] is None)",
variables={"nested": {"inner": {"value": None}}}
)
assert "True" in result
def test_tool_call_kwargs(self):
"""Test tool call with keyword arguments."""
def echo(message: str = "") -> str:
return f"Echo: {message}"
with PythonInterpreter(tools={"echo": echo}) as interp:
result = interp.execute('print(echo(message="hello"))')
assert "Echo: hello" in result
def test_tool_call_positional(self):
"""Test tool call with positional arguments."""
def greet(name: str) -> str:
return f"Hello: {name}"
with PythonInterpreter(tools={"greet": greet}) as interp:
result = interp.execute('print(greet("world"))')
assert "Hello: world" in result
def test_multiple_tools(self):
"""Test multiple tools."""
def add(a: int = 0, b: int = 0) -> str:
return str(a + b)
def multiply(a: int = 0, b: int = 0) -> str:
return str(a * b)
with PythonInterpreter(tools={"add": add, "multiply": multiply}) as interp:
result = interp.execute("""
sum_result = add(a=3, b=4)
prod_result = multiply(a=3, b=4)
print(f"Sum: {sum_result}, Product: {prod_result}")
""")
assert "Sum: 7" in result
assert "Product: 12" in result
def test_tool_returns_list(self):
"""Test tool that returns a list (like llm_query_batched)."""
def batch_process(items: list | None = None) -> list:
items = items or []
return [f"processed_{item}" for item in items]
with PythonInterpreter(tools={"batch_process": batch_process}) as interp:
result = interp.execute("""
results = batch_process(items=["a", "b", "c"])
print(f"Type: {type(results).__name__}")
print(f"Length: {len(results)}")
print(f"First: {results[0]}")
print(f"All: {results}")
""")
assert "Type: list" in result
assert "Length: 3" in result
assert "First: processed_a" in result
def test_tool_returns_dict(self):
"""Test tool that returns a dict."""
def get_info() -> dict:
return {"name": "test", "count": 42}
with PythonInterpreter(tools={"get_info": get_info}) as interp:
result = interp.execute("""
info = get_info()
print(f"Type: {type(info).__name__}")
print(f"Name: {info['name']}")
print(f"Count: {info['count']}")
""")
assert "Type: dict" in result
assert "Name: test" in result
assert "Count: 42" in result
def test_state_persists(self):
"""Test that state persists across executions."""
with PythonInterpreter(tools={}) as interp:
interp.execute("x = 10")
result = interp.execute("print(x + 5)")
assert "15" in result
def test_syntax_error(self):
"""Test syntax error handling."""
with PythonInterpreter(tools={}) as interp:
with pytest.raises(SyntaxError):
interp.execute("def incomplete(")
def test_runtime_error(self):
"""Test runtime error handling."""
with PythonInterpreter(tools={}) as interp:
with pytest.raises(CodeInterpreterError):
interp.execute("undefined_variable")
@pytest.mark.deno
class TestSandboxSecurity:
"""Integration tests for sandbox security restrictions."""
def test_no_network_access(self):
"""Test that network access is blocked."""
with PythonInterpreter(tools={}) as interp:
with pytest.raises(CodeInterpreterError) as exc_info:
interp.execute("""
from pyodide.http import pyfetch
import asyncio
asyncio.get_event_loop().run_until_complete(pyfetch("https://example.com"))
""")
assert "net access" in str(exc_info.value).lower() or "allow-net" in str(exc_info.value).lower()
def test_imports_work(self):
"""Test that standard library imports work."""
with PythonInterpreter(tools={}) as interp:
result = interp.execute("""
import json
import re
from collections import Counter
data = {"key": "value"}
print(json.dumps(data))
""")
assert "key" in result
# ============================================================================
# Unit Tests: RLM with MockInterpreter (no Deno required)
# ============================================================================
class TestRLMAsyncMock:
"""Unit tests for RLM aforward() using MockInterpreter (no Deno required)."""
@pytest.mark.asyncio
async def test_aforward_basic(self):
"""Test aforward() returns Prediction with expected output (MockInterpreter)."""
mock = MockInterpreter(responses=[FinalOutput({"answer": "42"})])
rlm = RLM("query -> answer", max_iterations=3, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Return answer", "code": 'SUBMIT("42")'},
])
result = await rlm.aforward(query="What is the answer?")
assert result.answer == "42"
@pytest.mark.asyncio
async def test_aforward_int_output_mock(self):
"""Test aforward() returns int when signature expects int (MockInterpreter)."""
mock = MockInterpreter(responses=[FinalOutput({"count": 42})])
rlm = RLM("query -> count: int", max_iterations=3, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Return count", "code": "SUBMIT(42)"},
])
result = await rlm.aforward(query="count items")
assert result.count == 42
assert isinstance(result.count, int)
@pytest.mark.asyncio
async def test_aforward_multi_iteration_mock(self):
"""Test aforward() handles multiple iterations before SUBMIT (MockInterpreter)."""
mock = MockInterpreter(responses=[
"explored data",
FinalOutput({"answer": "done"}),
])
rlm = RLM("query -> answer", max_iterations=5, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Explore first", "code": "print('exploring')"},
{"reasoning": "Now finish", "code": 'SUBMIT("done")'},
])
result = await rlm.aforward(query="test")
assert result.answer == "done"
class TestRLMTypeCoercionMock:
"""Unit tests for RLM type coercion using MockInterpreter (no Deno required)."""
@pytest.mark.parametrize("output_field,output_type,final_value,code,expected", [
("count", "int", 42, "SUBMIT(42)", 42),
("score", "float", 3.14, "SUBMIT(3.14)", 3.14),
("valid", "bool", True, "SUBMIT(True)", True),
("numbers", "list[int]", [1, 2, 3], "SUBMIT([1, 2, 3])", [1, 2, 3]),
("answer", "Literal['yes', 'no']", "yes", 'SUBMIT("yes")', "yes"),
])
def test_type_coercion(self, output_field, output_type, final_value, code, expected):
"""Test RLM type coercion for various types (MockInterpreter)."""
mock = MockInterpreter(responses=[FinalOutput({output_field: final_value})])
rlm = RLM(f"query -> {output_field}: {output_type}", max_iterations=3, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Return value", "code": code},
])
result = rlm.forward(query="test")
assert getattr(result, output_field) == expected
def test_type_error_retries(self):
"""Test RLM retries when type validation fails (MockInterpreter)."""
mock = MockInterpreter(responses=[
FinalOutput({"answer": "maybe"}), # Invalid for Literal
FinalOutput({"answer": "yes"}), # Valid
])
rlm = RLM("query -> answer: Literal['yes', 'no']", max_iterations=5, interpreter=mock)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Try maybe", "code": 'SUBMIT("maybe")'},
{"reasoning": "Try yes", "code": 'SUBMIT("yes")'},
])
result = rlm.forward(query="is it yes?")
assert result.answer == "yes"
# ============================================================================
# Integration Tests: RLM Type Coercion with PythonInterpreter
# ============================================================================
@pytest.mark.deno
class TestRLMTypeCoercion:
"""Tests for RLM type coercion through full forward pass with PythonInterpreter.
Note: These tests let RLM create its own PythonInterpreter so it can register
typed output_fields for SUBMIT based on the signature.
"""
@pytest.mark.parametrize("output_field,output_type,code,expected,expected_type", [
("count", "int", "SUBMIT(42)", 42, int),
("score", "float", "SUBMIT(3.14)", 3.14, float),
("valid", "bool", "SUBMIT(True)", True, bool),
("numbers", "list[int]", "SUBMIT([1, 2, 3])", [1, 2, 3], list),
("data", "dict[str, str]", 'SUBMIT({"key": "value"})', {"key": "value"}, dict),
("answer", "Literal['yes', 'no']", 'SUBMIT("yes")', "yes", str),
])
def test_type_coercion(self, output_field, output_type, code, expected, expected_type):
"""Test RLM type coercion for various types with PythonInterpreter."""
rlm = RLM(f"query -> {output_field}: {output_type}", max_iterations=3)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Return value", "code": code},
])
result = rlm.forward(query="test")
assert getattr(result, output_field) == expected
assert isinstance(getattr(result, output_field), expected_type)
def test_submit_extracts_typed_value(self):
"""Test RLM SUBMIT correctly extracts typed value."""
rlm = RLM("query -> count: int", max_iterations=3)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Compute and return", "code": "result = 42\nSUBMIT(result)"},
])
result = rlm.forward(query="count items")
assert result.count == 42
assert isinstance(result.count, int)
# ============================================================================
# Integration Tests: RLM Multiple Output Fields
# ============================================================================
@pytest.mark.deno
class TestRLMMultipleOutputs:
"""Tests for signatures with multiple typed output fields.
Tests SUBMIT() calling patterns with multi-output signatures.
"""
def test_multi_output_final_kwargs(self):
"""SUBMIT(field1=val1, field2=val2) with keyword args."""
rlm = RLM("query -> name: str, count: int", max_iterations=3)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Return both outputs", "code": 'SUBMIT(name="alice", count=5)'},
])
result = rlm.forward(query="test")
assert result.name == "alice"
assert result.count == 5
assert isinstance(result.count, int)
def test_multi_output_final_positional(self):
"""SUBMIT(val1, val2) with positional args mapped to field order."""
rlm = RLM("query -> name: str, count: int", max_iterations=3)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Return both outputs positionally", "code": 'SUBMIT("bob", 10)'},
])
result = rlm.forward(query="test")
assert result.name == "bob"
assert result.count == 10
def test_multi_output_three_fields(self):
"""Signature with 3+ output fields of different types."""
rlm = RLM("query -> name: str, age: int, active: bool", max_iterations=3)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Return all three", "code": 'SUBMIT(name="carol", age=30, active=True)'},
])
result = rlm.forward(query="test")
assert result.name == "carol"
assert result.age == 30
assert result.active is True
def test_multi_output_final_missing_field_errors(self):
"""SUBMIT() with missing field should return error in output."""
rlm = RLM("query -> name: str, count: int", max_iterations=3)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Missing count field", "code": 'SUBMIT(name="alice")'},
{"reasoning": "Now provide both", "code": 'SUBMIT(name="alice", count=5)'},
])
# RLM should retry after getting error for missing field
result = rlm.forward(query="test")
assert result.name == "alice"
assert result.count == 5
def test_multi_output_submit_vars(self):
"""SUBMIT can pass variables directly for multiple outputs."""
rlm = RLM("query -> name: str, count: int", max_iterations=3)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Use SUBMIT", "code": 'n = "dave"\nc = 15\nSUBMIT(n, c)'},
])
result = rlm.forward(query="test")
assert result.name == "dave"
assert result.count == 15
def test_multi_output_type_coercion(self):
"""Each output field is coerced to its declared type."""
rlm = RLM("query -> count: int, ratio: float, flag: bool", max_iterations=3)
rlm.generate_action = make_mock_predictor([
{"reasoning": "Return mixed types", "code": "SUBMIT(count=42, ratio=3.14, flag=True)"},
])
result = rlm.forward(query="test")
assert result.count == 42
assert isinstance(result.count, int)
assert result.ratio == 3.14
assert isinstance(result.ratio, float)
assert result.flag is True
assert isinstance(result.flag, bool)
# ============================================================================
# Integration Tests: RLM with DummyLM and PythonInterpreter
# ============================================================================
@pytest.mark.deno
class TestRLMWithDummyLM:
"""End-to-end tests using DummyLM with RLM and PythonInterpreter.
Note: These tests let RLM create its own PythonInterpreter so it can register
typed output_fields for SUBMIT based on the signature.
"""
def test_simple_computation_e2e(self):
"""Test full RLM pipeline: DummyLM -> RLM -> PythonInterpreter -> result."""
with dummy_lm_context([
{"reasoning": "I need to compute 2 + 3", "code": "result = 2 + 3\nSUBMIT(result)"},
]):
rlm = RLM("query -> answer: int", max_iterations=3)
result = rlm.forward(query="What is 2 + 3?")
assert result.answer == 5
assert isinstance(result.answer, int)
def test_multi_turn_computation_e2e(self):
"""Test RLM with multiple turns before SUBMIT."""
with dummy_lm_context([
{"reasoning": "First explore the data", "code": "x = 10\nprint(f'x = {x}')"},
{"reasoning": "Now compute and return", "code": "y = x * 2\nSUBMIT(y)"},
]):
rlm = RLM("query -> answer: int", max_iterations=5)
result = rlm.forward(query="Double ten")
assert result.answer == 20
assert len(result.trajectory) == 2
def test_with_input_variables_e2e(self):
"""Test RLM with input variables passed to sandbox."""
with dummy_lm_context([
{"reasoning": "Sum the numbers in the list", "code": "SUBMIT(sum(numbers))"},
]):
rlm = RLM("numbers: list[int] -> total: int", max_iterations=3)
result = rlm.forward(numbers=[1, 2, 3, 4, 5])
assert result.total == 15
def test_with_tool_e2e(self):
"""Test RLM calling a host-side tool through the sandbox."""
def lookup(key: str) -> str:
return {"apple": "red", "banana": "yellow"}.get(key, "unknown")
with dummy_lm_context([
{"reasoning": "Look up the color of apple", "code": 'color = lookup(key="apple")\nSUBMIT(color)'},
]):
rlm = RLM("fruit -> color: str", max_iterations=3, tools=[lookup])
result = rlm.forward(fruit="apple")
assert result.color == "red"
@pytest.mark.asyncio
async def test_aforward_simple_computation_e2e(self):
"""Test aforward() full pipeline: DummyLM -> RLM -> PythonInterpreter -> result."""
with dummy_lm_context([
{"reasoning": "I need to compute 2 + 3", "code": "result = 2 + 3\nSUBMIT(result)"},
]):
rlm = RLM("query -> answer: int", max_iterations=3)
result = await rlm.aforward(query="What is 2 + 3?")
assert result.answer == 5
assert isinstance(result.answer, int)
@pytest.mark.asyncio
async def test_aforward_multi_turn_e2e(self):
"""Test aforward() with multiple turns before SUBMIT."""
with dummy_lm_context([
{"reasoning": "First explore the data", "code": "x = 10\nprint(f'x = {x}')"},
{"reasoning": "Now compute and return", "code": "y = x * 2\nSUBMIT(y)"},
]):
rlm = RLM("query -> answer: int", max_iterations=5)
result = await rlm.aforward(query="Double ten")
assert result.answer == 20
assert len(result.trajectory) == 2
@pytest.mark.asyncio
async def test_aforward_with_input_variables_e2e(self):
"""Test aforward() with input variables passed to sandbox."""
with dummy_lm_context([
{"reasoning": "Sum the numbers in the list", "code": "SUBMIT(sum(numbers))"},
]):
rlm = RLM("numbers: list[int] -> total: int", max_iterations=3)
result = await rlm.aforward(numbers=[1, 2, 3, 4, 5])
assert result.total == 15
# ============================================================================
# Integration Tests: RLM with real LM (require API key and Deno)
# ============================================================================
@pytest.mark.skip(reason="Requires actual LM and Deno - run manually")
class TestRLMIntegration:
"""Integration tests that require a configured LM."""
def test_simple_computation(self):
"""Test RLM on simple computation."""
import dspy
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
rlm = RLM("context, query -> answer", max_iterations=5)
result = rlm(
context={"numbers": [1, 2, 3, 4, 5]},
query="What is the sum of the numbers?"
)
assert "15" in result.answer
def test_with_llm_query(self):
"""Test RLM using the llm_query tool."""
import dspy
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
rlm = RLM("context, query -> answer", max_iterations=5)
result = rlm(
context="The quick brown fox jumps over the lazy dog.",
query="Use llm_query to describe what animal is mentioned as lazy."
)
assert "dog" in result.answer.lower()
if __name__ == "__main__":
# Allow running this test module directly (python test_rlm.py) without
# invoking pytest explicitly; -v enables verbose per-test output.
pytest.main([__file__, "-v"])
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/predict/test_rlm.py",
"license": "MIT License",
"lines": 967,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/clients/test_lm_local.py | from unittest import mock
from unittest.mock import patch
from dspy.clients.lm_local import LocalProvider
@patch("dspy.clients.lm_local.threading.Thread")
@patch("dspy.clients.lm_local.subprocess.Popen")
@patch("dspy.clients.lm_local.get_free_port")
@patch("dspy.clients.lm_local.wait_for_server")
def test_command_with_spaces_in_path(mock_wait, mock_port, mock_popen, mock_thread):
mock_port.return_value = 8000
mock_process = mock.Mock()
mock_process.pid = 12345
mock_process.stdout.readline.return_value = ""
mock_process.poll.return_value = 0
mock_popen.return_value = mock_process
lm = mock.Mock(spec=[])
lm.model = "/path/to/my models/llama"
lm.launch_kwargs = {}
lm.kwargs = {}
with mock.patch.dict("sys.modules", {"sglang": mock.Mock(), "sglang.utils": mock.Mock()}):
LocalProvider.launch(lm, launch_kwargs={})
assert mock_popen.called
call_args = mock_popen.call_args
command = call_args[0][0]
assert isinstance(command, list)
assert "--model-path" in command
model_index = command.index("--model-path")
assert command[model_index + 1] == "/path/to/my models/llama"
@patch("dspy.clients.lm_local.threading.Thread")
@patch("dspy.clients.lm_local.subprocess.Popen")
@patch("dspy.clients.lm_local.get_free_port")
@patch("dspy.clients.lm_local.wait_for_server")
def test_command_construction_prevents_injection(mock_wait, mock_port, mock_popen, mock_thread):
mock_port.return_value = 8000
mock_process = mock.Mock()
mock_process.pid = 12345
mock_process.stdout.readline.return_value = ""
mock_process.poll.return_value = 0
mock_popen.return_value = mock_process
lm = mock.Mock(spec=[])
lm.model = "model --trust-remote-code"
lm.launch_kwargs = {}
lm.kwargs = {}
with mock.patch.dict("sys.modules", {"sglang": mock.Mock(), "sglang.utils": mock.Mock()}):
LocalProvider.launch(lm, launch_kwargs={})
assert mock_popen.called
call_args = mock_popen.call_args
command = call_args[0][0]
assert isinstance(command, list)
assert "--model-path" in command
model_index = command.index("--model-path")
assert command[model_index + 1] == "model --trust-remote-code"
@patch("dspy.clients.lm_local.threading.Thread")
@patch("dspy.clients.lm_local.subprocess.Popen")
@patch("dspy.clients.lm_local.get_free_port")
@patch("dspy.clients.lm_local.wait_for_server")
def test_command_is_list_not_string(mock_wait, mock_port, mock_popen, mock_thread):
mock_port.return_value = 8000
mock_process = mock.Mock()
mock_process.pid = 12345
mock_process.stdout.readline.return_value = ""
mock_process.poll.return_value = 0
mock_popen.return_value = mock_process
lm = mock.Mock(spec=[])
lm.model = "meta-llama/Llama-2-7b"
lm.launch_kwargs = {}
lm.kwargs = {}
with mock.patch.dict("sys.modules", {"sglang": mock.Mock(), "sglang.utils": mock.Mock()}):
LocalProvider.launch(lm, launch_kwargs={})
assert mock_popen.called
call_args = mock_popen.call_args
command = call_args[0][0]
assert isinstance(command, list)
assert command[0] == "python"
assert command[1] == "-m"
assert command[2] == "sglang.launch_server"
assert "--model-path" in command
assert "--port" in command
assert "--host" in command
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/clients/test_lm_local.py",
"license": "MIT License",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/adapters/types/reasoning.py | from typing import TYPE_CHECKING, Any, Optional
import litellm
import pydantic
from dspy.adapters.types.base_type import Type
if TYPE_CHECKING:
from dspy.clients.lm import LM
from dspy.signatures.signature import Signature
class Reasoning(Type):
"""Reasoning type in DSPy.
This type is useful when you want the DSPy output to include the reasoning of the LM. We build this type so that
DSPy can support the reasoning model and non-reasoning model with the same code.
This is a str-like type, you can convert a string directly to a Reasoning object, and from DSPy adapters'
perspective, `Reasoning` is treated as a string.
"""
# The reasoning text; every str-like dunder below delegates to this field.
content: str
def format(self) -> str:
"""Return the reasoning content as plain text for adapter formatting."""
return f"{self.content}"
@pydantic.model_validator(mode="before")
@classmethod
def validate_input(cls, data: Any):
"""Coerce str / dict / Reasoning inputs into the `{"content": ...}` shape.
Raises:
ValueError: If `data` is a dict without a string `content` key, or is
of an unsupported type altogether.
"""
if isinstance(data, cls):
# Already a Reasoning instance; pass through unchanged.
return data
if isinstance(data, str):
# Plain strings are wrapped directly.
return {"content": data}
if isinstance(data, dict):
if "content" not in data:
raise ValueError("`content` field is required for `dspy.Reasoning`")
if not isinstance(data["content"], str):
raise ValueError(f"`content` field must be a string, but received type: {type(data['content'])}")
# Only `content` is kept; any extra keys are dropped.
return {"content": data["content"]}
raise ValueError(f"Received invalid value for `dspy.Reasoning`: {data}")
@classmethod
def adapt_to_native_lm_feature(
cls,
signature: type["Signature"],
field_name: str,
lm: "LM",
lm_kwargs: dict[str, Any],
) -> type["Signature"]:
"""Enable the LM's native reasoning feature when supported.
When enabled, sets `reasoning_effort` in `lm_kwargs` and removes the
reasoning field from the signature (the LM emits reasoning natively).
Otherwise the signature is returned unchanged.
"""
if "reasoning_effort" in lm_kwargs:
# `lm_kwargs` overrides `lm.kwargs`.
reasoning_effort = lm_kwargs["reasoning_effort"]
elif "reasoning_effort" in lm.kwargs:
reasoning_effort = lm.kwargs["reasoning_effort"]
else:
# Turn on the native reasoning explicitly if Reasoning field is present in the signature and no explicit
# reasoning effort is set in `lm_kwargs` or `lm.kwargs`.
reasoning_effort = "low"
if reasoning_effort is None or not litellm.supports_reasoning(lm.model):
# If users explicitly set `reasoning_effort` to None or the LM doesn't support reasoning, we don't enable
# native reasoning.
return signature
if "gpt-5" in lm.model and lm.model_type == "chat":
# There is a caveat of Litellm as 1.79.0 that when using the chat completion API on GPT-5 family models,
# the reasoning content is not available in the response. As a workaround, we don't enable the native
# reasoning feature for GPT-5 family models when using the chat completion API.
# Litellm issue: https://github.com/BerriAI/litellm/issues/14748
return signature
lm_kwargs["reasoning_effort"] = reasoning_effort
# Delete the reasoning field from the signature to use the native reasoning feature.
return signature.delete(field_name)
@classmethod
def parse_lm_response(cls, response: str | dict[str, Any]) -> Optional["Reasoning"]:
"""Parse the LM response into a Reasoning object."""
# NOTE(review): for a plain-str `response`, `in` is a substring test and
# the subsequent indexing would raise TypeError -- confirm str responses
# can never contain "reasoning_content" or narrow the type here.
if "reasoning_content" in response:
return Reasoning(content=response["reasoning_content"])
return None
@classmethod
def parse_stream_chunk(cls, chunk) -> str | None:
"""
Parse a stream chunk into reasoning content if available.
Args:
chunk: A stream chunk from the LM.
Returns:
The reasoning content (str) if available, None otherwise.
"""
try:
if choices := getattr(chunk, "choices", None):
return getattr(choices[0].delta, "reasoning_content", None)
# Falls through to an implicit None when `choices` is empty/missing.
except Exception:
# Malformed chunks are ignored rather than propagated.
return None
@classmethod
def is_streamable(cls) -> bool:
"""Reasoning content can be streamed incrementally."""
return True
def __repr__(self) -> str:
# Mirror the underlying string's repr for str-like ergonomics.
return f"{self.content!r}"
def __str__(self) -> str:
return self.content
def __eq__(self, other: object) -> bool:
# Equal to another Reasoning or to a plain str with the same content.
# NOTE(review): defining __eq__ without __hash__ makes instances
# unhashable (Python sets __hash__ to None) -- confirm this is intended.
if isinstance(other, Reasoning):
return self.content == other.content
if isinstance(other, str):
return self.content == other
return False
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __len__(self) -> int:
return len(self.content)
def __getitem__(self, key):
# Supports indexing and slicing like a str.
return self.content[key]
def __contains__(self, item) -> bool:
return item in self.content
def __iter__(self):
return iter(self.content)
def __add__(self, other):
# Reasoning + Reasoning -> Reasoning; Reasoning + str -> plain str.
if isinstance(other, Reasoning):
return Reasoning(content=self.content + other.content)
if isinstance(other, str):
return self.content + other
return NotImplemented
def __radd__(self, other):
# str + Reasoning -> plain str (reached when str.__add__ declines).
if isinstance(other, str):
return other + self.content
if isinstance(other, Reasoning):
return Reasoning(content=other.content + self.content)
return NotImplemented
def __getattr__(self, name):
"""
Delegate string methods to the underlying content.
This makes Reasoning fully str-like by forwarding any string method calls
(like .strip(), .lower(), .split(), etc.) to the content string.
Note: This is called only when the attribute is not found on the Reasoning instance,
so it won't interfere with Pydantic fields or existing methods.
"""
# Check if this is a valid string method/attribute
if hasattr(str, name):
# Delegate to the content string
return getattr(self.content, name)
# If it's not a string method, provide a helpful error
raise AttributeError(
f"`{type(self).__name__}` object has no attribute '{name}'. "
f"If you are using `dspy.ChainOfThought`, note that the 'reasoning' field in ChainOfThought is now a "
"`dspy.Reasoning` object (not a plain string). "
f"You can convert it to a string with str(reasoning) or access the content with reasoning.content."
)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/adapters/types/reasoning.py",
"license": "MIT License",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:dspy/adapters/types/file.py | import base64
import mimetypes
import os
from typing import Any
import pydantic
from dspy.adapters.types.base_type import Type
class File(Type):
    """A file input type for DSPy.

    See https://platform.openai.com/docs/api-reference/chat/create#chat_create-messages-user_message-content-array_of_content_parts-file_content_part-file for specification.

    The file_data field should be a data URI with the format:
    data:<mime_type>;base64,<base64_encoded_data>

    Example:
        ```python
        import dspy

        class QA(dspy.Signature):
            file: dspy.File = dspy.InputField()
            summary = dspy.OutputField()

        program = dspy.Predict(QA)
        result = program(file=dspy.File.from_path("./research.pdf"))
        print(result.summary)
        ```
    """

    # At least one of file_data / file_id must be set; filename is optional metadata.
    file_data: str | None = None
    file_id: str | None = None
    filename: str | None = None

    # Instances are immutable and reject unknown fields.
    model_config = pydantic.ConfigDict(
        frozen=True,
        str_strip_whitespace=True,
        validate_assignment=True,
        extra="forbid",
    )

    @pydantic.model_validator(mode="before")
    @classmethod
    def validate_input(cls, values: Any) -> Any:
        """Normalize supported inputs (File, dict, file path, bytes) into a field dict.

        Raises:
            ValueError: If a dict contains none of the recognized keys, or the
                input type cannot be converted to a file.
        """
        if isinstance(values, cls):
            return {
                "file_data": values.file_data,
                "file_id": values.file_id,
                "filename": values.filename,
            }
        if isinstance(values, dict):
            if "file_data" in values or "file_id" in values or "filename" in values:
                return values
            raise ValueError("Value of `dspy.File` must contain at least one of: file_data, file_id, or filename")
        # Fall back to path/bytes conversion for any other input.
        return encode_file_to_dict(values)

    def format(self) -> list[dict[str, Any]]:
        """Format the file as an OpenAI-style "file" content part.

        Returns:
            A single-element list holding {"type": "file", "file": {...}} with
            whichever of file_data / file_id / filename are set.

        Raises:
            ValueError: If formatting fails; the original error is chained as the cause.
        """
        try:
            file_dict = {}
            if self.file_data:
                file_dict["file_data"] = self.file_data
            if self.file_id:
                file_dict["file_id"] = self.file_id
            if self.filename:
                file_dict["filename"] = self.filename
            return [{"type": "file", "file": file_dict}]
        except Exception as e:
            # Chain the original exception so the root cause stays visible in tracebacks.
            raise ValueError(f"Failed to format file for DSPy: {e}") from e

    def __str__(self):
        """Serialized form used when embedding the file in a prompt."""
        return self.serialize_model()

    def __repr__(self):
        """Compact repr that never dumps the (potentially huge) base64 payload."""
        parts = []
        if self.file_data is not None:
            if self.file_data.startswith("data:"):
                # file data has "data:text/plain;base64,..." format
                mime_type = self.file_data.split(";")[0].split(":")[1]
                len_data = len(self.file_data.split("base64,")[1]) if "base64," in self.file_data else len(self.file_data)
                parts.append(f"file_data=<DATA_URI({mime_type}, {len_data} chars)>")
            else:
                len_data = len(self.file_data)
                parts.append(f"file_data=<DATA({len_data} chars)>")
        if self.file_id is not None:
            parts.append(f"file_id='{self.file_id}'")
        if self.filename is not None:
            parts.append(f"filename='{self.filename}'")
        return f"File({', '.join(parts)})"

    @classmethod
    def from_path(cls, file_path: str, filename: str | None = None, mime_type: str | None = None) -> "File":
        """Create a File from a local file path.

        Args:
            file_path: Path to the file to read
            filename: Optional filename to use (defaults to basename of path)
            mime_type: Optional MIME type (defaults to auto-detection from file extension)

        Raises:
            ValueError: If file_path does not point to an existing file.
        """
        if not os.path.isfile(file_path):
            raise ValueError(f"File not found: {file_path}")

        with open(file_path, "rb") as f:
            file_bytes = f.read()

        if filename is None:
            filename = os.path.basename(file_path)

        if mime_type is None:
            mime_type, _ = mimetypes.guess_type(file_path)
            if mime_type is None:
                # Unknown extension: fall back to a generic binary type.
                mime_type = "application/octet-stream"

        encoded_data = base64.b64encode(file_bytes).decode("utf-8")
        file_data = f"data:{mime_type};base64,{encoded_data}"
        return cls(file_data=file_data, filename=filename)

    @classmethod
    def from_bytes(
        cls, file_bytes: bytes, filename: str | None = None, mime_type: str = "application/octet-stream"
    ) -> "File":
        """Create a File from raw bytes.

        Args:
            file_bytes: Raw bytes of the file
            filename: Optional filename
            mime_type: MIME type (defaults to 'application/octet-stream')
        """
        encoded_data = base64.b64encode(file_bytes).decode("utf-8")
        file_data = f"data:{mime_type};base64,{encoded_data}"
        return cls(file_data=file_data, filename=filename)

    @classmethod
    def from_file_id(cls, file_id: str, filename: str | None = None) -> "File":
        """Create a File from an uploaded file ID (e.g. returned by a Files API)."""
        return cls(file_id=file_id, filename=filename)
def encode_file_to_dict(file_input: Any) -> dict:
    """
    Encode various file inputs to a dict with file_data, file_id, and/or filename.

    Args:
        file_input: Can be a file path (str), bytes, or File instance.

    Returns:
        dict: A dictionary with file_data, file_id, and/or filename keys.

    Raises:
        ValueError: For strings that are not existing file paths, or for
            unsupported input types.
    """
    if isinstance(file_input, File):
        # Keep only the fields that are actually populated on the instance.
        fields = {
            "file_data": file_input.file_data,
            "file_id": file_input.file_id,
            "filename": file_input.filename,
        }
        return {key: value for key, value in fields.items() if value is not None}
    if isinstance(file_input, str):
        if not os.path.isfile(file_input):
            raise ValueError(f"Unrecognized file string: {file_input}; must be a valid file path")
        file_obj = File.from_path(file_input)
        return {
            "file_data": file_obj.file_data,
            "filename": file_obj.filename,
        }
    if isinstance(file_input, bytes):
        return {"file_data": File.from_bytes(file_input).file_data}
    raise ValueError(f"Unsupported file input type: {type(file_input)}")
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/adapters/types/file.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/signatures/test_adapter_file.py | import os
import tempfile
import pydantic
import pytest
import dspy
from dspy.adapters.types.file import encode_file_to_dict
from dspy.utils.dummies import DummyLM
@pytest.fixture
def sample_text_file():
    """Yield the path of a temporary .txt file; best-effort cleanup afterwards."""
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as tmp_file:
        tmp_file.write("This is a test file.")
        tmp_file_path = tmp_file.name
    yield tmp_file_path
    # Cleanup is best-effort: the file may already be gone on some platforms.
    try:
        os.unlink(tmp_file_path)
    except Exception:
        pass
def count_messages_with_file_pattern(messages):
    """Recursively count dicts shaped like {"type": "file", "file": {...}} in `messages`.

    Walks nested dicts, lists, and tuples; any dict whose "type" key equals
    "file" and whose "file" key holds a dict counts as one file content part.
    """

    def _is_file_part(obj):
        # A file part is a dict with type == "file" and a dict-valued "file" key.
        return (
            isinstance(obj, dict)
            and obj.get("type") == "file"
            and isinstance(obj.get("file"), dict)
        )

    def _count(obj):
        total = 1 if _is_file_part(obj) else 0
        if isinstance(obj, dict):
            total += sum(_count(value) for value in obj.values())
        elif isinstance(obj, (list, tuple)):
            total += sum(_count(value) for value in obj)
        return total

    return _count(messages)
def setup_predictor(signature, expected_output):
    """Configure a DummyLM that returns `expected_output` and build a Predict for `signature`."""
    lm = DummyLM([expected_output])
    dspy.settings.configure(lm=lm)
    return dspy.Predict(signature), lm
# --- Constructors: from_path / from_bytes / from_file_id / direct kwargs ---


def test_file_from_local_path(sample_text_file):
    """from_path produces a text/plain data URI and keeps the basename."""
    file_obj = dspy.File.from_path(sample_text_file)
    assert file_obj.file_data is not None
    assert file_obj.file_data.startswith("data:text/plain;base64,")
    assert file_obj.filename == os.path.basename(sample_text_file)


def test_file_from_path_method(sample_text_file):
    """Same contract as above, exercised through the classmethod directly."""
    file_obj = dspy.File.from_path(sample_text_file)
    assert file_obj.file_data is not None
    assert file_obj.file_data.startswith("data:text/plain;base64,")
    assert file_obj.filename == os.path.basename(sample_text_file)


def test_file_from_path_with_custom_filename(sample_text_file):
    """An explicit filename overrides the basename derived from the path."""
    file_obj = dspy.File.from_path(sample_text_file, filename="custom.txt")
    assert file_obj.file_data is not None
    assert file_obj.file_data.startswith("data:text/plain;base64,")
    assert file_obj.filename == "custom.txt"


def test_file_from_bytes():
    """from_bytes defaults to application/octet-stream with no filename."""
    file_bytes = b"Test file content"
    file_obj = dspy.File.from_bytes(file_bytes)
    assert file_obj.file_data is not None
    assert file_obj.file_data.startswith("data:application/octet-stream;base64,")
    assert file_obj.filename is None


def test_file_from_bytes_with_filename():
    file_bytes = b"Test file content"
    file_obj = dspy.File.from_bytes(file_bytes, filename="test.txt")
    assert file_obj.file_data is not None
    assert file_obj.file_data.startswith("data:application/octet-stream;base64,")
    assert file_obj.filename == "test.txt"


def test_file_from_file_id():
    """from_file_id stores only the id; no data URI is created."""
    file_obj = dspy.File.from_file_id("file-abc123")
    assert file_obj.file_id == "file-abc123"
    assert file_obj.file_data is None


def test_file_from_file_id_with_filename():
    file_obj = dspy.File.from_file_id("file-abc123", filename="document.pdf")
    assert file_obj.file_id == "file-abc123"
    assert file_obj.filename == "document.pdf"


def test_file_from_dict_with_file_data():
    file_obj = dspy.File(file_data="data:text/plain;base64,dGVzdA==", filename="test.txt")
    assert file_obj.file_data == "data:text/plain;base64,dGVzdA=="
    assert file_obj.filename == "test.txt"


def test_file_from_dict_with_file_id():
    file_obj = dspy.File(file_id="file-xyz789")
    assert file_obj.file_id == "file-xyz789"


# --- format() / repr / str surface ---


def test_file_format_with_file_data():
    """format() wraps the populated fields in a {"type": "file", "file": {...}} part."""
    file_obj = dspy.File.from_bytes(b"test", filename="test.txt")
    formatted = file_obj.format()
    assert isinstance(formatted, list)
    assert len(formatted) == 1
    assert formatted[0]["type"] == "file"
    assert "file" in formatted[0]
    assert "file_data" in formatted[0]["file"]
    assert "filename" in formatted[0]["file"]


def test_file_format_with_file_id():
    file_obj = dspy.File.from_file_id("file-123")
    formatted = file_obj.format()
    assert formatted[0]["type"] == "file"
    assert formatted[0]["file"]["file_id"] == "file-123"


def test_file_repr_with_file_data():
    """repr shows a size summary, never the raw base64 payload."""
    file_obj = dspy.File.from_bytes(b"Test content", filename="test.txt")
    repr_str = repr(file_obj)
    assert "DATA_URI" in repr_str
    assert "application/octet-stream" in repr_str
    assert "filename='test.txt'" in repr_str


def test_file_repr_with_file_id():
    file_obj = dspy.File.from_file_id("file-abc", filename="doc.pdf")
    repr_str = repr(file_obj)
    assert "file_id='file-abc'" in repr_str
    assert "filename='doc.pdf'" in repr_str


def test_file_str():
    """str() uses the custom-type serialization markers."""
    file_obj = dspy.File.from_bytes(b"test")
    str_repr = str(file_obj)
    assert "<<CUSTOM-TYPE-START-IDENTIFIER>>" in str_repr
    assert "<<CUSTOM-TYPE-END-IDENTIFIER>>" in str_repr


# --- encode_file_to_dict helper and validation errors ---


def test_encode_file_to_dict_from_path(sample_text_file):
    result = encode_file_to_dict(sample_text_file)
    assert "file_data" in result
    assert result["file_data"].startswith("data:text/plain;base64,")
    assert "filename" in result


def test_encode_file_to_dict_from_bytes():
    result = encode_file_to_dict(b"test content")
    assert "file_data" in result
    assert result["file_data"].startswith("data:application/octet-stream;base64,")


def test_invalid_file_string():
    # URLs are not accepted; only existing local paths.
    with pytest.raises(ValueError, match="Unrecognized"):
        encode_file_to_dict("https://this_is_not_a_file_path")


def test_invalid_dict():
    with pytest.raises(ValueError, match="must contain at least one"):
        dspy.File(invalid="dict")
# --- End-to-end: File fields inside signatures and predictors ---


def test_file_in_signature(sample_text_file):
    """A dspy.File input produces exactly one file content part in the LM messages."""
    signature = "document: dspy.File -> summary: str"
    expected = {"summary": "This is a summary"}
    predictor, lm = setup_predictor(signature, expected)
    file_obj = dspy.File.from_path(sample_text_file)
    result = predictor(document=file_obj)
    assert result.summary == "This is a summary"
    assert count_messages_with_file_pattern(lm.history[-1]["messages"]) == 1


def test_file_list_in_signature(sample_text_file):
    """A list[dspy.File] input emits one file part per element."""
    class FileListSignature(dspy.Signature):
        documents: list[dspy.File] = dspy.InputField()
        summary: str = dspy.OutputField()

    expected = {"summary": "Multiple files"}
    predictor, lm = setup_predictor(FileListSignature, expected)
    files = [
        dspy.File.from_path(sample_text_file),
        dspy.File.from_file_id("file-123"),
    ]
    result = predictor(documents=files)
    assert result.summary == "Multiple files"
    assert count_messages_with_file_pattern(lm.history[-1]["messages"]) == 2


def test_optional_file_field():
    """Passing None for an optional file field emits no file parts."""
    class OptionalFileSignature(dspy.Signature):
        document: dspy.File | None = dspy.InputField()
        output: str = dspy.OutputField()

    predictor, lm = setup_predictor(OptionalFileSignature, {"output": "Hello"})
    result = predictor(document=None)
    assert result.output == "Hello"
    assert count_messages_with_file_pattern(lm.history[-1]["messages"]) == 0


def test_save_load_file_signature(sample_text_file):
    """File demos survive a save/load round-trip of a compiled predictor."""
    signature = "document: dspy.File -> summary: str"
    file_obj = dspy.File.from_path(sample_text_file)
    examples = [dspy.Example(document=file_obj, summary="Test summary")]
    predictor, lm = setup_predictor(signature, {"summary": "A summary"})
    optimizer = dspy.teleprompt.LabeledFewShot(k=1)
    compiled_predictor = optimizer.compile(student=predictor, trainset=examples, sample=False)
    with tempfile.NamedTemporaryFile(mode="w+", delete=True, suffix=".json") as temp_file:
        compiled_predictor.save(temp_file.name)
        loaded_predictor = dspy.Predict(signature)
        loaded_predictor.load(temp_file.name)
    loaded_predictor(document=dspy.File.from_file_id("file-test"))
    # One file part from the loaded demo plus one from the new input.
    assert count_messages_with_file_pattern(lm.history[-1]["messages"]) == 2
# --- Immutability, multi-field instances, and MIME-type handling ---


def test_file_frozen():
    """File is a frozen pydantic model; assignment must raise."""
    file_obj = dspy.File.from_bytes(b"test")
    with pytest.raises((TypeError, ValueError, pydantic.ValidationError)):
        file_obj.file_data = "new_data"


def test_file_with_all_fields():
    """All three fields may coexist and all appear in format() output."""
    file_data_uri = "data:text/plain;base64,dGVzdA=="
    file_obj = dspy.File(file_data=file_data_uri, file_id="file-123", filename="test.txt")
    assert file_obj.file_data == file_data_uri
    assert file_obj.file_id == "file-123"
    assert file_obj.filename == "test.txt"
    formatted = file_obj.format()
    assert formatted[0]["file"]["file_data"] == file_data_uri
    assert formatted[0]["file"]["file_id"] == "file-123"
    assert formatted[0]["file"]["filename"] == "test.txt"


def test_file_path_not_found():
    with pytest.raises(ValueError, match="File not found"):
        dspy.File.from_path("/nonexistent/path/file.txt")


def test_file_custom_mime_type(sample_text_file):
    """An explicit mime_type overrides extension-based detection."""
    file_obj = dspy.File.from_path(sample_text_file, mime_type="text/custom")
    assert file_obj.file_data.startswith("data:text/custom;base64,")


def test_file_from_bytes_custom_mime():
    file_obj = dspy.File.from_bytes(b"audio data", mime_type="audio/mp3")
    assert file_obj.file_data.startswith("data:audio/mp3;base64,")


def test_file_data_uri_in_format():
    file_obj = dspy.File.from_bytes(b"test", filename="test.txt", mime_type="text/plain")
    formatted = file_obj.format()
    assert "data:text/plain;base64," in formatted[0]["file"]["file_data"]
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/signatures/test_adapter_file.py",
"license": "MIT License",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/utils/magicattr.py | """
Compatibility layer for magicattr that works with Python 3.14+
This module provides a patched version of magicattr's functionality
that is compatible with Python 3.14's removal of ast.Num and ast.Str.
Based on magicattr 0.1.6 by Jairus Martin (MIT License)
https://github.com/frmdstryr/magicattr
"""
import ast
import sys
from functools import reduce
# AST node types that may appear in a parsed attribute expression.
_AST_TYPES = (ast.Name, ast.Attribute, ast.Subscript, ast.Call)
# Attribute specifications must be plain strings.
_STRING_TYPE = str
def get(obj, attr, **kwargs):
    """A getattr that supports nested lookups on objects, dicts, lists, and
    any combination in between.

    Args:
        obj: Root object to start the lookup from.
        attr: Attribute expression string, e.g. ``a.b[0]["key"]``.
        **kwargs: Optional ``default`` returned when any lookup step fails.

    Raises:
        Whatever the failing lookup step raised, when no ``default`` is given.
    """
    for chunk in _parse(attr):
        try:
            obj = _lookup(obj, chunk)
        except Exception:
            if "default" in kwargs:
                return kwargs["default"]
            # Bare re-raise keeps the original exception and traceback intact.
            raise
    return obj
def set(obj, attr, val):
    """A setattr that supports nested lookups on objects, dicts, lists, and
    any combination in between.
    """
    target, attr_or_key, is_subscript = lookup(obj, attr)
    if not is_subscript:
        setattr(target, attr_or_key, val)
    else:
        target[attr_or_key] = val
def delete(obj, attr):
    """A delattr that supports deletion of a nested lookups on objects,
    dicts, lists, and any combination in between.
    """
    target, attr_or_key, is_subscript = lookup(obj, attr)
    if not is_subscript:
        delattr(target, attr_or_key)
    else:
        del target[attr_or_key]
def lookup(obj, attr):
    """Like get but instead of returning the final value it returns the
    object and action that will be done.

    Returns:
        A ``(parent, attr_or_key, is_subscript)`` triple; ``is_subscript``
        tells the caller whether to use item access or attribute access.
    """
    nodes = tuple(_parse(attr))
    if len(nodes) > 1:
        # Resolve every step except the last; the caller acts on the final one.
        obj = reduce(_lookup, nodes[:-1], obj)
    node = nodes[-1]
    if isinstance(node, ast.Attribute):
        return obj, node.attr, False
    if isinstance(node, ast.Name):
        return obj, node.id, False
    if isinstance(node, ast.Subscript):
        return obj, _lookup_subscript_value(node.slice), True
    raise NotImplementedError("Node is not supported: %s" % node)
def _parse(attr):
    """Parse and validate an attr string, yielding lookup nodes in evaluation order."""
    if not isinstance(attr, _STRING_TYPE):
        raise TypeError("Attribute name must be a string")
    body = ast.parse(attr).body
    if not (body and isinstance(body[0], ast.Expr)):
        raise ValueError("Invalid expression: %s" % attr)
    # ast.walk visits outermost-first; reverse so lookups run left-to-right.
    relevant = [node for node in ast.walk(body[0]) if isinstance(node, _AST_TYPES)]
    return reversed(relevant)
def _lookup_subscript_value(node):
    """Lookup the value of ast node on the object.

    Compatible with Python 3.14+ which removed ast.Num and ast.Str.

    Args:
        node: The slice AST node of a subscript expression.

    Returns:
        The literal index or key (int, str, or a negated numeric constant).

    Raises:
        NotImplementedError: For subscripts that are not simple literals.
    """
    # Older Pythons wrap simple indices in ast.Index; guard with hasattr so this
    # keeps working on versions where ast.Index itself has been removed
    # (matching the hasattr guards used below for ast.Num / ast.Str).
    if hasattr(ast, "Index") and isinstance(node, ast.Index):
        node = node.value
    # Python 3.14+ uses ast.Constant for all constants
    if isinstance(node, ast.Constant):
        return node.value
    # Fallback for older Python versions
    if sys.version_info < (3, 14):
        # Handle numeric indexes
        if hasattr(ast, "Num") and isinstance(node, ast.Num):
            return node.n
        # Handle string keys
        elif hasattr(ast, "Str") and isinstance(node, ast.Str):
            return node.s
    # Handle negative indexes (a unary minus applied to a constant)
    if isinstance(node, ast.UnaryOp) and isinstance(node.op, ast.USub):
        operand = node.operand
        if isinstance(operand, ast.Constant):
            return -operand.value
        # Fallback for older Python
        elif sys.version_info < (3, 14) and hasattr(ast, "Num") and isinstance(operand, ast.Num):
            return -operand.n
    raise NotImplementedError("Subscript node is not supported: %s" % ast.dump(node))
def _lookup(obj, node):
    """Lookup the given ast node on the object."""
    if isinstance(node, ast.Call):
        raise ValueError("Function calls are not allowed.")
    if isinstance(node, ast.Attribute):
        return getattr(obj, node.attr)
    if isinstance(node, ast.Name):
        return getattr(obj, node.id)
    if isinstance(node, ast.Subscript):
        return obj[_lookup_subscript_value(node.slice)]
    raise NotImplementedError("Node is not supported: %s" % node)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/utils/magicattr.py",
"license": "MIT License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/utils/test_magicattr.py | import pytest
from dspy.utils import magicattr
class Test:
    """Fixture object with nested list/dict class attributes for lookup tests."""
    l = [1, 2]
    a = [0, [1, 2, [3, 4]]]
    b = {"x": {"y": "y"}, "z": [1, 2]}
    z = "z"
class Person:
    """Fixture with a class-level settings dict and per-instance name/age/friends."""
    # NOTE: class-level, so mutations through one instance are visible to all.
    settings = {
        "autosave": True,
        "style": {"height": 30, "width": 200},
        "themes": ["light", "dark"],
    }

    def __init__(self, name, age, friends):
        self.name = name
        self.age = age
        self.friends = friends
@pytest.mark.parametrize(
    "key, value",
    [
        ("l", Test.l),
        ("t.t.t.t.z", "z"),
        ("a[0]", 0),
        ("a[1][0]", 1),
        ("a[1][2]", [3, 4]),
        ('b["x"]', {"y": "y"}),
        ('b["x"]["y"]', "y"),
        ('b["z"]', [1, 2]),
        ('b["z"][1]', 2),
        ('b["w"].z', "z"),
        ('b["w"].t.l', [1, 2]),
        ("a[-1].z", "z"),
        ("l[-1]", 2),
        ("a[2].t.a[-1].z", "z"),
        ('a[2].t.b["z"][0]', 1),
        ("a[-1].t.z", "z"),
    ],
)
def test_magicattr_get(key, value):
    """Exercise magicattr.get across attribute, index, and key chains."""
    obj = Test()
    # Make the fixture self-referential so paths like "t.t.z" and 'b["w"].z' resolve.
    # NOTE: obj.a and obj.b are class-level containers, so these mutations persist
    # across parametrized runs; a[-1] / b["w"] still point at the current obj's class data.
    obj.t = obj
    obj.a.append(obj)
    obj.b["w"] = obj
    assert magicattr.get(obj, key) == value
def test_person_example():
    """End-to-end get/set/delete on a small object graph, including error cases."""
    bob = Person(name="Bob", age=31, friends=[])
    jill = Person(name="Jill", age=29, friends=[bob])
    jack = Person(name="Jack", age=28, friends=[bob, jill])

    # Reads: plain attributes, defaults, nested paths, negative indices.
    assert magicattr.get(bob, "age") == 31
    with pytest.raises(AttributeError):
        magicattr.get(bob, "weight")
    assert magicattr.get(bob, "weight", default=75) == 75
    assert magicattr.get(jill, "friends[0].name") == "Bob"
    assert magicattr.get(jack, "friends[-1].age") == 29
    assert magicattr.get(jack, 'settings["style"]["width"]') == 200
    assert magicattr.get(jack, 'settings["themes"][-2]') == "light"
    assert magicattr.get(jack, 'friends[-1].settings["themes"][1]') == "dark"

    # Writes through nested paths (settings is class-level, hence shared).
    magicattr.set(bob, 'settings["style"]["width"]', 400)
    assert magicattr.get(bob, 'settings["style"]["width"]') == 400
    magicattr.set(bob, "friends", [jack, jill])
    assert magicattr.get(jack, "friends[0].friends[0]") == jack
    magicattr.set(jill, "friends[0].age", 32)
    assert bob.age == 32

    # Deletes: list items and attributes, both direct and nested.
    magicattr.delete(jill, "friends[0]")
    assert len(jill.friends) == 0
    magicattr.delete(jill, "age")
    assert not hasattr(jill, "age")
    magicattr.delete(bob, "friends[0].age")
    assert not hasattr(jack, "age")

    # Rejected expressions: arithmetic subscripts, calls, assignments, bad syntax.
    with pytest.raises(NotImplementedError):
        magicattr.get(bob, "friends[0+1]")
    with pytest.raises(ValueError):
        magicattr.get(bob, "friends.pop(0)")
    with pytest.raises(ValueError):
        magicattr.get(bob, "friends = []")
    with pytest.raises(SyntaxError):
        magicattr.get(bob, "friends..")
    with pytest.raises(KeyError):
        magicattr.get(bob, 'settings["DoesNotExist"]')
    with pytest.raises(IndexError):
        magicattr.get(bob, "friends[100]")
def test_empty():
    """Blank or non-string attribute specs are rejected with ValueError/TypeError."""
    obj = Test()
    with pytest.raises(ValueError):
        magicattr.get(obj, " ")
    with pytest.raises(ValueError):
        magicattr.get(obj, "")
    with pytest.raises(TypeError):
        magicattr.get(obj, 0)
    with pytest.raises(TypeError):
        magicattr.get(obj, None)
    with pytest.raises(TypeError):
        magicattr.get(obj, obj)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/utils/test_magicattr.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/adapters/test_audio.py | import pytest
from dspy.adapters.types.audio import _normalize_audio_format
@pytest.mark.parametrize(
    "input_format, expected_format",
    [
        # Case 1: Standard format (no change)
        ("wav", "wav"),
        ("mp3", "mp3"),
        # Case 2: The 'x-' prefix
        ("x-wav", "wav"),
        ("x-mp3", "mp3"),
        ("x-flac", "flac"),
        # Case 3: The edge case ('x-' removed only as a prefix, never mid-string)
        ("my-x-format", "my-x-format"),
        ("x-my-format", "my-format"),
        # Case 4: Empty string and edge cases
        ("", ""),
        ("x-", ""),
    ],
)
def test_normalize_audio_format(input_format: str, expected_format: str):
    """
    Tests that the _normalize_audio_format helper correctly removes 'x-' prefixes.
    This single test covers the logic for from_url, from_file, and encode_audio.
    """
    assert _normalize_audio_format(input_format) == expected_format
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/adapters/test_audio.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/adapters/types/citation.py | from typing import Any, Optional
import pydantic
from dspy.adapters.types.base_type import Type
from dspy.utils.annotation import experimental
@experimental(version="3.0.4")
class Citations(Type):
    """Citations extracted from an LM response with source references.

    This type represents citations returned by language models that support
    citation extraction, particularly Anthropic's Citations API through LiteLLM.
    Citations include the quoted text and source information.

    Example:
        ```python
        import os
        import dspy
        from dspy.signatures import Signature
        from dspy.experimental import Citations, Document

        os.environ["ANTHROPIC_API_KEY"] = "YOUR_ANTHROPIC_API_KEY"

        class AnswerWithSources(Signature):
            '''Answer questions using provided documents with citations.'''
            documents: list[Document] = dspy.InputField()
            question: str = dspy.InputField()
            answer: str = dspy.OutputField()
            citations: Citations = dspy.OutputField()

        # Create documents to provide as sources
        docs = [
            Document(
                data="The Earth orbits the Sun in an elliptical path.",
                title="Basic Astronomy Facts"
            ),
            Document(
                data="Water boils at 100°C at standard atmospheric pressure.",
                title="Physics Fundamentals",
                metadata={"author": "Dr. Smith", "year": 2023}
            )
        ]

        # Use with a model that supports citations like Claude
        lm = dspy.LM("anthropic/claude-opus-4-1-20250805")
        predictor = dspy.Predict(AnswerWithSources)
        result = predictor(documents=docs, question="What temperature does water boil?", lm=lm)

        for citation in result.citations.citations:
            print(citation.format())
        ```
    """

    class Citation(Type):
        """Individual citation with character location information."""

        # "char_location" is the citation kind handled here.
        type: str = "char_location"
        cited_text: str
        document_index: int
        document_title: str | None = None
        start_char_index: int
        end_char_index: int
        supported_text: str | None = None

        def format(self) -> dict[str, Any]:
            """Format citation as dictionary for LM consumption.

            Returns:
                A dictionary in the format expected by citation APIs.
            """
            citation_dict = {
                "type": self.type,
                "cited_text": self.cited_text,
                "document_index": self.document_index,
                "start_char_index": self.start_char_index,
                "end_char_index": self.end_char_index,
            }

            # Optional fields are only emitted when present.
            if self.document_title:
                citation_dict["document_title"] = self.document_title
            if self.supported_text:
                citation_dict["supported_text"] = self.supported_text

            return citation_dict

    # All citations parsed from the LM response.
    citations: list[Citation]

    @classmethod
    def from_dict_list(cls, citations_dicts: list[dict[str, Any]]) -> "Citations":
        """Convert a list of dictionaries to a Citations instance.

        Args:
            citations_dicts: A list of dictionaries, where each dictionary should have 'cited_text' key
                and 'document_index', 'start_char_index', 'end_char_index' keys.

        Returns:
            A Citations instance.

        Example:
            ```python
            citations_dict = [
                {
                    "cited_text": "The sky is blue",
                    "document_index": 0,
                    "document_title": "Weather Guide",
                    "start_char_index": 0,
                    "end_char_index": 15,
                    "supported_text": "The sky was blue yesterday."
                }
            ]
            citations = Citations.from_dict_list(citations_dict)
            ```
        """
        citations = [cls.Citation(**item) for item in citations_dicts]
        return cls(citations=citations)

    @classmethod
    def description(cls) -> str:
        """Description of the citations type for use in prompts."""
        return (
            "Citations with quoted text and source references. "
            "Include the exact text being cited and information about its source."
        )

    def format(self) -> list[dict[str, Any]]:
        """Format citations as a list of dictionaries."""
        return [citation.format() for citation in self.citations]

    @pydantic.model_validator(mode="before")
    @classmethod
    def validate_input(cls, data: Any):
        """Accept a Citations instance, a list of citation dicts, or a dict wrapper."""
        if isinstance(data, cls):
            return data

        # Handle case where data is a list of dicts with citation info
        if isinstance(data, list) and all(isinstance(item, dict) and "cited_text" in item for item in data):
            return {"citations": [cls.Citation(**item) for item in data]}

        # Handle case where data is a dict
        elif isinstance(data, dict):
            if "citations" in data:
                # Handle case where data is a dict with "citations" key
                citations_data = data["citations"]
                if isinstance(citations_data, list):
                    return {
                        "citations": [
                            cls.Citation(**item) if isinstance(item, dict) else item for item in citations_data
                        ]
                    }
            elif "cited_text" in data:
                # Handle case where data is a single citation dict
                return {"citations": [cls.Citation(**data)]}

        raise ValueError(f"Received invalid value for `Citations`: {data}")

    def __iter__(self):
        """Allow iteration over citations."""
        return iter(self.citations)

    def __len__(self):
        """Return the number of citations."""
        return len(self.citations)

    def __getitem__(self, index):
        """Allow indexing into citations."""
        return self.citations[index]

    @classmethod
    def adapt_to_native_lm_feature(cls, signature, field_name, lm, lm_kwargs) -> Any:
        # Returns the (possibly modified) signature, not a bool: the citations
        # field is removed for Anthropic models, which support citations natively.
        if lm.model.startswith("anthropic/"):
            return signature.delete(field_name)
        return signature

    @classmethod
    def is_streamable(cls) -> bool:
        """Whether the Citations type is streamable."""
        return True

    @classmethod
    def parse_stream_chunk(cls, chunk) -> Optional["Citations"]:
        """
        Parse a stream chunk into Citations.

        Args:
            chunk: A stream chunk from the LM.

        Returns:
            A Citations object if the chunk contains citation data, None otherwise.
        """
        try:
            # Check if the chunk has citation data in provider_specific_fields
            if hasattr(chunk, "choices") and chunk.choices:
                delta = chunk.choices[0].delta
                if hasattr(delta, "provider_specific_fields") and delta.provider_specific_fields:
                    citation_data = delta.provider_specific_fields.get("citation")
                    if citation_data:
                        return cls.from_dict_list([citation_data])
        except Exception:
            # Malformed chunks are treated as "no citations" rather than errors.
            pass
        return None

    @classmethod
    def parse_lm_response(cls, response: str | dict[str, Any]) -> Optional["Citations"]:
        """Parse a LM response into Citations.

        Args:
            response: A LM response that may contain citation data.

        Returns:
            A Citations object if citation data is found, None otherwise.
        """
        if isinstance(response, dict):
            # Check if the response contains citations in the expected format
            if "citations" in response:
                citations_data = response["citations"]
                if isinstance(citations_data, list):
                    return cls.from_dict_list(citations_data)
        return None
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/adapters/types/citation.py",
"license": "MIT License",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
stanfordnlp/dspy:dspy/adapters/types/document.py | from typing import Any, Literal
import pydantic
from dspy.adapters.types.base_type import Type
from dspy.utils.annotation import experimental
@experimental(version="3.0.4")
class Document(Type):
    """A document type for providing content that can be cited by language models.

    This type represents documents that can be passed to language models for citation-enabled
    responses, particularly useful with Anthropic's Citations API. Documents include the content
    and metadata that helps the LM understand and reference the source material.

    Attributes:
        data: The text content of the document
        title: Optional title for the document (used in citations)
        media_type: MIME type of the document content (defaults to "text/plain")
        context: Optional context information about the document

    Example:
        ```python
        import dspy
        from dspy.signatures import Signature
        from dspy.experimental import Document, Citations

        class AnswerWithSources(Signature):
            '''Answer questions using provided documents with citations.'''
            documents: list[Document] = dspy.InputField()
            question: str = dspy.InputField()
            answer: str = dspy.OutputField()
            citations: Citations = dspy.OutputField()

        # Create documents
        docs = [
            Document(
                data="The Earth orbits the Sun in an elliptical path.",
                title="Basic Astronomy Facts"
            ),
            Document(
                data="Water boils at 100°C at standard atmospheric pressure.",
                title="Physics Fundamentals",
            )
        ]

        # Use with a citation-supporting model
        lm = dspy.LM("anthropic/claude-opus-4-1-20250805")
        predictor = dspy.Predict(AnswerWithSources)
        result = predictor(documents=docs, question="What temperature does water boil?", lm=lm)
        print(result.citations)
        ```
    """

    # Text content of the document (the citable material).
    data: str
    # Optional human-readable title, surfaced in citation output.
    title: str | None = None
    # Only plain text and PDF are accepted media types.
    media_type: Literal["text/plain", "application/pdf"] = "text/plain"
    # Optional extra context passed alongside the document.
    context: str | None = None

    def format(self) -> list[dict[str, Any]]:
        """Format document for LM consumption.

        Returns:
            A list containing the document block in the format expected by citation-enabled language models.
        """
        # Citations are always enabled on formatted documents.
        document_block = {
            "type": "document",
            "source": {
                "type": "text",
                "media_type": self.media_type,
                "data": self.data
            },
            "citations": {"enabled": True}
        }

        # Optional metadata is only attached when present.
        if self.title:
            document_block["title"] = self.title
        if self.context:
            document_block["context"] = self.context

        return [document_block]

    @classmethod
    def description(cls) -> str:
        """Description of the document type for use in prompts."""
        return (
            "A document containing text content that can be referenced and cited. "
            "Include the full text content and optionally a title for proper referencing."
        )

    @pydantic.model_validator(mode="before")
    @classmethod
    def validate_input(cls, data: Any):
        """Accept a Document, a bare string (treated as data), or a field dict."""
        if isinstance(data, cls):
            return data

        # Handle case where data is just a string (data only)
        if isinstance(data, str):
            return {"data": data}
        # Handle case where data is a dict
        elif isinstance(data, dict):
            return data

        raise ValueError(f"Received invalid value for `Document`: {data}")

    def __str__(self) -> str:
        """String representation showing title and content length."""
        title_part = f"'{self.title}': " if self.title else ""
        return f"Document({title_part}{len(self.data)} chars)"
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/adapters/types/document.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
stanfordnlp/dspy:tests/adapters/test_citation.py | import pydantic
import pytest
import dspy
from dspy.experimental import Citations
def test_citation_validate_input():
    """A fully-specified Citation round-trips its fields; a bare one is rejected."""
    kwargs = {
        "cited_text": "The Earth orbits the Sun.",
        "document_index": 0,
        "start_char_index": 0,
        "end_char_index": 23,
        "supported_text": "The Earth orbits the Sun.",
    }
    citation = Citations.Citation(**kwargs)
    for field, expected in kwargs.items():
        assert getattr(citation, field) == expected
    # The location type defaults to character offsets.
    assert citation.type == "char_location"

    with pytest.raises(pydantic.ValidationError):
        Citations.Citation(cited_text="text")
def test_citations_in_nested_type():
    """Citations can be embedded as a field of another pydantic model."""
    class Wrapper(pydantic.BaseModel):
        citations: Citations

    inner = Citations.Citation(
        cited_text="Hello, world!",
        document_index=0,
        start_char_index=0,
        end_char_index=13,
        supported_text="Hello, world!",
    )
    wrapped = Wrapper(citations=Citations(citations=[inner]))
    assert wrapped.citations.citations[0].cited_text == "Hello, world!"
def test_citation_with_all_fields():
    """Every optional field, including document_title, is stored as given."""
    fields = {
        "cited_text": "Water boils at 100Β°C.",
        "document_index": 1,
        "document_title": "Physics Facts",
        "start_char_index": 10,
        "end_char_index": 31,
        "supported_text": "Water boils at 100Β°C.",
    }
    citation = Citations.Citation(**fields)
    for name, expected in fields.items():
        assert getattr(citation, name) == expected
def test_citation_format():
    """format() emits a char_location dict mirroring the citation's fields."""
    citation = Citations.Citation(
        cited_text="The sky is blue.",
        document_index=0,
        document_title="Weather Guide",
        start_char_index=5,
        end_char_index=21,
        supported_text="The sky is blue.",
    )
    expected = {
        "type": "char_location",
        "cited_text": "The sky is blue.",
        "document_index": 0,
        "document_title": "Weather Guide",
        "start_char_index": 5,
        "end_char_index": 21,
        "supported_text": "The sky is blue.",
    }
    formatted = citation.format()
    for key, value in expected.items():
        assert formatted[key] == value
def test_citations_format():
    """Citations.format() returns one formatted dict per contained citation."""
    first = Citations.Citation(
        cited_text="First citation",
        document_index=0,
        start_char_index=0,
        end_char_index=14,
        supported_text="First citation",
    )
    second = Citations.Citation(
        cited_text="Second citation",
        document_index=1,
        document_title="Source",
        start_char_index=20,
        end_char_index=35,
        supported_text="Second citation",
    )
    formatted = Citations(citations=[first, second]).format()
    assert isinstance(formatted, list)
    assert len(formatted) == 2
    # Order is preserved, and optional fields survive formatting.
    assert [entry["cited_text"] for entry in formatted] == ["First citation", "Second citation"]
    assert formatted[1]["document_title"] == "Source"
def test_citations_from_dict_list():
    """from_dict_list() builds Citation objects from raw dict payloads."""
    payload = {
        "cited_text": "The sky is blue",
        "document_index": 0,
        "document_title": "Weather Guide",
        "start_char_index": 0,
        "end_char_index": 15,
        "supported_text": "The sky was blue yesterday.",
    }
    citations = Citations.from_dict_list([payload])
    assert len(citations.citations) == 1
    only = citations.citations[0]
    assert only.cited_text == "The sky is blue"
    assert only.document_title == "Weather Guide"
def test_citations_postprocessing():
    """ChatAdapter post-processing attaches native citations to the parsed output."""
    from dspy.adapters.chat_adapter import ChatAdapter
    from dspy.signatures.signature import Signature

    class CitationSignature(Signature):
        """Test signature with citations."""
        question: str = dspy.InputField()
        answer: str = dspy.OutputField()
        citations: Citations = dspy.OutputField()

    adapter = ChatAdapter(native_response_types=[Citations])
    citation_payload = {
        "cited_text": "The sky is blue",
        "document_index": 0,
        "document_title": "Weather Guide",
        "start_char_index": 10,
        "end_char_index": 25,
        "supported_text": "The sky is blue",
    }
    outputs = [
        {
            "text": "[[ ## answer ## ]]\nThe answer is blue.\n\n[[ ## citations ## ]]\n[]",
            "citations": [citation_payload],
        }
    ]
    processed = adapter._call_postprocess(
        CitationSignature.delete("citations"),
        CitationSignature,
        outputs,
        dspy.LM(model="anthropic/claude-3-5-sonnet-20241022"),
        lm_kwargs={},
    )
    assert len(processed) == 1
    assert "citations" in processed[0]
    extracted = processed[0]["citations"]
    assert isinstance(extracted, Citations)
    assert len(extracted) == 1
    assert extracted[0].cited_text == "The sky is blue"
def test_citation_extraction_from_lm_response():
    """LM._extract_citations_from_response pulls provider citation payloads out of a choice."""
    from unittest.mock import MagicMock

    raw_citation = {
        "type": "char_location",
        "cited_text": "The sky is blue",
        "document_index": 0,
        "document_title": "Weather Guide",
        "start_char_index": 10,
        "end_char_index": 25,
        "supported_text": "The sky is blue",
    }
    mock_choice = MagicMock(
        message=MagicMock(provider_specific_fields={"citations": [[raw_citation]]})
    )

    extracted = dspy.LM(model="test")._extract_citations_from_response(mock_choice)

    assert extracted is not None
    assert len(extracted) == 1
    keys = (
        "cited_text", "document_index", "document_title",
        "start_char_index", "end_char_index", "supported_text",
    )
    for key in keys:
        assert extracted[0][key] == raw_citation[key]
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/adapters/test_citation.py",
"license": "MIT License",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/adapters/test_document.py | import pydantic
import pytest
from dspy.experimental import Document
def test_document_validate_input():
    """A plain string is accepted as document data; non-strings are rejected."""
    doc = Document(data="The Earth orbits the Sun.")
    assert doc.data == "The Earth orbits the Sun."

    # Non-string data fails pydantic validation.
    with pytest.raises(pydantic.ValidationError):
        Document(data=123)
def test_document_in_nested_type():
    """Document nests cleanly as a field of another pydantic model."""
    class Wrapper(pydantic.BaseModel):
        document: Document

    wrapped = Wrapper(document=Document(data="Hello, world!"))
    assert wrapped.document.data == "Hello, world!"
def test_document_with_all_fields():
    """All optional Document fields are stored exactly as provided."""
    fields = {
        "data": "Water boils at 100Β°C at standard pressure.",
        "title": "Physics Facts",
        "media_type": "application/pdf",
        "context": "Laboratory conditions",
    }
    doc = Document(**fields)
    for name, expected in fields.items():
        assert getattr(doc, name) == expected
def test_document_format():
    """format() wraps the content in a single citation-enabled document block."""
    doc = Document(
        data="The sky is blue.",
        title="Color Facts",
        media_type="text/plain",
    )
    formatted = doc.format()
    assert isinstance(formatted, list)
    assert len(formatted) == 1

    block = formatted[0]
    assert block["type"] == "document"
    assert block["source"] == {
        "type": "text",
        "media_type": "text/plain",
        "data": "The sky is blue.",
    }
    assert block["title"] == "Color Facts"
    assert block["citations"]["enabled"] is True
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/adapters/test_document.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/utils/annotation.py | import inspect
import re
import types
from typing import Callable, ParamSpec, TypeVar, overload
P = ParamSpec("P")
R = TypeVar("R")
@overload
def experimental(f: Callable[P, R], version: str | None = None) -> Callable[P, R]: ...
@overload
def experimental(f: None = None, version: str | None = None) -> Callable[[Callable[P, R]], Callable[P, R]]: ...
def experimental(
    f: Callable[P, R] | None = None,
    version: str | None = None,
) -> Callable[[Callable[P, R]], Callable[P, R]]:
    """Decorator / decorator factory marking an API as experimental in its docstring.

    Supports both bare (`@experimental`) and parameterized
    (`@experimental(version=...)`) usage.

    Args:
        f: The function to be decorated, when used as a bare decorator.
        version: The version in which the API was introduced as experimental.
            The version is used to determine whether the API should be considered
            as stable or not when releasing a new version of DSPy.

    Returns:
        The decorated API, or a decorator that adds the experimental note.
    """
    if f:
        # Bare usage: decorate immediately.
        return _experimental(f, version)
    # Parameterized usage: return a decorator that captures `version`.
    return lambda api: _experimental(api, version)
def _experimental(api: Callable[P, R], version: str | None = None) -> Callable[P, R]:
    """Add experimental notice to the API's docstring."""
    # Classify the API so the notice names the right kind of object.
    checks = (
        (inspect.isclass, "class"),
        (inspect.isfunction, "function"),
        (lambda a: isinstance(a, property), "property"),
        (lambda a: isinstance(a, types.MethodType), "method"),
    )
    api_type = next((label for predicate, label in checks if predicate(api)), None)
    if api_type is None:
        api_type = str(type(api))

    indent = _get_min_indent_of_docstring(api.__doc__) if api.__doc__ else ""
    version_text = f" (introduced in v{version})" if version else ""
    notice = (
        indent + f"Experimental: This {api_type} may change or "
        f"be removed in a future release without warning{version_text}."
    )

    existing = api.__doc__
    if api_type == "property":
        # Properties get the notice appended after the original docstring.
        api.__doc__ = f"{existing}\n\n{notice}" if existing else notice
    else:
        # Everything else gets the notice prepended.
        api.__doc__ = f"{notice}\n\n{existing}" if existing else notice
    return api
def _get_min_indent_of_docstring(docstring_str: str) -> str:
"""
Get the minimum indentation string of a docstring, based on the assumption
that the closing triple quote for multiline comments must be on a new line.
Note that based on ruff rule D209, the closing triple quote for multiline
comments must be on a new line.
Args:
docstring_str: string with docstring
Returns:
Whitespace corresponding to the indent of a docstring.
"""
if not docstring_str or "\n" not in docstring_str:
return ""
match = re.match(r"^\s*", docstring_str.rsplit("\n", 1)[-1])
return match.group() if match else ""
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/utils/annotation.py",
"license": "MIT License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/utils/test_annotation.py | from dspy.utils.annotation import experimental
def test_experimental_decorator_on_function():
    """Bare decoration prepends the notice and preserves behavior."""
    @experimental
    def test_function():
        """A test function."""
        return "test"

    doc = test_function.__doc__
    assert "Experimental: This function may change or be removed in a future release without warning." in doc
    assert "A test function." in doc
    assert test_function() == "test"
def test_experimental_decorator_on_function_with_version():
    """Parameterized decoration includes the introduction version."""
    @experimental(version="3.1.0")
    def test_function():
        """A test function with version."""
        return "versioned"

    doc = test_function.__doc__
    assert "introduced in v3.1.0" in doc
    assert (
        "Experimental: This function may change or be removed in a future release without warning (introduced in v3.1.0)."
        in doc
    )
    assert "A test function with version." in doc
    assert test_function() == "versioned"
def test_experimental_decorator_on_class():
    """Classes get a class-specific notice; instances still work."""
    @experimental
    class TestClass:
        """A test class."""

        def method(self):
            return "method"

    doc = TestClass.__doc__
    assert "Experimental: This class may change or be removed in a future release without warning." in doc
    assert "A test class." in doc
    assert TestClass().method() == "method"
def test_experimental_decorator_on_class_with_version():
    """Versioned class decoration mentions the introduction version."""
    @experimental(version="2.5.0")
    class TestClass:
        """A test class with version."""

    doc = TestClass.__doc__
    assert "introduced in v2.5.0" in doc
    assert (
        "Experimental: This class may change or be removed in a future release without warning (introduced in v2.5.0)."
        in doc
    )
    assert "A test class with version." in doc
def test_experimental_decorator_without_docstring():
    """With no original docstring, the notice becomes the entire docstring."""
    @experimental
    def test_function():
        return "no_doc"

    expected = "Experimental: This function may change or be removed in a future release without warning."
    assert test_function.__doc__ == expected
    assert test_function() == "no_doc"
def test_experimental_decorator_without_docstring_with_version():
    """No docstring plus a version yields only the versioned notice."""
    @experimental(version="1.0.0")
    def test_function():
        return "no_doc_version"

    expected = "Experimental: This function may change or be removed in a future release without warning (introduced in v1.0.0)."
    assert test_function.__doc__ == expected
    assert test_function() == "no_doc_version"
def test_experimental_decorator_with_callable_syntax():
    """experimental() also works as a plain function call."""
    def plain_function():
        """A test function."""
        return "callable"

    wrapped = experimental(plain_function)
    assert "Experimental:" in wrapped.__doc__
    assert "A test function." in wrapped.__doc__
    assert wrapped() == "callable"
def test_experimental_decorator_with_version_callable_syntax():
    """Plain-call usage with an explicit version argument."""
    def plain_function():
        """A test function."""
        return "callable_version"

    wrapped = experimental(plain_function, version="4.0.0")
    assert "introduced in v4.0.0" in wrapped.__doc__
    assert "Experimental:" in wrapped.__doc__
    assert wrapped() == "callable_version"
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/utils/test_annotation.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:tests/adapters/test_adapter_utils.py | # ruff: noqa: UP007
from typing import Literal, Optional, Union
import pytest
from pydantic import BaseModel
from dspy.adapters.utils import parse_value
# Minimal pydantic model used as a parse target throughout these tests.
class Profile(BaseModel):
    name: str  # required string field
    age: int  # required integer field (parse failures exercised below)
def test_parse_value_str_annotation():
    """Any value requested as `str` is stringified via str()."""
    cases = [
        (123, "123"),
        (True, "True"),
        ("hello", "hello"),
        (None, "None"),
        ([1, 2, 3], "[1, 2, 3]"),
    ]
    for raw, expected in cases:
        assert parse_value(raw, str) == expected
def test_parse_value_pydantic_types():
    """JSON strings and dicts both validate into a pydantic model."""
    # JSON string input.
    parsed = parse_value('{"name": "John", "age": 30}', Profile)
    assert isinstance(parsed, Profile)
    assert (parsed.name, parsed.age) == ("John", 30)

    # Plain dict input.
    parsed = parse_value({"name": "Jane", "age": 25}, Profile)
    assert isinstance(parsed, Profile)
    assert (parsed.name, parsed.age) == ("Jane", 25)

    # Missing required field raises.
    with pytest.raises(Exception):
        parse_value('{"name": "John"}', Profile)
def test_parse_value_basic_types():
    """Scalars and lists parse from both string and native representations."""
    assert parse_value("42", int) == 42
    assert parse_value(42, int) == 42

    assert parse_value("3.14", float) == 3.14
    assert parse_value(3.14, float) == 3.14

    assert parse_value("true", bool) is True
    assert parse_value(True, bool) is True
    assert parse_value("false", bool) is False

    for raw in ("[1, 2, 3]", [1, 2, 3]):
        assert parse_value(raw, list[int]) == [1, 2, 3]
def test_parse_value_literal():
    """Literal parsing normalizes quoted/prefixed spellings; unknowns raise."""
    options = Literal["option1", "option2"]
    assert parse_value("option1", options) == "option1"
    assert parse_value("option2", options) == "option2"

    # Quoted or type-prefixed spellings are stripped before matching.
    for spelling in ("'option1'", '"option1"', "Literal[option1]", "str[option1]"):
        assert parse_value(spelling, options) == "option1"

    with pytest.raises(ValueError):
        parse_value("invalid", options)
def test_parse_value_union():
    """Optionals pass values and None through; unions fall back to str."""
    assert parse_value("test", Optional[str]) == "test"
    assert parse_value("test", str | None) == "test"
    assert parse_value("5", int | None) == 5
    assert parse_value(None, Optional[str]) is None
    assert parse_value("text with [placeholder]", Optional[str]) == "text with [placeholder]"
    assert parse_value("text with [placeholder]", str | None) == "text with [placeholder]"

    # Both typing.Union and PEP 604 unions fall back to str when needed.
    for annotation in (Union[int, str, None], int | str | None):
        assert parse_value("fallback", annotation) == "fallback"
        assert parse_value(5, annotation) == 5
    assert parse_value("text with [placeholder]", Union[int, str, None]) == "text with [placeholder]"
def test_parse_value_json_repair():
    """dict parsing accepts JSON and Python-literal syntax; garbage raises."""
    # Standard JSON parses directly.
    assert parse_value('{"key": "value"}', dict) == {"key": "value"}
    # Python-literal syntax goes through the ast.literal_eval fallback.
    assert parse_value("{'key': 'value'}", dict) == {"key": "value"}
    # Unparseable input raises rather than returning the raw string.
    with pytest.raises(Exception):
        parse_value("not json or literal", dict)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/adapters/test_adapter_utils.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/teleprompt/gepa/instruction_proposal.py | import logging
from typing import Any
from gepa.core.adapter import ProposalFn
import dspy
from dspy.adapters.types.base_type import Type
from dspy.teleprompt.gepa.gepa_utils import ReflectiveExample
logger = logging.getLogger(__name__)
# NOTE: For dspy.Signature subclasses, the class docstring and the field `desc`
# strings are sent to the LM verbatim as the task prompt. Do not reword them
# for style -- any change alters runtime behavior.
class GenerateEnhancedMultimodalInstructionFromFeedback(dspy.Signature):
    """I provided an assistant with instructions to perform a task involving visual content, but the assistant's performance needs improvement based on the examples and feedback below.

    Your task is to write a better instruction for the assistant that addresses the specific issues identified in the feedback, with particular attention to how visual and textual information should be analyzed and integrated.

    ## Analysis Steps:
    1. **Read the inputs carefully** and identify both the visual and textual input formats, understanding how they work together
    2. **Read all the assistant responses and corresponding feedback** to understand what went wrong with visual analysis, text processing, or their integration
    3. **Identify visual analysis patterns** - what visual features, relationships, or details are important for this task
    4. **Identify domain-specific knowledge** about both visual and textual aspects, as this information may not be available to the assistant in the future
    5. **Look for successful visual-textual integration strategies** and include these patterns in the instruction
    6. **Address specific visual analysis issues** mentioned in the feedback

    ## Instruction Requirements:
    - **Clear task definition** explaining how to process both visual and textual inputs
    - **Visual analysis guidance** specific to this task (what to look for, how to describe, what features matter)
    - **Integration strategies** for combining visual observations with textual information
    - **Domain-specific knowledge** about visual concepts, terminology, or relationships
    - **Error prevention guidance** for common visual analysis mistakes shown in the feedback
    - **Precise, actionable language** for both visual and textual processing

    Focus on creating an instruction that helps the assistant properly analyze visual content, integrate it with textual information, and avoid the specific visual analysis mistakes shown in the examples."""

    # Input: the instruction currently in use for the component being improved.
    current_instruction = dspy.InputField(
        desc="The current instruction that was provided to the assistant to perform the multimodal task"
    )
    # Input: formatted examples (text plus any images) with per-example feedback.
    examples_with_feedback = dspy.InputField(
        desc="Task examples with visual content showing inputs, assistant outputs, and feedback. "
        "Pay special attention to feedback about visual analysis accuracy, visual-textual integration, "
        "and any domain-specific visual knowledge that the assistant missed."
    )
    # Output: the proposed replacement instruction.
    improved_instruction = dspy.OutputField(
        desc="A better instruction for the assistant that addresses visual analysis issues, provides "
        "clear guidance on how to process and integrate visual and textual information, includes "
        "necessary visual domain knowledge, and prevents the visual analysis mistakes shown in the examples."
    )
class SingleComponentMultiModalProposer(dspy.Module):
    """
    dspy.Module for proposing improved instructions based on feedback.
    """

    def __init__(self):
        super().__init__()
        self.propose_instruction = dspy.Predict(GenerateEnhancedMultimodalInstructionFromFeedback)

    def forward(self, current_instruction: str, reflective_dataset: list[ReflectiveExample]) -> str:
        """
        Generate an improved instruction based on current instruction and feedback examples.

        Args:
            current_instruction: The current instruction that needs improvement
            reflective_dataset: List of examples with inputs, outputs, and feedback
                May contain dspy.Image objects in inputs

        Returns:
            str: Improved instruction text
        """
        # Format examples with enhanced pattern recognition.
        formatted_examples, image_map = self._format_examples_with_pattern_analysis(reflective_dataset)

        # Build the predictor inputs once. The examples field is multimodal:
        # the formatted text plus any images found in the reflective data.
        # (Previously this key was assigned twice -- first the plain text, then
        # immediately overwritten with the multimodal content; the first
        # assignment was dead.)
        predict_kwargs = {
            "current_instruction": current_instruction,
            "examples_with_feedback": self._create_multimodal_examples(formatted_examples, image_map),
        }

        # Use current dspy LM settings (GEPA will pass reflection_lm via context).
        result = self.propose_instruction(**predict_kwargs)
        return result.improved_instruction

    def _format_examples_with_pattern_analysis(
        self, reflective_dataset: list[ReflectiveExample]
    ) -> tuple[str, dict[int, list[Type]]]:
        """
        Format examples with pattern analysis and feedback categorization.

        Returns:
            tuple: (formatted_text_with_patterns, image_map)
        """
        # First, use the existing proven formatting approach.
        formatted_examples, image_map = self._format_examples_for_instruction_generation(reflective_dataset)

        # Enhanced analysis: categorize feedback patterns.
        feedback_analysis = self._analyze_feedback_patterns(reflective_dataset)

        # Prepend the pattern summary only when some pattern was detected.
        if feedback_analysis["summary"]:
            pattern_summary = self._create_pattern_summary(feedback_analysis)
            enhanced_examples = f"{pattern_summary}\n\n{formatted_examples}"
            return enhanced_examples, image_map

        return formatted_examples, image_map

    def _analyze_feedback_patterns(self, reflective_dataset: list[ReflectiveExample]) -> dict[str, Any]:
        """
        Analyze feedback patterns to provide better context for instruction generation.

        Categorizes feedback into:
        - Error patterns: Common mistakes and their types
        - Success patterns: What worked well and should be preserved/emphasized
        - Domain knowledge gaps: Missing information that should be included
        - Task-specific guidance: Specific requirements or edge cases
        """
        analysis = {
            "error_patterns": [],
            "success_patterns": [],
            "domain_knowledge_gaps": [],
            "task_specific_guidance": [],
            "summary": "",
        }

        # Simple keyword-based pattern recognition - could be enhanced further.
        for example in reflective_dataset:
            feedback = example.get("Feedback", "").lower()

            # Identify error patterns.
            if any(error_word in feedback for error_word in ["incorrect", "wrong", "error", "failed", "missing"]):
                analysis["error_patterns"].append(feedback)

            # Identify success patterns.
            if any(
                success_word in feedback for success_word in ["correct", "good", "accurate", "well", "successfully"]
            ):
                analysis["success_patterns"].append(feedback)

            # Identify domain knowledge needs.
            if any(
                knowledge_word in feedback
                for knowledge_word in ["should know", "domain", "specific", "context", "background"]
            ):
                analysis["domain_knowledge_gaps"].append(feedback)

        # Create summary only if at least one pattern category matched.
        if any(analysis[key] for key in ["error_patterns", "success_patterns", "domain_knowledge_gaps"]):
            analysis["summary"] = (
                f"Patterns identified: {len(analysis['error_patterns'])} error(s), {len(analysis['success_patterns'])} success(es), {len(analysis['domain_knowledge_gaps'])} knowledge gap(s)"
            )

        return analysis

    def _create_pattern_summary(self, feedback_analysis: dict[str, Any]) -> str:
        """Create a summary of feedback patterns to help guide instruction generation."""
        summary_parts = ["## Feedback Pattern Analysis\n"]

        if feedback_analysis["error_patterns"]:
            summary_parts.append(f"**Common Issues Found ({len(feedback_analysis['error_patterns'])} examples):**")
            summary_parts.append("Focus on preventing these types of mistakes in the new instruction.\n")

        if feedback_analysis["success_patterns"]:
            summary_parts.append(
                f"**Successful Approaches Found ({len(feedback_analysis['success_patterns'])} examples):**"
            )
            summary_parts.append("Build on these successful strategies in the new instruction.\n")

        if feedback_analysis["domain_knowledge_gaps"]:
            summary_parts.append(
                f"**Domain Knowledge Needs Identified ({len(feedback_analysis['domain_knowledge_gaps'])} examples):**"
            )
            summary_parts.append("Include this specialized knowledge in the new instruction.\n")

        return "\n".join(summary_parts)

    def _format_examples_for_instruction_generation(
        self, reflective_dataset: list[ReflectiveExample]
    ) -> tuple[str, dict[int, list[Type]]]:
        """
        Format examples using GEPA's markdown structure while preserving image objects.

        Returns:
            tuple: (formatted_text, image_map) where image_map maps example_index -> list[images]
        """

        def render_value_with_images(value, level=3, example_images=None):
            # Recursively render a value as markdown; Type instances (images)
            # are replaced with numbered placeholders and collected.
            if example_images is None:
                example_images = []

            if isinstance(value, Type):
                image_idx = len(example_images) + 1
                example_images.append(value)
                return f"[IMAGE-{image_idx} - see visual content]\n\n"
            elif isinstance(value, dict):
                s = ""
                for k, v in value.items():
                    s += f"{'#' * level} {k}\n"
                    s += render_value_with_images(v, min(level + 1, 6), example_images)
                if not value:
                    s += "\n"
                return s
            elif isinstance(value, (list, tuple)):
                s = ""
                for i, item in enumerate(value):
                    s += f"{'#' * level} Item {i + 1}\n"
                    s += render_value_with_images(item, min(level + 1, 6), example_images)
                if not value:
                    s += "\n"
                return s
            else:
                return f"{str(value).strip()}\n\n"

        def convert_sample_to_markdown_with_images(sample, example_num):
            # One markdown section per example; returns the text and its images.
            example_images = []
            s = f"# Example {example_num}\n"
            for key, val in sample.items():
                s += f"## {key}\n"
                s += render_value_with_images(val, level=3, example_images=example_images)
            return s, example_images

        formatted_parts = []
        image_map = {}

        for i, example_data in enumerate(reflective_dataset):
            formatted_example, example_images = convert_sample_to_markdown_with_images(example_data, i + 1)
            formatted_parts.append(formatted_example)
            if example_images:
                image_map[i] = example_images

        formatted_text = "\n\n".join(formatted_parts)

        # When images are present, prefix a note so the LM knows to look for them.
        if image_map:
            total_images = sum(len(imgs) for imgs in image_map.values())
            formatted_text = (
                f"The examples below include visual content ({total_images} images total). "
                "Please analyze both the text and visual elements when suggesting improvements.\n\n" + formatted_text
            )

        return formatted_text, image_map

    def _create_multimodal_examples(self, formatted_text: str, image_map: dict[int, list[Type]]) -> Any:
        """
        Create a multimodal input that contains both text and images for the reflection LM.

        Args:
            formatted_text: The formatted text with image placeholders
            image_map: Dictionary mapping example_index -> list[images] for structured access

        Returns:
            The plain text when there are no images; otherwise a list whose
            first element is the text followed by every collected image.
        """
        if not image_map:
            return formatted_text

        # Collect all images from all examples, in example order.
        all_images = []
        for example_images in image_map.values():
            all_images.extend(example_images)

        multimodal_content = [formatted_text]
        multimodal_content.extend(all_images)
        return multimodal_content
class MultiModalInstructionProposer(ProposalFn):
    """GEPA-compatible multimodal instruction proposer.

    This class handles multimodal inputs (like dspy.Image) during GEPA optimization by using
    a single-component proposer for each component that needs to be updated.
    """

    def __init__(self):
        self.single_proposer = SingleComponentMultiModalProposer()

    def __call__(
        self,
        candidate: dict[str, str],
        reflective_dataset: dict[str, list[ReflectiveExample]],
        components_to_update: list[str],
    ) -> dict[str, str]:
        """GEPA-compatible proposal function.

        Args:
            candidate: Current component name -> instruction mapping
            reflective_dataset: Component name -> list of reflective examples
            components_to_update: List of component names to update

        Returns:
            dict: Component name -> new instruction mapping
        """
        proposals: dict[str, str] = {}
        for component_name in components_to_update:
            # Only components with both a current instruction and reflective
            # examples can be updated.
            if component_name not in candidate or component_name not in reflective_dataset:
                continue
            # Proposals are made one component at a time; future versions could
            # weigh all component instructions together for more holistic updates.
            proposals[component_name] = self.single_proposer(
                current_instruction=candidate[component_name],
                reflective_dataset=reflective_dataset[component_name],
            )
        return proposals
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/teleprompt/gepa/instruction_proposal.py",
"license": "MIT License",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/teleprompt/test_gepa_instruction_proposer.py | import logging
from dataclasses import dataclass
from typing import Any
import pytest
import dspy
from dspy.teleprompt.gepa import instruction_proposal
from dspy.utils.dummies import DummyLM
def count_messages_with_image_url_pattern(messages):
    """Helper to count image URLs in messages - borrowed from image adapter tests"""
    pattern = {"type": "image_url", "image_url": {"url": lambda x: isinstance(x, str)}}
    try:
        def matches(node, spec):
            # Dict specs require a dict node containing every key, recursively.
            if isinstance(spec, dict):
                return isinstance(node, dict) and all(
                    key in node and matches(node[key], sub) for key, sub in spec.items()
                )
            # Callable specs act as predicates; anything else is compared directly.
            if callable(spec):
                return spec(node)
            return node == spec

        def tally(node):
            total = 1 if matches(node, pattern) else 0
            if isinstance(node, dict):
                total += sum(tally(child) for child in node.values())
            if isinstance(node, (list, tuple)):
                total += sum(tally(child) for child in node)
            return total

        return tally(messages)
    except Exception:
        # Defensive: any malformed structure counts as zero images.
        return 0
# Result record for check_images_in_history below.
@dataclass
class ImagesInHistory:
    # True when some call's messages contain structured image_url blocks.
    has_structured_images: bool
    # True when some history entry contains the text-serialized image marker.
    has_text_serialized_images: bool
def check_images_in_history(history: list[Any]) -> ImagesInHistory:
    """Scan an LM call history for structured and text-serialized images."""

    def contains_serialized_marker(node: Any) -> bool:
        # Recursively look for the serialized-image sentinel in any string.
        if isinstance(node, list):
            return any(contains_serialized_marker(child) for child in node)
        if isinstance(node, dict):
            return any(contains_serialized_marker(child) for child in node.values())
        return isinstance(node, str) and "CUSTOM-TYPE-START-IDENTIFIER" in node

    structured = False
    for call in history:
        messages = call.get("messages")
        if messages and count_messages_with_image_url_pattern(messages) > 0:
            structured = True
            break

    return ImagesInHistory(
        has_structured_images=structured,
        has_text_serialized_images=any(contains_serialized_marker(entry) for entry in history),
    )
def test_reflection_lm_gets_structured_images():
    """
    Verify reflection LM receives structured image messages, not serialized text.
    """
    program = dspy.Predict("image: dspy.Image -> label: str")
    img = dspy.Image("https://example.com/test.jpg")
    train_example = dspy.Example(image=img, label="dog").with_inputs("image")

    # Reflection responses, consumed in order across GEPA proposal steps.
    reflection_texts = [
        "Better instruction",
        "Enhanced visual analysis instruction",
        "Focus on key features",
        "Analyze visual patterns systematically",
        "Consider distinctive visual elements",
        "Enhance recognition accuracy",
        "Improve classification methodology",
    ]
    reflection_lm = DummyLM([{"improved_instruction": text} for text in reflection_texts])

    # Plenty of task responses so the optimizer never runs out.
    label_sequence = [
        "cat", "dog", "animal", "pet", "feline", "canine", "mammal",
        "creature", "species", "domestic", "wild", "carnivore",
        "herbivore", "quadruped", "vertebrate",
    ]
    task_lm = DummyLM([{"label": label} for label in label_sequence])
    dspy.configure(lm=task_lm)

    optimizer = dspy.GEPA(
        metric=lambda gold, pred, trace=None, pred_name=None, pred_trace=None: 0.3,
        max_metric_calls=2,
        reflection_lm=reflection_lm,
        instruction_proposer=instruction_proposal.MultiModalInstructionProposer(),
    )
    optimizer.compile(program, trainset=[train_example], valset=[train_example])

    assert len(task_lm.history) > 0, "LM should have been called"
    assert len(reflection_lm.history) > 0, "Reflection LM should have been called"

    scan = check_images_in_history(reflection_lm.history)
    assert scan.has_structured_images, "Reflection LM should have received structured images"
    assert not scan.has_text_serialized_images, "Reflection LM received serialized images in prompts"
def test_custom_proposer_without_reflection_lm():
    """Test that custom instruction proposers can work without reflection_lm when using updated GEPA core."""
    # External reflection LM managed entirely by the custom proposer.
    external_reflection_lm = DummyLM(
        [
            {"improved_instruction": text}
            for text in [
                "External LM response",
                "Enhanced instruction",
                "Better guidance",
                "Optimized instruction",
                "Refined approach",
            ]
        ]
    )

    class ProposerWithExternalLM:
        def __call__(self, candidate, reflective_dataset, components_to_update):
            # This proposer manages its own external reflection LM.
            with dspy.context(lm=external_reflection_lm):
                # Use external LM for reflection (optional - could be any custom logic).
                external_reflection_lm([{"role": "user", "content": "Improve this instruction"}])
            return {name: f"Externally-improved: {candidate[name]}" for name in components_to_update}

    program = dspy.Predict("text -> label")
    train_example = dspy.Example(text="test input", label="test").with_inputs("text")

    # Robust dummy LM with enough responses for the optimization steps.
    label_sequence = [
        "test", "result", "output", "response", "classification", "prediction",
        "category", "type", "class", "group", "kind", "variant", "form",
        "style", "mode",
    ]
    task_lm = DummyLM([{"label": value} for value in label_sequence])
    dspy.configure(lm=task_lm)

    # Full flexibility: no reflection_lm handed to GEPA at all — the updated
    # GEPA core library allows this when a custom proposer is supplied.
    optimizer = dspy.GEPA(
        metric=lambda gold, pred, trace=None, pred_name=None, pred_trace=None: 0.7,  # Score to trigger optimization
        max_metric_calls=5,  # More calls to allow proper optimization
        reflection_lm=None,  # No reflection_lm provided - this now works!
        instruction_proposer=ProposerWithExternalLM(),
    )
    compiled = optimizer.compile(program, trainset=[train_example], valset=[train_example])

    assert compiled is not None
    assert len(task_lm.history) > 0, "Main LM should have been called"
    assert len(external_reflection_lm.history) > 0, "External reflection LM should have been called by custom proposer"
def test_image_serialization_into_strings():
    """
    Test that demonstrates the image serialization problem when calling lm directly with serialized image data.
    """

    class InstructionProposerCallingLMDirectly:
        # Anti-pattern proposer: builds a flat text prompt by interpolating raw
        # example inputs (including dspy.Image objects), so images end up
        # serialized into the prompt string instead of structured messages.
        def __call__(
            self,
            candidate: dict[str, str],
            reflective_dataset: dict[str, list[dict[str, Any]]],
            components_to_update: list[str],
        ) -> dict[str, str]:
            updated_components = {}
            for component_name in components_to_update:
                # Skip components lacking either a current instruction or reflective data.
                if component_name not in candidate or component_name not in reflective_dataset:
                    continue
                current_instruction = candidate[component_name]
                component_data = reflective_dataset[component_name]
                feedback_analysis = "Feedback analysis:\n"
                for i, example in enumerate(component_data):
                    feedback_analysis += f"Example {i + 1}:\n"
                    # Non ideal approach: extract and serialize image objects directly
                    inputs = example.get("Inputs", {})
                    for key, value in inputs.items():
                        # f-string interpolation stringifies dspy.Image values here.
                        feedback_analysis += f"  {key}: {value}\n"
                    outputs = example.get("Generated Outputs", {})
                    feedback = example.get("Feedback", "")
                    feedback_analysis += f"  Outputs: {outputs}\n"
                    feedback_analysis += f"  Feedback: {feedback}\n\n"
                # Call the currently configured LM directly with the flat text prompt.
                context_lm = dspy.settings.lm
                messages = [
                    {"role": "system", "content": "You are an instruction improvement assistant."},
                    {
                        "role": "user",
                        "content": f"Current instruction: {current_instruction}\n\nFeedback: {feedback_analysis}\n\nProvide an improved instruction:",
                    },
                ]
                result = context_lm(messages=messages)
                updated_components[component_name] = result[0]
            return updated_components

    direct_lm_call_proposer = InstructionProposerCallingLMDirectly()

    student = dspy.Predict("image -> label")
    image = dspy.Image("https://picsum.photos/id/237/200/300")
    examples = [
        dspy.Example(image=image, label="cat").with_inputs("image"),
        dspy.Example(image=image, label="animal").with_inputs("image"),
    ]

    lm = DummyLM(
        [
            {"label": "cat"},
            {"label": "dog"},
            {"label": "animal"},
            {"label": "pet"},
            {"label": "feline"},
            {"label": "mammal"},
            {"label": "creature"},
            {"label": "species"},
            {"label": "domestic"},
            {"label": "wild"},
            {"label": "carnivore"},
            {"label": "herbivore"},
        ]
    )
    dspy.configure(lm=lm)
    reflection_lm = DummyLM(
        [
            {"improved_instruction": "Be more specific about image analysis"},
            {"improved_instruction": "Focus on visual features when classifying"},
            {"improved_instruction": "Consider contextual clues in the image"},
            {"improved_instruction": "Analyze shape, color, and texture patterns"},
            {"improved_instruction": "Look for distinguishing characteristics"},
        ]
    )
    gepa = dspy.GEPA(
        metric=lambda gold, pred, trace=None, pred_name=None, pred_trace=None: 0.3,
        max_metric_calls=5,
        reflection_lm=reflection_lm,
        instruction_proposer=direct_lm_call_proposer,
    )
    gepa.compile(student, trainset=examples, valset=examples)

    assert len(lm.history) > 0, "LM should have been called"
    assert len(reflection_lm.history) > 0, "Reflection LM should have been called"
    images_in_history = check_images_in_history(reflection_lm.history)
    # The direct-call proposer is expected to leak images as serialized text.
    assert images_in_history.has_text_serialized_images, (
        "Expected to find serialized images (CUSTOM-TYPE-START-IDENTIFIER)"
    )
@pytest.mark.parametrize("reasoning", [True, False])
def test_default_proposer(reasoning: bool, caplog):
    """Run GEPA's default proposer on image inputs, with and without reasoning.

    The default proposer serializes images into the reflection prompt as text
    (marked with CUSTOM-TYPE-START-IDENTIFIER), and the run must complete
    without internal reflection/proposal errors being logged.
    """
    student = dspy.Predict("image -> label")
    image = dspy.Image("https://picsum.photos/id/237/200/300")
    examples = [
        dspy.Example(image=image, label="cat").with_inputs("image"),
        dspy.Example(image=image, label="animal").with_inputs("image"),
    ]

    lm = DummyLM(
        [
            {"label": "cat"},
            {"label": "dog"},
            {"label": "animal"},
            {"label": "pet"},
            {"label": "feline"},
            {"label": "mammal"},
            {"label": "creature"},
            {"label": "species"},
            {"label": "domestic"},
            {"label": "wild"},
            {"label": "carnivore"},
            {"label": "herbivore"},
        ]
    )
    dspy.configure(lm=lm)
    reflection_lm = DummyLM(
        [
            {"improved_instruction": "Be more specific about image analysis"},
            {"improved_instruction": "Focus on visual features when classifying"},
            {"improved_instruction": "Consider contextual clues in the image"},
            {"improved_instruction": "Analyze shape, color, and texture patterns"},
            {"improved_instruction": "Look for distinguishing characteristics"},
        ],
        reasoning=reasoning,
    )
    gepa = dspy.GEPA(
        metric=lambda gold, pred, trace=None, pred_name=None, pred_trace=None: 0.3,
        max_metric_calls=5,
        reflection_lm=reflection_lm,
    )

    with caplog.at_level(logging.INFO, logger="dspy.teleprompt.gepa.gepa"):
        # Let logs propagate up to root because gepa uses try-catch and logs the error
        # https://github.com/gepa-ai/gepa/blob/1b5eff5133be1015210e0512953c25a4b85ad454/src/gepa/proposer/reflective_mutation/reflective_mutation.py#L128
        dspy_logger = logging.getLogger("dspy")
        original_propagate = dspy_logger.propagate
        dspy_logger.propagate = True
        try:
            gepa.compile(student, trainset=examples, valset=examples)
        finally:
            # Restore propagation even if compile() raises, so the setting does not
            # leak into other tests (previously it was only restored on success).
            dspy_logger.propagate = original_propagate

    # Check that no internal GEPA reflection errors occurred
    assert "Exception during reflection/proposal" not in caplog.text

    assert len(lm.history) > 0, "LM should have been called"
    assert len(reflection_lm.history) > 0, "Reflection LM should have been called"
    images_in_history = check_images_in_history(reflection_lm.history)
    assert images_in_history.has_text_serialized_images, (
        "Expected to find serialized images (CUSTOM-TYPE-START-IDENTIFIER)"
    )
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/teleprompt/test_gepa_instruction_proposer.py",
"license": "MIT License",
"lines": 300,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/teleprompt/bootstrap_trace.py | import logging
from dataclasses import dataclass
from types import MethodType
from typing import Any, Callable, TypedDict
import dspy
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.example import Example
from dspy.primitives.module import Module
from dspy.primitives.prediction import Prediction
from dspy.utils.exceptions import AdapterParseError
logger = logging.getLogger(__name__)
@dataclass
class FailedPrediction:
    """Stand-in prediction for an LM response the adapter could not parse."""

    # Raw completion text returned by the LM.
    completion_text: str
    # Partial-credit formatting score; None when no reward was computed.
    format_reward: float | None = None
class TraceData(TypedDict):
    """One bootstrapped run: the example, its prediction, execution trace, and score."""

    example_ind: int  # index of the example within the input dataset
    example: Example  # the original example
    prediction: Prediction  # program output (a FailedPrediction on parse failure)
    trace: list[tuple[Any, dict[str, Any], Prediction]]  # (predictor, inputs, outputs) triples
    score: float | None  # metric score; only populated when a metric was supplied
def bootstrap_trace_data(
    program: Module,
    dataset: list[Example],
    metric: Callable | None = None,
    num_threads: int | None = None,
    raise_on_error=True,
    capture_failed_parses=False,
    failure_score: float = 0,
    format_failure_score: float = -1,
    log_format_failures: bool = False,
    callback_metadata: dict[str, Any] | None = None,
) -> list[TraceData]:
    """Run ``program`` over ``dataset`` and collect per-example execution traces.

    Returns a list of dicts with keys ``example_ind``, ``example``, ``prediction``,
    ``trace``, and ``score`` (the latter only when ``metric`` is provided).

    Adapter parse failures are captured as ``FailedPrediction`` entries carrying a
    partial-credit ``format_reward`` instead of raising, so downstream consumers
    (e.g. GRPO bootstrapping) can still learn from malformed completions.

    Args:
        program: The DSPy module to execute.
        dataset: Examples to run the program on.
        metric: Optional metric; when given, each result dict carries a "score".
        num_threads: Thread count forwarded to ``Evaluate``.
        raise_on_error: Re-raise unpacking errors instead of skipping the example.
        capture_failed_parses: Kept for interface compatibility; parse failures
            are always captured via the AdapterParseError path below.
        failure_score: Score assigned to failed examples.
        format_failure_score: Floor score for completely unparseable outputs.
        log_format_failures: Emit a warning when a completion fails to parse.
        callback_metadata: Extra metadata forwarded to evaluation callbacks.
    """
    evaluator = Evaluate(
        devset=dataset,
        num_threads=num_threads,
        display_progress=True,
        provide_traceback=False,  # TODO(check with team)
        max_errors=len(dataset) * 10,  # TODO(check with team)
        failure_score=failure_score,
    )

    def wrapped_metric(example, prediction, trace=None):
        # The patched forward returns (prediction, trace); unwrap before scoring.
        prediction, _ = prediction
        if isinstance(prediction, FailedPrediction):
            return prediction.format_reward or format_failure_score
        return metric(example, prediction, trace) if metric else True

    # Use `object.__getattribute__` to bypass the custom hook `Module.__getattribute__` so that we avoid
    # the warning that `forward` is not accessed through `__call__`.
    original_forward = object.__getattribute__(program, "forward")

    def patched_forward(program_to_use: Module, **kwargs):
        with dspy.context(trace=[]):
            try:
                return original_forward(**kwargs), dspy.settings.trace.copy()
            except AdapterParseError as e:
                completion_str = e.lm_response
                parsed_result = e.parsed_result
                failed_signature = e.signature
                failed_inputs = kwargs

                present = list(parsed_result.keys()) if parsed_result else None
                expected = list(failed_signature.output_fields.keys())

                found_pred = None
                for pred in program_to_use.predictors():
                    if pred.signature == failed_signature:
                        found_pred = pred
                        break
                if found_pred is None:
                    raise ValueError(f"Failed to find the predictor for the failed signature: {failed_signature}")

                trace = dspy.settings.trace.copy()
                # Trace is Tuple[signature, inputs, prediction outputs]
                if present:
                    # Partial credit: interpolate between the format-failure floor and the
                    # failure score by the fraction of expected output fields that parsed.
                    # (Fixed: the original divided the lists themselves — `present / expected`
                    # raises TypeError — instead of dividing their lengths.)
                    failed_pred = FailedPrediction(
                        completion_text=completion_str,
                        format_reward=format_failure_score
                        + (failure_score - format_failure_score) * (len(present) / len(expected)),
                    )
                else:
                    failed_pred = FailedPrediction(completion_text=completion_str, format_reward=format_failure_score)

                trace.append(
                    (
                        found_pred,
                        failed_inputs,
                        failed_pred,
                    )
                )

                if log_format_failures:
                    # Use the module logger (not the root logger) for consistency
                    # with the warning emitted in the unpacking loop below.
                    logger.warning(
                        "Failed to parse output for example. This is likely due to the LLM response not following "
                        "the adapter's formatting."
                    )
                return failed_pred, trace

    program.forward = MethodType(patched_forward, program)

    try:
        results = evaluator(
            program,
            metric=wrapped_metric,
            callback_metadata=callback_metadata,
        ).results
    finally:
        # Always restore the original forward, even if evaluation raises.
        program.forward = original_forward

    data = []
    for example_ind, (example, prediction, score) in enumerate(results):
        try:
            prediction, trace = prediction
        except ValueError as ve:
            # TODO(GRPO Team): Often during GRPO bootstrapping, the LLM response does not follow dspy formatting. This
            # leads to a value error. To reproduce this issue, try Qwen/Qwen2.5-Coder-0.5B-Instruct with MATH dataset.
            # Proposal(Lakshya): We should capture the incorrectly-formatted LLM response, and store it in the trace,
            # and pass it to in the GRPO group with a high-negative user-configurable score.
            logger.warning(
                "Failed to unpack prediction and trace. This is likely due to the LLM response not following "
                "dspy formatting."
            )
            if raise_on_error:
                raise ve
            else:
                continue
        data_dict = {"example": example, "prediction": prediction, "trace": trace, "example_ind": example_ind}
        if metric:
            data_dict["score"] = score
        data.append(data_dict)
    return data
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/teleprompt/bootstrap_trace.py",
"license": "MIT License",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/teleprompt/test_bootstrap_trace.py | from typing import Any
from unittest import mock
from litellm import Choices, Message, ModelResponse
import dspy
from dspy.primitives.example import Example
from dspy.teleprompt.bootstrap_trace import FailedPrediction, bootstrap_trace_data
def test_bootstrap_trace_data():
    """Test bootstrap_trace_data function with single dspy.Predict program."""

    # Define signature for string -> int conversion
    class StringToIntSignature(dspy.Signature):
        """Convert a string number to integer"""

        text: str = dspy.InputField()
        number: int = dspy.OutputField()

    # Create program with single dspy.Predict
    program = dspy.Predict(StringToIntSignature)
    # Create dummy dataset of size 5
    dataset = [
        Example(text="one", number=1).with_inputs("text"),
        Example(text="two", number=2).with_inputs("text"),
        Example(text="three", number=3).with_inputs("text"),
        Example(text="four", number=4).with_inputs("text"),
        Example(text="five", number=5).with_inputs("text"),
    ]

    # Define exact match metric
    def exact_match_metric(example, prediction, trace=None):
        return example.number == prediction.number

    # Configure dspy
    dspy.configure(lm=dspy.LM(model="openai/gpt-4o-mini", cache=False), adapter=dspy.JSONAdapter())
    # Mock litellm completion responses
    # 4 successful responses and 1 that will trigger AdapterParseError
    successful_responses = [
        ModelResponse(
            choices=[Choices(message=Message(content='```json\n{"number": 1}\n```'))],
            model="openai/gpt-4o-mini",
        ),
        ModelResponse(
            choices=[Choices(message=Message(content='```json\n{"number": 2}\n```'))],
            model="openai/gpt-4o-mini",
        ),
        ModelResponse(
            choices=[Choices(message=Message(content='```json\n{"number": 3}\n```'))],
            model="openai/gpt-4o-mini",
        ),
        ModelResponse(
            choices=[Choices(message=Message(content='```json\n{"number": 4}\n```'))],
            model="openai/gpt-4o-mini",
        ),
    ]

    # Create a side effect that will trigger AdapterParseError on the 3rd call (index 2)
    # NOTE(review): the branch below actually fires on call index 5 (not 2), and only
    # four successful responses exist for five examples — this presumably relies on
    # extra adapter-level calls/retries to line up with the intended failing example;
    # confirm against JSONAdapter's call/retry behavior.
    def completion_side_effect(*args, **kwargs):
        call_count = completion_side_effect.call_count
        completion_side_effect.call_count += 1
        if call_count == 5:  # Third call (0-indexed)
            # Return malformed response that will cause AdapterParseError
            return ModelResponse(
                choices=[Choices(message=Message(content="This is an invalid JSON!"))],
                model="openai/gpt-4o-mini",
            )
        else:
            return successful_responses[call_count]

    completion_side_effect.call_count = 0

    with mock.patch("litellm.completion", side_effect=completion_side_effect):
        # Call bootstrap_trace_data
        results = bootstrap_trace_data(
            program=program,
            dataset=dataset,
            metric=exact_match_metric,
            raise_on_error=False,
            capture_failed_parses=True,
        )

    # Verify results
    assert len(results) == 5, f"Expected 5 results, got {len(results)}"

    # Count successful and failed predictions
    successful_count = 0
    failed_count = 0

    for result in results:
        # Every result dict carries the full TraceData shape (score included,
        # since a metric was passed).
        assert "example" in result
        assert "prediction" in result
        assert "trace" in result
        assert "example_ind" in result
        assert "score" in result

        if isinstance(result["prediction"], FailedPrediction):
            failed_count += 1
            # Verify failed prediction structure
            assert hasattr(result["prediction"], "completion_text")
            assert hasattr(result["prediction"], "format_reward")
            assert result["prediction"].completion_text == "This is an invalid JSON!"
        else:
            successful_count += 1
            # Verify successful prediction structure
            assert hasattr(result["prediction"], "number")

    # Verify we have the expected number of successful and failed bootstrapping
    assert successful_count == 4, f"Expected 4 successful predictions, got {successful_count}"
    assert failed_count == 1, f"Expected 1 failed prediction, got {failed_count}"

    # Verify that traces are present
    for result in results:
        assert len(result["trace"]) > 0, "Trace should not be empty"
        # Each trace entry should be a tuple of (predictor, inputs, prediction)
        for trace_entry in result["trace"]:
            assert len(trace_entry) == 3, "Trace entry should have 3 elements"
def test_bootstrap_trace_data_passes_callback_metadata(monkeypatch):
    """callback_metadata given to bootstrap_trace_data must reach the evaluator call."""
    from dspy.teleprompt import bootstrap_trace as bootstrap_trace_module

    class DummyProgram(dspy.Module):
        def forward(self, **kwargs):  # pragma: no cover - stub forward
            return dspy.Prediction()

    seen: dict[str, Any] = {}

    class RecordingEvaluate:
        # Stands in for Evaluate: records the callback_metadata it receives
        # and returns an empty result set.
        def __init__(self, *args, **kwargs):
            pass

        def __call__(self, *args, callback_metadata=None, **kwargs):
            seen["value"] = callback_metadata

            class _Result:
                results: list[Any] = []

            return _Result()

    monkeypatch.setattr(bootstrap_trace_module, "Evaluate", RecordingEvaluate)

    bootstrap_trace_module.bootstrap_trace_data(
        program=DummyProgram(),
        dataset=[],
        callback_metadata={"disable_logging": True},
    )

    assert seen["value"] == {"disable_logging": True}
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/teleprompt/test_bootstrap_trace.py",
"license": "MIT License",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/teleprompt/gepa/gepa.py | import inspect
import logging
import random
from dataclasses import dataclass
from typing import Any, Literal, Optional, Protocol, Union
from gepa import GEPAResult
from gepa.core.adapter import ProposalFn
from gepa.proposer.reflective_mutation.base import ReflectionComponentSelector
from dspy.clients.lm import LM
from dspy.primitives import Example, Module, Prediction
from dspy.teleprompt.gepa.gepa_utils import DspyAdapter, DSPyTrace, PredictorFeedbackFn, ScoreWithFeedback
from dspy.teleprompt.teleprompt import Teleprompter
from dspy.utils.annotation import experimental
logger = logging.getLogger(__name__)
# Budget presets for the `auto` option ("light"/"medium"/"heavy").
# NOTE(review): `n` is presumably consumed by the metric-call budget computation
# elsewhere in this module — confirm against the compile()/auto-budget logic.
AUTO_RUN_SETTINGS = {
    "light": {"n": 6},
    "medium": {"n": 12},
    "heavy": {"n": 18},
}
@experimental(version="3.0.0")
class GEPAFeedbackMetric(Protocol):
    """Callable protocol for GEPA metrics that may return a score plus textual feedback."""

    def __call__(
        self,
        gold: Example,
        pred: Prediction,
        trace: Optional["DSPyTrace"],
        pred_name: str | None,
        pred_trace: Optional["DSPyTrace"],
    ) -> Union[float, "ScoreWithFeedback"]:
        """
        This function is called with the following arguments:
        - gold: The gold example.
        - pred: The predicted output.
        - trace: Optional. The trace of the program's execution.
        - pred_name: Optional. The name of the target predictor currently being optimized by GEPA, for which
          the feedback is being requested.
        - pred_trace: Optional. The trace of the target predictor's execution GEPA is seeking feedback for.

        Note the `pred_name` and `pred_trace` arguments. During optimization, GEPA will call the metric to obtain
        feedback for individual predictors being optimized. GEPA provides the name of the predictor in `pred_name`
        and the sub-trace (of the trace) corresponding to the predictor in `pred_trace`.

        If available at the predictor level, the metric should return dspy.Prediction(score: float, feedback: str)
        corresponding to the predictor.

        If not available at the predictor level, the metric can also return a text feedback at the program level
        (using just the gold, pred and trace).

        If no feedback is returned, GEPA will use a simple text feedback consisting of just the score:
        f"This trajectory got a score of {score}."
        """
        ...
@experimental(version="3.0.0")
@dataclass(frozen=True)
class DspyGEPAResult:
    """
    Additional data related to the GEPA run.

    Fields:
    - candidates: list of proposed candidates (component_name -> component_text)
    - parents: lineage info; for each candidate i, parents[i] is a list of parent indices or None
    - val_aggregate_scores: per-candidate aggregate score on the validation set (higher is better)
    - val_subscores: per-candidate per-instance scores on the validation set (len == num_val_instances)
    - per_val_instance_best_candidates: for each val instance t, a set of candidate indices achieving the best score on t
    - discovery_eval_counts: Budget (number of metric calls / rollouts) consumed up to the discovery of each candidate

    - total_metric_calls: total number of metric calls made across the run
    - num_full_val_evals: number of full validation evaluations performed
    - log_dir: where artifacts were written (if any)
    - seed: RNG seed for reproducibility (if known)

    - best_idx: candidate index with the highest val_aggregate_scores
    - best_candidate: the program text mapping for best_idx
    """

    # Data about the proposed candidates
    candidates: list[Module]
    parents: list[list[int | None]]
    val_aggregate_scores: list[float]
    val_subscores: list[list[float]]
    per_val_instance_best_candidates: list[set[int]]
    discovery_eval_counts: list[int]

    # Optional data
    best_outputs_valset: list[list[tuple[int, list[Prediction]]]] | None = None

    # Optimization metadata
    total_metric_calls: int | None = None
    num_full_val_evals: int | None = None
    log_dir: str | None = None
    seed: int | None = None

    @property
    def best_idx(self) -> int:
        """Index of the candidate with the highest aggregate validation score."""
        scores = self.val_aggregate_scores
        return max(range(len(scores)), key=lambda i: scores[i])

    @property
    def best_candidate(self) -> dict[str, str]:
        """The candidate at ``best_idx``."""
        return self.candidates[self.best_idx]

    @property
    def highest_score_achieved_per_val_task(self) -> list[float]:
        """Best score achieved on each validation instance by any candidate.

        For each instance, reads the subscore of an arbitrary member of that
        instance's best-candidate set (presumably all members tie at the best
        score for that instance).
        """
        return [
            self.val_subscores[list(self.per_val_instance_best_candidates[val_idx])[0]][val_idx]
            for val_idx in range(len(self.val_subscores[0]))
        ]

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-dict view of this result (sets converted to lists)."""
        # Copy each candidate's mapping into a fresh plain dict.
        cands = [{k: v for k, v in cand.items()} for cand in self.candidates]
        return dict(
            candidates=cands,
            parents=self.parents,
            val_aggregate_scores=self.val_aggregate_scores,
            best_outputs_valset=self.best_outputs_valset,
            val_subscores=self.val_subscores,
            per_val_instance_best_candidates=[list(s) for s in self.per_val_instance_best_candidates],
            discovery_eval_counts=self.discovery_eval_counts,
            total_metric_calls=self.total_metric_calls,
            num_full_val_evals=self.num_full_val_evals,
            log_dir=self.log_dir,
            seed=self.seed,
            best_idx=self.best_idx,
        )

    @staticmethod
    def from_gepa_result(gepa_result: "GEPAResult", adapter: "DspyAdapter") -> "DspyGEPAResult":
        """Build a ``DspyGEPAResult`` from a core-library ``GEPAResult``.

        Uses ``adapter.build_program`` to turn each raw candidate mapping into
        a DSPy program.
        """
        return DspyGEPAResult(
            candidates=[adapter.build_program(c) for c in gepa_result.candidates],
            parents=gepa_result.parents,
            val_aggregate_scores=gepa_result.val_aggregate_scores,
            best_outputs_valset=gepa_result.best_outputs_valset,
            val_subscores=gepa_result.val_subscores,
            per_val_instance_best_candidates=gepa_result.per_val_instance_best_candidates,
            discovery_eval_counts=gepa_result.discovery_eval_counts,
            total_metric_calls=gepa_result.total_metric_calls,
            num_full_val_evals=gepa_result.num_full_val_evals,
            log_dir=gepa_result.run_dir,
            seed=gepa_result.seed,
        )
@experimental(version="3.0.0")
class GEPA(Teleprompter):
"""
GEPA is an evolutionary optimizer, which uses reflection to evolve text components
of complex systems. GEPA is proposed in the paper [GEPA: Reflective Prompt Evolution Can Outperform Reinforcement Learning](https://arxiv.org/abs/2507.19457).
The GEPA optimization engine is provided by the `gepa` package, available from [https://github.com/gepa-ai/gepa](https://github.com/gepa-ai/gepa).
GEPA captures full traces of the DSPy module's execution, identifies the parts of the trace
corresponding to a specific predictor, and reflects on the behaviour of the predictor to
propose a new instruction for the predictor. GEPA allows users to provide textual feedback
to the optimizer, which is used to guide the evolution of the predictor. The textual feedback
can be provided at the granularity of individual predictors, or at the level of the entire system's
execution.
To provide feedback to the GEPA optimizer, implement a metric as follows:
```
def metric(
gold: Example,
pred: Prediction,
trace: Optional[DSPyTrace] = None,
pred_name: Optional[str] = None,
pred_trace: Optional[DSPyTrace] = None,
) -> float | ScoreWithFeedback:
\"""
This function is called with the following arguments:
- gold: The gold example.
- pred: The predicted output.
- trace: Optional. The trace of the program's execution.
- pred_name: Optional. The name of the target predictor currently being optimized by GEPA, for which
the feedback is being requested.
- pred_trace: Optional. The trace of the target predictor's execution GEPA is seeking feedback for.
Note the `pred_name` and `pred_trace` arguments. During optimization, GEPA will call the metric to obtain
feedback for individual predictors being optimized. GEPA provides the name of the predictor in `pred_name`
and the sub-trace (of the trace) corresponding to the predictor in `pred_trace`.
If available at the predictor level, the metric should return {'score': float, 'feedback': str} corresponding
to the predictor.
If not available at the predictor level, the metric can also return a text feedback at the program level
(using just the gold, pred and trace).
If no feedback is returned, GEPA will use a simple text feedback consisting of just the score:
f"This trajectory got a score of {score}."
\"""
...
```
GEPA can also be used as a batch inference-time search strategy, by passing `valset=trainset, track_stats=True, track_best_outputs=True`, and using the
`detailed_results` attribute of the optimized program (returned by `compile`) to get the Pareto frontier of the batch. `optimized_program.detailed_results.best_outputs_valset` will contain the best outputs for each task in the batch.
Example:
```
gepa = GEPA(metric=metric, track_stats=True)
batch_of_tasks = [dspy.Example(...) for task in tasks]
new_prog = gepa.compile(student, trainset=trainset, valset=batch_of_tasks)
pareto_frontier = new_prog.detailed_results.val_aggregate_scores
# pareto_frontier is a list of scores, one for each task in the batch.
```
Args:
metric: The metric function to use for feedback and evaluation.
auto: The auto budget to use for the run. Options: "light", "medium", "heavy".
max_full_evals: The maximum number of full evaluations to perform.
max_metric_calls: The maximum number of metric calls to perform.
reflection_minibatch_size: The number of examples to use for reflection in a single GEPA step. Default is 3.
candidate_selection_strategy: The strategy to use for candidate selection. Default is "pareto",
which stochastically selects candidates from the Pareto frontier of all validation scores.
Options: "pareto", "current_best".
reflection_lm: The language model to use for reflection. Required parameter. GEPA benefits from
a strong reflection model. Consider using `dspy.LM(model='gpt-5', temperature=1.0, max_tokens=32000)`
for optimal performance.
skip_perfect_score: Whether to skip examples with perfect scores during reflection. Default is True.
instruction_proposer: Optional custom instruction proposer implementing GEPA's ProposalFn protocol.
**Default: None (recommended for most users)** - Uses GEPA's proven instruction proposer from
the [GEPA library](https://github.com/gepa-ai/gepa), which implements the
[`ProposalFn`](https://github.com/gepa-ai/gepa/blob/main/src/gepa/core/adapter.py). This default
proposer is highly capable and was validated across diverse experiments reported in the GEPA
paper and tutorials.
See documentation on custom instruction proposers
[here](https://dspy.ai/api/optimizers/GEPA/GEPA_Advanced/#custom-instruction-proposers).
**Advanced Feature**: Only needed for specialized scenarios:
- **Multi-modal handling**: Processing dspy.Image inputs alongside textual information
- **Nuanced control over constraints**: Fine-grained control over instruction length, format,
and structural requirements beyond standard feedback mechanisms
- **Domain-specific knowledge injection**: Specialized terminology or context that cannot be
provided through feedback_func alone
- **Provider-specific prompting**: Optimizations for specific LLM providers (OpenAI, Anthropic)
with unique formatting preferences
- **Coupled component updates**: Coordinated updates of multiple components together rather
than independent optimization
- **External knowledge integration**: Runtime access to databases, APIs, or knowledge bases
The default proposer handles the vast majority of use cases effectively. Use
MultiModalInstructionProposer() from dspy.teleprompt.gepa.instruction_proposal for visual
content or implement custom ProposalFn for highly specialized requirements.
Note: When both instruction_proposer and reflection_lm are set, the instruction_proposer is called
in the reflection_lm context. However, reflection_lm is optional when using a custom instruction_proposer.
Custom instruction proposers can invoke their own LLMs if needed.
component_selector: Custom component selector implementing the [ReflectionComponentSelector](https://github.com/gepa-ai/gepa/blob/main/src/gepa/proposer/reflective_mutation/base.py) protocol,
or a string specifying a built-in selector strategy. Controls which components (predictors) are selected
for optimization at each iteration. Defaults to 'round_robin' strategy which cycles through components
one at a time. Available string options: 'round_robin' (cycles through components sequentially),
'all' (selects all components for simultaneous optimization). Custom selectors can implement strategies
using LLM-driven selection logic based on optimization state and trajectories.
See [gepa component selectors](https://github.com/gepa-ai/gepa/blob/main/src/gepa/strategies/component_selector.py)
for available built-in selectors and the ReflectionComponentSelector protocol for implementing custom selectors.
add_format_failure_as_feedback: Whether to add format failures as feedback. Default is False.
use_merge: Whether to use merge-based optimization. Default is True.
max_merge_invocations: The maximum number of merge invocations to perform. Default is 5.
num_threads: The number of threads to use for evaluation with `Evaluate`. Optional.
failure_score: The score to assign to failed examples. Default is 0.0.
perfect_score: The maximum score achievable by the metric. Default is 1.0. Used by GEPA
to determine if all examples in a minibatch are perfect.
log_dir: The directory to save the logs. GEPA saves elaborate logs, along with all candidate
programs, in this directory. Running GEPA with the same `log_dir` will resume the run
from the last checkpoint.
track_stats: Whether to return detailed results and all proposed programs in the `detailed_results`
attribute of the optimized program. Default is False.
use_wandb: Whether to use wandb for logging. Default is False.
wandb_api_key: The API key to use for wandb. If not provided, wandb will use the API key
from the environment variable `WANDB_API_KEY`.
wandb_init_kwargs: Additional keyword arguments to pass to `wandb.init`.
track_best_outputs: Whether to track the best outputs on the validation set. track_stats must
be True if track_best_outputs is True. The optimized program's `detailed_results.best_outputs_valset`
will contain the best outputs for each task in the validation set.
warn_on_score_mismatch: GEPA (currently) expects the metric to return the same module-level score when
called with and without the pred_name. This flag (defaults to True) determines whether a warning is
raised if a mismatch in module-level and predictor-level score is detected.
seed: The random seed to use for reproducibility. Default is 0.
gepa_kwargs: (Optional) Additional keyword arguments to pass directly to [gepa.optimize](https://github.com/gepa-ai/gepa/blob/main/src/gepa/api.py).
Useful for accessing advanced GEPA features not directly exposed through DSPy's GEPA interface.
Available parameters:
- batch_sampler: Strategy for selecting training examples. Can be a [BatchSampler](https://github.com/gepa-ai/gepa/blob/main/src/gepa/strategies/batch_sampler.py) instance or a string
('epoch_shuffled'). Defaults to 'epoch_shuffled'. Only valid when reflection_minibatch_size is None.
- merge_val_overlap_floor: Minimum number of shared validation ids required between parents before
attempting a merge subsample. Only relevant when using `val_evaluation_policy` other than 'full_eval'.
Default is 5.
- stop_callbacks: Optional stopper(s) that return True when optimization should stop. Can be a single
[StopperProtocol](https://github.com/gepa-ai/gepa/blob/main/src/gepa/utils/stop_condition.py) or a list of StopperProtocol instances.
Examples: [FileStopper](https://github.com/gepa-ai/gepa/blob/main/src/gepa/utils/stop_condition.py),
[TimeoutStopCondition](https://github.com/gepa-ai/gepa/blob/main/src/gepa/utils/stop_condition.py),
[SignalStopper](https://github.com/gepa-ai/gepa/blob/main/src/gepa/utils/stop_condition.py),
[NoImprovementStopper](https://github.com/gepa-ai/gepa/blob/main/src/gepa/utils/stop_condition.py),
or custom stopping logic. Note: This overrides the default
max_metric_calls stopping condition.
- use_cloudpickle: Use cloudpickle instead of pickle for serialization. Can be helpful when the
serialized state contains dynamically generated DSPy signatures. Default is False.
- val_evaluation_policy: Strategy controlling which validation ids to score each iteration. Can be
'full_eval' (evaluate every id each time) or an [EvaluationPolicy](https://github.com/gepa-ai/gepa/blob/main/src/gepa/strategies/eval_policy.py) instance. Default is 'full_eval'.
- use_mlflow: If True, enables MLflow integration to log optimization progress.
MLflow can be used alongside Weights & Biases (WandB).
- mlflow_tracking_uri: The tracking URI to use for MLflow (when use_mlflow=True).
- mlflow_experiment_name: The experiment name to use for MLflow (when use_mlflow=True).
Note: Parameters already handled by DSPy's GEPA class will be overridden by the direct parameters
and should not be passed through gepa_kwargs.
Note:
Budget Configuration: Exactly one of `auto`, `max_full_evals`, or `max_metric_calls` must be provided.
The `auto` parameter provides preset configurations: "light" for quick experimentation, "medium" for
balanced optimization, and "heavy" for thorough optimization.
Reflection Configuration: The `reflection_lm` parameter is required and should be a strong language model.
GEPA performs best with models like `dspy.LM(model='gpt-5', temperature=1.0, max_tokens=32000)`.
The reflection process analyzes failed examples to generate feedback for program improvement.
Merge Configuration: GEPA can merge successful program variants using `use_merge=True`.
The `max_merge_invocations` parameter controls how many merge attempts are made during optimization.
Evaluation Configuration: Use `num_threads` to parallelize evaluation. The `failure_score` and
`perfect_score` parameters help GEPA understand your metric's range and optimize accordingly.
Logging Configuration: Set `log_dir` to save detailed logs and enable checkpoint resuming.
Use `track_stats=True` to access detailed optimization results via the `detailed_results` attribute.
Enable `use_wandb=True` for experiment tracking and visualization.
Reproducibility: Set `seed` to ensure consistent results across runs with the same configuration.
"""
    def __init__(
        self,
        metric: GEPAFeedbackMetric,
        *,
        # Budget configuration
        auto: Literal["light", "medium", "heavy"] | None = None,
        max_full_evals: int | None = None,
        max_metric_calls: int | None = None,
        # Reflection configuration
        reflection_minibatch_size: int = 3,
        candidate_selection_strategy: Literal["pareto", "current_best"] = "pareto",
        reflection_lm: LM | None = None,
        skip_perfect_score: bool = True,
        add_format_failure_as_feedback: bool = False,
        instruction_proposer: "ProposalFn | None" = None,
        component_selector: "ReflectionComponentSelector | str" = "round_robin",
        # Merge-based configuration
        use_merge: bool = True,
        max_merge_invocations: int | None = 5,
        # Evaluation configuration
        num_threads: int | None = None,
        failure_score: float = 0.0,
        perfect_score: float = 1.0,
        # Logging
        log_dir: str | None = None,
        track_stats: bool = False,
        use_wandb: bool = False,
        wandb_api_key: str | None = None,
        wandb_init_kwargs: dict[str, Any] | None = None,
        track_best_outputs: bool = False,
        warn_on_score_mismatch: bool = True,
        use_mlflow: bool = False,
        # Reproducibility
        seed: int | None = 0,
        # GEPA passthrough kwargs
        gepa_kwargs: dict | None = None,
    ):
        """Validate arguments and store the optimizer configuration.

        See the class docstring for the meaning of each parameter. Raises
        TypeError if `metric` does not accept five positional arguments, and
        AssertionError on inconsistent budget / reflection / tracking settings.
        """
        # Fail fast if the metric does not follow the required
        # (gold, pred, trace, pred_name, pred_trace) calling convention.
        try:
            inspect.signature(metric).bind(None, None, None, None, None)
        except TypeError as e:
            raise TypeError(
                "GEPA metric must accept five arguments: (gold, pred, trace, pred_name, pred_trace). "
                "See https://dspy.ai/api/optimizers/GEPA for details."
            ) from e
        self.metric_fn = metric

        # Budget configuration: exactly one of the three budget knobs may be set
        # (booleans sum to 1 only when a single condition holds).
        assert (max_metric_calls is not None) + (max_full_evals is not None) + (auto is not None) == 1, (
            "Exactly one of max_metric_calls, max_full_evals, auto must be set. "
            f"You set max_metric_calls={max_metric_calls}, "
            f"max_full_evals={max_full_evals}, "
            f"auto={auto}."
        )
        self.auto = auto
        self.max_full_evals = max_full_evals
        self.max_metric_calls = max_metric_calls

        # Reflection configuration
        self.reflection_minibatch_size = reflection_minibatch_size
        self.candidate_selection_strategy = candidate_selection_strategy
        # A reflection LM is only optional when a custom instruction proposer
        # takes over instruction generation (it may call its own LM).
        assert reflection_lm is not None or instruction_proposer is not None, (
            "GEPA requires a reflection language model, or custom instruction proposer to be provided. "
            "Typically, you can use `dspy.LM(model='gpt-5', temperature=1.0, max_tokens=32000)` to get a good reflection model. "
            "Reflection LM is used by GEPA to reflect on the behavior of the program and propose new instructions, and will benefit from a strong model. "
        )
        self.reflection_lm = reflection_lm
        self.skip_perfect_score = skip_perfect_score
        self.add_format_failure_as_feedback = add_format_failure_as_feedback

        # Merge-based configuration
        self.use_merge = use_merge
        self.max_merge_invocations = max_merge_invocations

        # Evaluation Configuration
        self.num_threads = num_threads
        self.failure_score = failure_score
        self.perfect_score = perfect_score

        # Logging configuration
        self.log_dir = log_dir
        self.track_stats = track_stats
        self.use_wandb = use_wandb
        self.wandb_api_key = wandb_api_key
        self.wandb_init_kwargs = wandb_init_kwargs
        self.warn_on_score_mismatch = warn_on_score_mismatch
        self.use_mlflow = use_mlflow

        # Best-output tracking is stored inside detailed_results, so it
        # requires track_stats to be enabled.
        if track_best_outputs:
            assert track_stats, "track_stats must be True if track_best_outputs is True."
        self.track_best_outputs = track_best_outputs

        # Reproducibility
        self.seed = seed

        self.custom_instruction_proposer = instruction_proposer
        self.component_selector = component_selector
        self.gepa_kwargs = gepa_kwargs or {}
def auto_budget(
self, num_preds, num_candidates, valset_size: int, minibatch_size: int = 35, full_eval_steps: int = 5
) -> int:
import numpy as np
num_trials = int(max(2 * (num_preds * 2) * np.log2(num_candidates), 1.5 * num_candidates))
if num_trials < 0 or valset_size < 0 or minibatch_size < 0:
raise ValueError("num_trials, valset_size, and minibatch_size must be >= 0.")
if full_eval_steps < 1:
raise ValueError("full_eval_steps must be >= 1.")
V = valset_size
N = num_trials
M = minibatch_size
m = full_eval_steps
# Initial full evaluation on the default program
total = V
# Assume upto 5 trials for bootstrapping each candidate
total += num_candidates * 5
# N minibatch evaluations
total += N * M
if N == 0:
return total # no periodic/full evals inside the loop
# Periodic full evals occur when trial_num % (m+1) == 0, where trial_num runs 2..N+1
periodic_fulls = (N + 1) // (m) + 1
# If 1 <= N < m, the code triggers one final full eval at the end
extra_final = 1 if N < m else 0
total += (periodic_fulls + extra_final) * V
return total
    def compile(
        self,
        student: Module,
        *,
        trainset: list[Example],
        teacher: Module | None = None,
        valset: list[Example] | None = None,
    ) -> Module:
        """
        GEPA uses the trainset to perform reflective updates to the prompt, but uses the valset for tracking Pareto scores.

        If no valset is provided, GEPA will use the trainset for both.

        Parameters:
        - student: The student module to optimize.
        - trainset: The training set to use for reflective updates.
        - teacher: Not supported; must be None.
        - valset: The validation set to use for tracking Pareto scores. If not provided, GEPA will use the trainset for both.

        Returns:
        - The optimized module; when track_stats=True it also carries a
          `detailed_results` attribute with the full GEPA result.
        """
        from gepa import GEPAResult, optimize

        from dspy.teleprompt.gepa.gepa_utils import DspyAdapter, LoggerAdapter

        assert trainset is not None and len(trainset) > 0, "Trainset must be provided and non-empty"
        assert teacher is None, "Teacher is not supported in DspyGEPA yet."

        # Resolve the budget: `auto` presets and `max_full_evals` are both
        # converted into an absolute `max_metric_calls` figure.
        if self.auto is not None:
            self.max_metric_calls = self.auto_budget(
                num_preds=len(student.predictors()),
                num_candidates=AUTO_RUN_SETTINGS[self.auto]["n"],
                valset_size=len(valset) if valset is not None else len(trainset),
            )
        elif self.max_full_evals is not None:
            self.max_metric_calls = self.max_full_evals * (len(trainset) + (len(valset) if valset is not None else 0))
        else:
            assert self.max_metric_calls is not None, "Either auto, max_full_evals, or max_metric_calls must be set."

        logger.info(
            f"Running GEPA for approx {self.max_metric_calls} metric calls of the program. This amounts to {self.max_metric_calls / len(trainset) if valset is None else self.max_metric_calls / (len(trainset) + len(valset)):.2f} full evals on the {'train' if valset is None else 'train+val'} set."
        )

        if valset is None:
            logger.warning(
                "No valset provided; Using trainset as valset. This is useful as an inference-time scaling strategy where you want GEPA to find the best solutions for the provided tasks in the trainset, as it makes GEPA overfit prompts to the provided trainset. In order to ensure generalization and perform well on unseen tasks, please provide separate trainset and valset. Provide the smallest valset that is just large enough to match the downstream task distribution, while keeping trainset as large as possible."
            )
        valset = valset or trainset
        logger.info(
            f"Using {len(valset)} examples for tracking Pareto scores. You can consider using a smaller sample of the valset to allow GEPA to explore more diverse solutions within the same budget. GEPA requires you to provide the smallest valset that is just large enough to match your downstream task distribution, while providing as large trainset as possible."
        )

        rng = random.Random(self.seed)

        def feedback_fn_creator(pred_name: str, predictor) -> "PredictorFeedbackFn":
            # Wraps the user metric so GEPA can ask for feedback on a single
            # predictor: the metric is invoked with this predictor's name and a
            # one-step trace containing only its invocation.
            def feedback_fn(
                predictor_output: dict[str, Any],
                predictor_inputs: dict[str, Any],
                module_inputs: Example,
                module_outputs: Prediction,
                captured_trace: "DSPyTrace",
            ) -> "ScoreWithFeedback":
                trace_for_pred = [(predictor, predictor_inputs, predictor_output)]
                o = self.metric_fn(
                    module_inputs,
                    module_outputs,
                    captured_trace,
                    pred_name,
                    trace_for_pred,
                )
                if hasattr(o, "feedback"):
                    # Metric returned a ScoreWithFeedback; synthesize default
                    # feedback text if it left the field empty.
                    if o["feedback"] is None:
                        o["feedback"] = f"This trajectory got a score of {o['score']}."
                    return o
                else:
                    # Plain numeric score: wrap it with a default feedback string.
                    return dict(score=o, feedback=f"This trajectory got a score of {o}.")

            return feedback_fn

        feedback_map = {k: feedback_fn_creator(k, v) for k, v in student.named_predictors()}

        # Build the DSPy adapter that encapsulates evaluation, trace capture, feedback extraction, and instruction proposal
        adapter = DspyAdapter(
            student_module=student,
            metric_fn=self.metric_fn,
            feedback_map=feedback_map,
            failure_score=self.failure_score,
            num_threads=self.num_threads,
            add_format_failure_as_feedback=self.add_format_failure_as_feedback,
            rng=rng,
            reflection_lm=self.reflection_lm,
            custom_instruction_proposer=self.custom_instruction_proposer,
            warn_on_score_mismatch=self.warn_on_score_mismatch,
            reflection_minibatch_size=self.reflection_minibatch_size,
        )

        # Build the seed candidate: map each predictor name to its current instruction
        seed_candidate = {name: pred.signature.instructions for name, pred in student.named_predictors()}

        gepa_result: GEPAResult = optimize(
            seed_candidate=seed_candidate,
            trainset=trainset,
            valset=valset,
            adapter=adapter,
            # Reflection-based configuration
            reflection_lm=(lambda x: adapter.stripped_lm_call(x)[0]) if self.reflection_lm is not None else None,
            candidate_selection_strategy=self.candidate_selection_strategy,
            skip_perfect_score=self.skip_perfect_score,
            reflection_minibatch_size=self.reflection_minibatch_size,
            module_selector=self.component_selector,
            perfect_score=self.perfect_score,
            # Merge-based configuration
            use_merge=self.use_merge,
            max_merge_invocations=self.max_merge_invocations,
            # Budget
            max_metric_calls=self.max_metric_calls,
            # Logging
            logger=LoggerAdapter(logger),
            run_dir=self.log_dir,
            use_wandb=self.use_wandb,
            wandb_api_key=self.wandb_api_key,
            wandb_init_kwargs=self.wandb_init_kwargs,
            use_mlflow=self.use_mlflow,
            track_best_outputs=self.track_best_outputs,
            display_progress_bar=True,
            raise_on_exception=True,
            # Reproducibility
            seed=self.seed,
            **self.gepa_kwargs,
        )

        # Materialize the winning candidate back into a dspy program.
        new_prog = adapter.build_program(gepa_result.best_candidate)

        if self.track_stats:
            dspy_gepa_result = DspyGEPAResult.from_gepa_result(gepa_result, adapter)
            new_prog.detailed_results = dspy_gepa_result

        return new_prog
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/teleprompt/gepa/gepa.py",
"license": "MIT License",
"lines": 521,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:dspy/teleprompt/gepa/gepa_utils.py | import logging
import random
from typing import Any, Callable, Protocol, TypedDict
from gepa import EvaluationBatch, GEPAAdapter
from gepa.core.adapter import ProposalFn
from gepa.strategies.instruction_proposal import InstructionProposalSignature
import dspy
from dspy.adapters.chat_adapter import ChatAdapter
from dspy.adapters.types import History
from dspy.adapters.types.base_type import Type
from dspy.evaluate import Evaluate
from dspy.primitives import Example, Prediction
from dspy.teleprompt.bootstrap_trace import FailedPrediction, TraceData
logger = logging.getLogger(__name__)
class LoggerAdapter:
    """Adapts a stdlib ``logging.Logger`` to the minimal ``log(str)`` interface
    expected by the gepa engine."""

    def __init__(self, logger: logging.Logger):
        # Keep a reference to the wrapped logger; messages are forwarded as-is.
        self.logger = logger

    def log(self, x: str):
        """Forward ``x`` to the wrapped logger at INFO level."""
        self.logger.info(x)
DSPyTrace = list[tuple[Any, dict[str, Any], Prediction]]
ReflectiveExample = TypedDict(
"ReflectiveExample",
{
"Inputs": dict[str, Any],
"Generated Outputs": dict[str, Any] | str,
"Feedback": str,
},
)
ReflectiveExample.__doc__ = """
Structure of individual examples in the reflective dataset.
Each example contains the predictor inputs, generated outputs, and feedback from evaluation.
"""
class ScoreWithFeedback(Prediction):
    """A Prediction carrying a numeric score plus feedback text for GEPA."""

    # Module-level score for the rollout.
    score: float
    # Textual guidance used to evolve the predictor's instruction.
    feedback: str
class PredictorFeedbackFn(Protocol):
    """Callable protocol for per-predictor feedback providers."""

    def __call__(
        self,
        predictor_output: dict[str, Any],
        predictor_inputs: dict[str, Any],
        module_inputs: Example,
        module_outputs: Prediction,
        captured_trace: DSPyTrace,
    ) -> ScoreWithFeedback:
        """
        This function is used to provide feedback to a specific predictor.
        The function is called with the following arguments:
        - predictor_output: The output of the predictor.
        - predictor_inputs: The inputs to the predictor.
        - module_inputs: The inputs to the whole program --- `Example`.
        - module_outputs: The outputs of the whole program --- `Prediction`.
        - captured_trace: The trace of the module's execution.
        # Shape of trace is: [predictor_invocation_idx -> Tuple[Predictor, PredictorInputs, Prediction]]
        # Each trace is a tuple of (Predictor, PredictorInputs, Prediction)

        The function should return a `ScoreWithFeedback` object.
        The feedback is a string that is used to guide the evolution of the predictor.
        """
        ...
class DspyAdapter(GEPAAdapter[Example, TraceData, Prediction]):
    """GEPA adapter bridging dspy programs to the `gepa` optimization engine.

    Encapsulates candidate-program construction (`build_program`), evaluation
    with optional trace capture (`evaluate`), reflective-dataset construction
    (`make_reflective_dataset`), and instruction proposal (`propose_new_texts`).
    """

    def __init__(
        self,
        student_module,
        metric_fn: Callable,
        feedback_map: dict[str, Callable],
        failure_score=0.0,
        num_threads: int | None = None,
        add_format_failure_as_feedback: bool = False,
        rng: random.Random | None = None,
        reflection_lm=None,
        custom_instruction_proposer: "ProposalFn | None" = None,
        warn_on_score_mismatch: bool = True,
        reflection_minibatch_size: int | None = None,
    ):
        """Store the student program, metric, feedback providers, and settings.

        Args:
            student_module: The dspy program being optimized.
            metric_fn: Module-level metric `(gold, pred, trace, pred_name, pred_trace)`.
            feedback_map: Per-predictor-name feedback functions.
            failure_score: Score assigned to rollouts that produced no score.
            num_threads: Parallelism for evaluation; None uses the default.
            add_format_failure_as_feedback: Keep failed-parse traces in the
                reflective dataset (with structure-fix feedback) when True.
            rng: Optional RNG; defaults to a seeded `random.Random(0)`.
            reflection_lm: LM used for reflection; falls back to dspy.settings.lm.
            custom_instruction_proposer: Optional proposer that overrides the
                default instruction-proposal flow entirely.
            warn_on_score_mismatch: Warn once if predictor-level and
                module-level scores disagree.
            reflection_minibatch_size: Used to decide evaluation callback
                metadata (full eval vs. quiet minibatch).
        """
        self.student = student_module
        self.metric_fn = metric_fn
        self.feedback_map = feedback_map
        self.failure_score = failure_score
        self.num_threads = num_threads
        self.add_format_failure_as_feedback = add_format_failure_as_feedback
        self.rng = rng or random.Random(0)
        self.reflection_lm = reflection_lm
        self.custom_instruction_proposer = custom_instruction_proposer
        self.warn_on_score_mismatch = warn_on_score_mismatch
        self.reflection_minibatch_size = reflection_minibatch_size

    def propose_new_texts(
        self,
        candidate: dict[str, str],
        reflective_dataset: dict[str, list[dict[str, Any]]],
        components_to_update: list[str],
    ) -> dict[str, str]:
        """Propose a new instruction text for each component to update."""
        reflection_lm = self.reflection_lm or dspy.settings.lm

        # If custom proposer provided, override everything with custom proposer
        if self.custom_instruction_proposer:
            with dspy.context(lm=reflection_lm):
                return self.custom_instruction_proposer(
                    candidate=candidate,
                    reflective_dataset=reflective_dataset,
                    components_to_update=components_to_update,
                )

        results: dict[str, str] = {}
        with dspy.context(lm=reflection_lm):
            for name in components_to_update:
                base_instruction = candidate[name]
                dataset_with_feedback = reflective_dataset[name]
                # stripped_lm_call normalizes LM output to plain strings;
                # the proposal signature expects a single completion.
                results[name] = InstructionProposalSignature.run(
                    lm=(lambda x: self.stripped_lm_call(x)[0]),
                    input_dict={
                        "current_instruction_doc": base_instruction,
                        "dataset_with_feedback": dataset_with_feedback,
                    },
                )["new_instruction"]
        return results

    def build_program(self, candidate: dict[str, str]):
        """Return a deep copy of the student with candidate instructions applied."""
        new_prog = self.student.deepcopy()
        for name, pred in new_prog.named_predictors():
            if name in candidate:
                pred.signature = pred.signature.with_instructions(candidate[name])
        return new_prog

    def evaluate(self, batch, candidate, capture_traces=False):
        """Evaluate a candidate program on a batch, optionally capturing traces."""
        program = self.build_program(candidate)

        # Full-batch evaluations get a metric key for progress logging;
        # reflection minibatches are evaluated quietly.
        callback_metadata = (
            {"metric_key": "eval_full"}
            if self.reflection_minibatch_size is None or len(batch) > self.reflection_minibatch_size
            else {"disable_logging": True}
        )

        if capture_traces:
            # bootstrap_trace_data-like flow with trace capture
            from dspy.teleprompt import bootstrap_trace as bootstrap_trace_module

            trajs = bootstrap_trace_module.bootstrap_trace_data(
                program=program,
                dataset=batch,
                metric=self.metric_fn,
                num_threads=self.num_threads,
                raise_on_error=False,
                capture_failed_parses=True,
                failure_score=self.failure_score,
                format_failure_score=self.failure_score,
                callback_metadata=callback_metadata,
            )
            scores = []
            outputs = []
            for t in trajs:
                outputs.append(t["prediction"])
                # A missing score means the rollout failed; fall back to
                # failure_score. (The previous `hasattr(..., "__class__")`
                # guard was always True and has been removed.)
                if t.get("score") is None:
                    scores.append(self.failure_score)
                else:
                    score = t["score"]
                    # ScoreWithFeedback-style objects carry the numeric value
                    # under their "score" key.
                    if hasattr(score, "score"):
                        score = score["score"]
                    scores.append(score)
            return EvaluationBatch(outputs=outputs, scores=scores, trajectories=trajs)
        else:
            evaluator = Evaluate(
                devset=batch,
                metric=self.metric_fn,
                num_threads=self.num_threads,
                return_all_scores=True,
                failure_score=self.failure_score,
                provide_traceback=True,
                max_errors=len(batch) * 100,
                callback_metadata=callback_metadata,
            )
            res = evaluator(program)
            outputs = [r[1] for r in res.results]
            scores = [r[2] for r in res.results]
            scores = [s["score"] if hasattr(s, "score") else s for s in scores]
            return EvaluationBatch(outputs=outputs, scores=scores, trajectories=None)

    def make_reflective_dataset(
        self, candidate, eval_batch, components_to_update
    ) -> dict[str, list[ReflectiveExample]]:
        """Build per-predictor reflective examples (inputs, outputs, feedback)
        from the captured trajectories of an evaluation batch."""
        program = self.build_program(candidate)

        ret_d: dict[str, list[ReflectiveExample]] = {}
        for pred_name in components_to_update:
            # Find the predictor object
            module = None
            for name, m in program.named_predictors():
                if name == pred_name:
                    module = m
                    break
            assert module is not None, f"Predictor not found: {pred_name}"

            # Create reflective examples from traces
            items: list[ReflectiveExample] = []
            for data in eval_batch.trajectories or []:
                trace = data["trace"]
                example = data["example"]
                prediction = data["prediction"]
                module_score = data["score"]
                if hasattr(module_score, "score"):
                    module_score = module_score["score"]

                # Only trace steps belonging to this predictor's signature.
                trace_instances = [t for t in trace if t[0].signature.equals(module.signature)]
                if not self.add_format_failure_as_feedback:
                    trace_instances = [t for t in trace_instances if not isinstance(t[2], FailedPrediction)]
                if len(trace_instances) == 0:
                    continue

                # Prefer a failed-parse step (most informative); otherwise pick
                # a random successful step for a successful rollout.
                selected = None
                for t in trace_instances:
                    if isinstance(t[2], FailedPrediction):
                        selected = t
                        break

                if selected is None:
                    if isinstance(prediction, FailedPrediction):
                        continue
                    selected = self.rng.choice(trace_instances)

                inputs = selected[1]
                outputs = selected[2]

                new_inputs = {}
                new_outputs = {}

                # Render any History input as a single "Context" entry first.
                contains_history = False
                history_key_name = None
                for input_key, input_val in inputs.items():
                    if isinstance(input_val, History):
                        contains_history = True
                        assert history_key_name is None
                        history_key_name = input_key

                if contains_history:
                    s = "```json\n"
                    for i, message in enumerate(inputs[history_key_name].messages):
                        s += f" {i}: {message}\n"
                    s += "```"
                    new_inputs["Context"] = s

                for input_key, input_val in inputs.items():
                    if contains_history and input_key == history_key_name:
                        continue
                    if isinstance(input_val, Type) and self.custom_instruction_proposer is not None:
                        # Keep original object - will be properly formatted when sent to reflection LM
                        new_inputs[input_key] = input_val
                    else:
                        new_inputs[input_key] = str(input_val)

                if isinstance(outputs, FailedPrediction):
                    s = "Couldn't parse the output as per the expected output format. The model's raw response was:\n"
                    s += "```\n"
                    s += outputs.completion_text + "\n"
                    s += "```\n\n"
                    new_outputs = s
                else:
                    for output_key, output_val in outputs.items():
                        new_outputs[output_key] = str(output_val)

                d = {"Inputs": new_inputs, "Generated Outputs": new_outputs}
                if isinstance(outputs, FailedPrediction):
                    # Failed parse: feedback is the expected chat structure.
                    adapter = ChatAdapter()
                    structure_instruction = ""
                    for dd in adapter.format(module.signature, [], {}):
                        structure_instruction += dd["role"] + ": " + dd["content"] + "\n"
                    d["Feedback"] = "Your output failed to parse. Follow this structure:\n" + structure_instruction
                else:
                    feedback_fn = self.feedback_map[pred_name]
                    fb = feedback_fn(
                        predictor_output=outputs,
                        predictor_inputs=inputs,
                        module_inputs=example,
                        module_outputs=prediction,
                        captured_trace=trace,
                    )
                    d["Feedback"] = fb["feedback"]
                    # GEPA only supports module-level scores: if the
                    # predictor-level score differs, warn once and override it.
                    if fb["score"] != module_score:
                        if self.warn_on_score_mismatch:
                            logger.warning(
                                "The score returned by the metric with pred_name is different from the overall metric score. This can indicate 2 things: Either the metric is non-deterministic (e.g., LLM-as-judge, Semantic score, etc.) or the metric returned a score specific to pred_name that differs from the module level score. Currently, GEPA does not support predictor level scoring (support coming soon), and only requires a feedback text to be provided, which can be specific to the predictor or program level. GEPA will ignore the differing score returned, and instead use module level score. You can safely ignore this warning if using a semantic metric, however, if this mismatch is caused due to predictor scoring, please return module-level scores. To disable this warning, set warn_on_score_mismatch=False."
                            )
                            self.warn_on_score_mismatch = False
                        fb["score"] = module_score

                items.append(d)

            if len(items) == 0:
                logger.warning(f" No valid reflective examples found for {pred_name}")
                continue
            ret_d[pred_name] = items

        if len(ret_d) == 0:
            raise Exception("No valid predictions found for any module.")

        return ret_d

    # Always return strings from the LM outputs
    # Even when it returns a dict with e.g., "text" and "reasoning" fields
    def stripped_lm_call(self, x: str) -> list[str]:
        """Call the reflection LM and normalize each completion to plain text.

        Raises:
            KeyError: If a dict completion lacks a "text" field.
            TypeError: If a completion is neither str nor dict.
        """
        raw_outputs = self.reflection_lm(x)
        outputs = []
        for raw_output in raw_outputs:
            # isinstance (not exact type comparison) also accepts str/dict
            # subclasses that some LM clients return.
            if isinstance(raw_output, str):
                outputs.append(raw_output)
            elif isinstance(raw_output, dict):
                if "text" not in raw_output:
                    raise KeyError("Missing 'text' field in the output from the base LM!")
                outputs.append(raw_output["text"])
            else:
                raise TypeError("Unexpected output type from the base LM! Expected str or dict")
        return outputs
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/teleprompt/gepa/gepa_utils.py",
"license": "MIT License",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/teleprompt/test_bootstrap_finetune.py | from unittest.mock import patch
import dspy
from dspy import Example
from dspy.predict import Predict
from dspy.teleprompt import BootstrapFinetune
from dspy.utils.dummies import DummyLM
# A minimal exact-match metric shared by the tests below.
def simple_metric(example, prediction, trace=None):
    """Return True when the prediction's output matches the gold output."""
    expected = example.output
    actual = prediction.output
    return expected == actual
# Shared fixtures: two single-turn QA examples; only the first is used for training.
examples = [
    Example(input="What is the color of the sky?", output="blue").with_inputs("input"),
    Example(input="What does the fox say?", output="Ring-ding-ding-ding-dingeringeding!").with_inputs("input"),
]
trainset = [examples[0]]
def test_bootstrap_finetune_initialization():
    """Test BootstrapFinetune initialization with various parameters."""
    bootstrap = BootstrapFinetune(metric=simple_metric)
    assert bootstrap.metric == simple_metric, "Metric not correctly initialized"
    # `is True` avoids the equality-to-True anti-pattern (ruff E712) and also
    # checks that the default is a real bool, not just a truthy value.
    assert bootstrap.multitask is True, "Multitask should default to True"
class SimpleModule(dspy.Module):
    """Minimal one-predictor module used as both student and teacher in tests."""

    def __init__(self, signature):
        super().__init__()
        # Single Predict node built from a signature string, e.g. "input -> output".
        self.predictor = Predict(signature)

    def forward(self, **kwargs):
        # Delegate directly to the predictor; kwargs are the signature's input fields.
        return self.predictor(**kwargs)
def test_compile_with_predict_instances():
    """Test BootstrapFinetune compilation with Predict instances."""
    # Create SimpleModule instances for student and teacher
    student = SimpleModule("input -> output")
    teacher = SimpleModule("input -> output")
    lm = DummyLM([{"output": "blue"}, {"output": "Ring-ding-ding-ding-dingeringeding!"}])
    dspy.configure(lm=lm)

    # Set LM for both student and teacher
    student.set_lm(lm)
    teacher.set_lm(lm)

    bootstrap = BootstrapFinetune(metric=simple_metric)

    # Mock the fine-tuning process since DummyLM doesn't support it
    with patch.object(bootstrap, "finetune_lms") as mock_finetune:
        # finetune_lms maps (lm, data_key) pairs to the finetuned LM; here the
        # "finetuned" model is just the original DummyLM.
        mock_finetune.return_value = {(lm, None): lm}
        compiled_student = bootstrap.compile(student, teacher=teacher, trainset=trainset)

        assert compiled_student is not None, "Failed to compile student"
        assert hasattr(compiled_student, "_compiled") and compiled_student._compiled, "Student compilation flag not set"
        mock_finetune.assert_called_once()
def test_error_handling_missing_lm():
    """Test error handling when predictor doesn't have an LM assigned."""
    lm = DummyLM([{"output": "test"}])
    dspy.configure(lm=lm)

    student = SimpleModule("input -> output")
    # Intentionally NOT setting LM for the student module

    bootstrap = BootstrapFinetune(metric=simple_metric)

    # This should raise ValueError about missing LM and hint to use set_lm
    try:
        bootstrap.compile(student, trainset=trainset)
    except ValueError as e:
        assert "does not have an LM assigned" in str(e)
        assert "set_lm" in str(e)
    else:
        # `raise` instead of `assert False` so the failure survives `python -O`
        # (flake8-bugbear B011) and reads as an explicit missed-exception check.
        raise AssertionError("Should have raised ValueError for missing LM")
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/teleprompt/test_bootstrap_finetune.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/utils/hasher.py | from pickle import dumps
from typing import Any
import xxhash
"""
The following class was pulled from the `datasets` package from Hugging Face.
The reason for vendoring this code is to avoid a hard dependency on `datasets`,
which is a large package that is not needed for the majority of use cases.
License: Apache License 2.0
Author: Hugging Face Inc.
URL: https://github.com/huggingface/datasets/blob/fa73ab472eecf9136a3daf7a0fbff16a3dffa7a6/src/datasets/fingerprint.py#L170
Changes: 2025-08-10 - Ran ruff to format the code to DSPy styles.
"""
class Hasher:
    """Hasher that accepts python objects as inputs."""

    # Kept for API compatibility with the upstream `datasets` implementation.
    dispatch: dict = {}

    def __init__(self):
        """Start from a fresh xxhash64 running state."""
        self.m = xxhash.xxh64()

    @classmethod
    def hash_bytes(cls, value: bytes | list[bytes]) -> str:
        """Return a hex digest for one or more byte chunks.

        Args:
            value: A single bytes object or a list of bytes to hash in order.

        Returns:
            The xxhash64 hexadecimal digest.
        """
        chunks = [value] if isinstance(value, bytes) else value
        digest = xxhash.xxh64()
        for chunk in chunks:
            digest.update(chunk)
        return digest.hexdigest()

    @classmethod
    def hash(cls, value: Any) -> str:
        """Serialize a pickle-able object and return its xxhash64 hex digest."""
        return cls.hash_bytes(dumps(value))

    def update(self, value: Any) -> None:
        """Fold a typed object payload into the running digest.

        The object's type is mixed in as a header so that equal payloads of
        different types produce different digests.
        """
        type_header = f"=={type(value)}=="
        payload_digest = self.hash(value)
        self.m.update(type_header.encode("utf8"))
        self.m.update(payload_digest.encode("utf-8"))

    def hexdigest(self) -> str:
        """Return the hexadecimal digest of the current hasher state."""
        return self.m.hexdigest()
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/utils/hasher.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
stanfordnlp/dspy:dspy/adapters/baml_adapter.py | """
Custom adapter for improving structured outputs using the information from Pydantic models.
Based on the format used by BAML: https://github.com/BoundaryML/baml
"""
import inspect
import types
from typing import Any, Literal, Union, get_args, get_origin
from pydantic import BaseModel
from dspy.adapters.json_adapter import JSONAdapter
from dspy.adapters.utils import format_field_value as original_format_field_value
from dspy.signatures.signature import Signature
# Changing the comment symbol to Python's # rather than other languages' // seems to help
COMMENT_SYMBOL = "#"
INDENTATION = " "
def _render_type_str(
annotation: Any,
depth: int = 0,
indent: int = 0,
seen_models: set[type] | None = None,
) -> str:
"""Recursively renders a type annotation into a simplified string.
Args:
annotation: The type annotation to render
depth: Current recursion depth (prevents infinite recursion)
indent: Current indentation level for nested structures
"""
# Non-nested types
if annotation is str:
return "string"
if annotation is int:
return "int"
if annotation is float:
return "float"
if annotation is bool:
return "boolean"
if inspect.isclass(annotation) and issubclass(annotation, BaseModel):
return _build_simplified_schema(annotation, indent, seen_models)
try:
origin = get_origin(annotation)
args = get_args(annotation)
except Exception:
return str(annotation)
# Optional[T] or T | None
if origin in (types.UnionType, Union):
non_none_args = [arg for arg in args if arg is not type(None)]
# Render the non-None part of the union
type_render = " or ".join([_render_type_str(arg, depth + 1, indent, seen_models) for arg in non_none_args])
# Add "or null" if None was part of the union
if len(non_none_args) < len(args):
return f"{type_render} or null"
return type_render
# Literal[T1, T2, ...]
if origin is Literal:
return " or ".join(f'"{arg}"' for arg in args)
# list[T]
if origin is list:
# For Pydantic models in lists, use bracket notation
inner_type = args[0]
if inspect.isclass(inner_type) and issubclass(inner_type, BaseModel):
# Build inner schema - the Pydantic model inside should use indent level for array contents
inner_schema = _build_simplified_schema(inner_type, indent + 1, seen_models)
# Format with proper bracket notation and indentation
current_indent = INDENTATION * indent
return f"[\n{inner_schema}\n{current_indent}]"
else:
return f"{_render_type_str(inner_type, depth + 1, indent, seen_models)}[]"
# dict[T1, T2]
if origin is dict:
return f"dict[{_render_type_str(args[0], depth + 1, indent, seen_models)}, {_render_type_str(args[1], depth + 1, indent, seen_models)}]"
# fallback
if hasattr(annotation, "__name__"):
return annotation.__name__
return str(annotation)
def _build_simplified_schema(
    pydantic_model: type[BaseModel],
    indent: int = 0,
    seen_models: set[type] | None = None,
) -> str:
    """Builds a simplified, human-readable schema from a Pydantic model.

    Args:
        pydantic_model: The Pydantic model to build schema for.
        indent: Current indentation level.
        seen_models: Models currently being rendered on this path; used to
            detect recursive (self-referential) models.

    Returns:
        A brace-delimited, comment-annotated schema string.

    Raises:
        ValueError: If the model directly or indirectly references itself.
    """
    seen_models = seen_models or set()
    if pydantic_model in seen_models:
        raise ValueError("BAMLAdapter cannot handle recursive pydantic models, please use a different adapter.")
    # Track this model while its subtree renders so a cycle back to it raises.
    seen_models.add(pydantic_model)
    try:
        lines = []
        current_indent = INDENTATION * indent
        next_indent = INDENTATION * (indent + 1)
        # Add model docstring as a comment above the object if it exists.
        # Only do this for top-level schemas (indent=0), since nested field
        # docstrings are already added before the field name in the parent schema.
        if indent == 0 and pydantic_model.__doc__:
            docstring = pydantic_model.__doc__.strip()
            # Handle multiline docstrings by prefixing each line with the comment symbol
            for line in docstring.split("\n"):
                line = line.strip()
                if line:
                    lines.append(f"{current_indent}{COMMENT_SYMBOL} {line}")
        lines.append(f"{current_indent}{{")
        fields = pydantic_model.model_fields
        if not fields:
            lines.append(f"{next_indent}{COMMENT_SYMBOL} No fields defined")
        for name, field in fields.items():
            if field.description:
                lines.append(f"{next_indent}{COMMENT_SYMBOL} {field.description}")
            elif field.alias and field.alias != name:
                # If there's an alias but no description, show the alias as a comment
                lines.append(f"{next_indent}{COMMENT_SYMBOL} alias: {field.alias}")
            # If the field type is a BaseModel, add its docstring as a comment before the field.
            # Unwrap Optional[...] first so the nested model's docstring is still found.
            field_annotation = field.annotation
            origin = get_origin(field_annotation)
            if origin in (types.UnionType, Union):
                args = get_args(field_annotation)
                non_none_args = [arg for arg in args if arg is not type(None)]
                if len(non_none_args) == 1:
                    field_annotation = non_none_args[0]
            if inspect.isclass(field_annotation) and issubclass(field_annotation, BaseModel):
                if field_annotation.__doc__:
                    docstring = field_annotation.__doc__.strip()
                    for line in docstring.split("\n"):
                        line = line.strip()
                        if line:
                            lines.append(f"{next_indent}{COMMENT_SYMBOL} {line}")
            rendered_type = _render_type_str(field.annotation, indent=indent + 1, seen_models=seen_models)
            lines.append(f"{next_indent}{name}: {rendered_type},")
        lines.append(f"{current_indent}}}")
        return "\n".join(lines)
    finally:
        # Remove the model once its subtree is rendered: the same model may
        # legitimately appear again in a *sibling* field without being
        # recursive. Previously the model was never removed, so duplicates in
        # sibling fields falsely raised the "recursive" ValueError.
        seen_models.discard(pydantic_model)
class BAMLAdapter(JSONAdapter):
    """
    A DSPy adapter that improves the rendering of complex/nested Pydantic models to help LMs.

    This adapter generates a compact, human-readable schema representation for nested Pydantic output
    fields, inspired by the BAML project's JSON formatter (https://github.com/BoundaryML/baml).
    The resulting rendered schema is more token-efficient and easier for smaller LMs to follow than a
    raw JSON schema. It also includes Pydantic field descriptions as comments in the schema, which
    provide valuable additional context for the LM to understand the expected output.

    Example Usage:
    ```python
    import dspy
    from pydantic import BaseModel, Field
    from typing import Literal
    from baml_adapter import BAMLAdapter  # Import from your module

    # 1. Define your Pydantic models
    class PatientAddress(BaseModel):
        street: str
        city: str
        country: Literal["US", "CA"]

    class PatientDetails(BaseModel):
        name: str = Field(description="Full name of the patient.")
        age: int
        address: PatientAddress | None

    # 2. Define a signature using the Pydantic model as an output field
    class ExtractPatientInfo(dspy.Signature):
        '''Extract patient information from the clinical note.'''
        clinical_note: str = dspy.InputField()
        patient_info: PatientDetails = dspy.OutputField()

    # 3. Configure dspy to use the new adapter
    llm = dspy.OpenAI(model="gpt-4.1-mini")
    dspy.configure(lm=llm, adapter=BAMLAdapter())

    # 4. Run your program
    extractor = dspy.Predict(ExtractPatientInfo)
    note = "John Doe, 45 years old, lives at 123 Main St, Anytown. Resident of the US."
    result = extractor(clinical_note=note)
    print(result.patient_info)
    # Expected output:
    # PatientDetails(name='John Doe', age=45, address=PatientAddress(street='123 Main St', city='Anytown', country='US'))
    ```
    """

    def format_field_structure(self, signature: type[Signature]) -> str:
        """Overrides the base method to generate a simplified schema for Pydantic models.

        Returns:
            A prompt section describing the `[[ ## field ## ]]` message layout,
            with each output field annotated by its simplified type rendering.
        """
        sections = []

        # Add structural explanation
        sections.append(
            "All interactions will be structured in the following way, with the appropriate values filled in.\n"
        )

        # Add input structure section: each input gets a header plus a
        # placeholder that is substituted with the actual value at format time.
        if signature.input_fields:
            for name in signature.input_fields.keys():
                sections.append(f"[[ ## {name} ## ]]")
                sections.append(f"{{{name}}}")
                sections.append("")  # Empty line after each input

        # Add output structure section
        if signature.output_fields:
            for name, field in signature.output_fields.items():
                field_type = field.annotation
                sections.append(f"[[ ## {name} ## ]]")
                sections.append(f"Output field `{name}` should be of type: {_render_type_str(field_type, indent=0)}\n")

        # Add completed section
        sections.append("[[ ## completed ## ]]")

        return "\n".join(sections)

    def format_user_message_content(
        self,
        signature: type[Signature],
        inputs: dict[str, Any],
        prefix: str = "",
        suffix: str = "",
        main_request: bool = False,
    ) -> str:
        """Overrides the base method to render Pydantic input instances as clean JSON.

        Args:
            signature: The signature whose input fields are being rendered.
            inputs: Mapping of input field names to their values.
            prefix: Text placed before the rendered fields.
            suffix: Text placed after the rendered fields.
            main_request: When True, append the output-format requirements.
        """
        messages = [prefix]
        for key, field_info in signature.input_fields.items():
            if key in inputs:
                value = inputs.get(key)
                formatted_value = ""
                if isinstance(value, BaseModel):
                    # Use clean, indented JSON for Pydantic instances
                    formatted_value = value.model_dump_json(indent=2, by_alias=True)
                else:
                    # Fallback to the original dspy formatter for other types
                    formatted_value = original_format_field_value(field_info=field_info, value=value)
                messages.append(f"[[ ## {key} ## ]]\n{formatted_value}")

        if main_request:
            output_requirements = self.user_message_output_requirements(signature)
            if output_requirements is not None:
                messages.append(output_requirements)

        messages.append(suffix)
        # Drop empty segments (e.g. a blank prefix/suffix) before joining.
        return "\n\n".join(m for m in messages if m).strip()
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/adapters/baml_adapter.py",
"license": "MIT License",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/adapters/test_baml_adapter.py | from typing import Literal
from unittest import mock
import pydantic
import pytest
from litellm import Choices, Message
from litellm.files.main import ModelResponse
import dspy
from dspy.adapters.baml_adapter import COMMENT_SYMBOL, INDENTATION, BAMLAdapter
# Test fixtures - Pydantic models for testing
class PatientAddress(pydantic.BaseModel):
    """Patient Address model docstring"""

    street: str
    city: str
    country: Literal["US", "CA"]


class PatientDetails(pydantic.BaseModel):
    """
    Patient Details model docstring
    Multiline docstring support test
    """

    name: str = pydantic.Field(description="Full name of the patient")
    age: int
    # Optional nested model; exercises the "or null" rendering path.
    address: PatientAddress | None = None


class ComplexNestedModel(pydantic.BaseModel):
    """Complex model docstring"""

    id: int = pydantic.Field(description="Unique identifier")
    details: PatientDetails
    tags: list[str] = pydantic.Field(default_factory=list)
    metadata: dict[str, str] = pydantic.Field(default_factory=dict)


class ModelWithLists(pydantic.BaseModel):
    # Exercises bracket notation for lists of models and T[] for scalar lists.
    items: list[PatientAddress] = pydantic.Field(description="List of patient addresses")
    scores: list[float]


class ImageWrapper(pydantic.BaseModel):
    # Wraps dspy.Image values to test custom-type serialization in messages.
    images: list[dspy.Image]
    tag: list[str]


class CircularModel(pydantic.BaseModel):
    # Self-referential model used to assert that the recursion guard raises.
    name: str
    field: "CircularModel"
def test_baml_adapter_basic_schema_generation():
    """Test that BAMLAdapter generates simplified schemas for Pydantic models."""

    class TestSignature(dspy.Signature):
        question: str = dspy.InputField()
        patient: PatientDetails = dspy.OutputField()

    adapter = BAMLAdapter()
    schema = adapter.format_field_structure(TestSignature)

    # Should contain simplified schema with comments
    assert f"{COMMENT_SYMBOL} Full name of the patient" in schema
    assert "name: string," in schema
    assert "age: int," in schema
    assert "address:" in schema
    assert "street: string," in schema
    assert 'country: "US" or "CA",' in schema


def test_baml_adapter_handles_optional_fields():
    """Test optional field rendering with 'or null' syntax."""

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        patient: PatientDetails = dspy.OutputField()

    adapter = BAMLAdapter()
    schema = adapter.format_field_structure(TestSignature)

    # Optional address field should show 'or null'
    assert "address:" in schema
    assert "or null" in schema


def test_baml_adapter_handles_primitive_types():
    """Test rendering of basic primitive types."""

    # Covers every scalar in the primitive -> BAML-name mapping.
    class SimpleModel(pydantic.BaseModel):
        text: str
        number: int
        decimal: float
        flag: bool

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        output: SimpleModel = dspy.OutputField()

    adapter = BAMLAdapter()
    schema = adapter.format_field_structure(TestSignature)

    assert "text: string," in schema
    assert "number: int," in schema
    assert "decimal: float," in schema
    assert "flag: boolean," in schema
def test_baml_adapter_handles_lists_with_bracket_notation():
    """Test that lists of Pydantic models use proper bracket notation."""

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        addresses: ModelWithLists = dspy.OutputField()

    adapter = BAMLAdapter()
    schema = adapter.format_field_structure(TestSignature)

    # Should use bracket notation for lists and include comments
    assert "items: [" in schema
    assert f"{COMMENT_SYMBOL} List of patient addresses" in schema
    assert "street: string," in schema
    assert "city: string," in schema
    assert "]," in schema
    assert "scores: float[]," in schema


def test_baml_adapter_handles_complex_nested_models():
    """Test deeply nested Pydantic model schema generation."""

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        complex: ComplexNestedModel = dspy.OutputField()

    adapter = BAMLAdapter()
    schema = adapter.format_field_structure(TestSignature)

    # Nested model docstrings are emitted as comments directly above the field.
    expected_patient_details = "\n".join([
        f"{INDENTATION}{COMMENT_SYMBOL} Patient Details model docstring",
        f"{INDENTATION}{COMMENT_SYMBOL} Multiline docstring support test",
        f"{INDENTATION}details:",
    ])

    # Should include nested structure with comments
    assert f"{COMMENT_SYMBOL} Unique identifier" in schema
    assert expected_patient_details in schema
    assert f"{COMMENT_SYMBOL} Full name of the patient" in schema
    assert "tags: string[]," in schema
    assert "metadata: dict[string, string]," in schema
    assert f"{COMMENT_SYMBOL} Complex model docstring" in schema
    assert f"{COMMENT_SYMBOL} Patient Address model docstring" in schema


def test_baml_adapter_raise_error_on_circular_references():
    """Test that circular references are handled gracefully."""

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        circular: CircularModel = dspy.OutputField()

    adapter = BAMLAdapter()
    with pytest.raises(ValueError) as error:
        adapter.format_field_structure(TestSignature)
    assert "BAMLAdapter cannot handle recursive pydantic models" in str(error.value)
def test_baml_adapter_formats_pydantic_inputs_as_clean_json():
    """Test that Pydantic input instances are formatted as clean JSON."""

    class TestSignature(dspy.Signature):
        patient: PatientDetails = dspy.InputField()
        question: str = dspy.InputField()
        answer: str = dspy.OutputField()

    adapter = BAMLAdapter()
    patient = PatientDetails(
        name="John Doe", age=45, address=PatientAddress(street="123 Main St", city="Anytown", country="US")
    )

    messages = adapter.format(TestSignature, [], {"patient": patient, "question": "What is the diagnosis?"})

    # Should have clean, indented JSON for Pydantic input
    user_message = messages[-1]["content"]
    assert '"name": "John Doe"' in user_message
    assert '"age": 45' in user_message
    assert '"street": "123 Main St"' in user_message
    assert '"country": "US"' in user_message


def test_baml_adapter_handles_mixed_input_types():
    """Test formatting of mixed Pydantic and primitive inputs."""

    class TestSignature(dspy.Signature):
        patient: PatientDetails = dspy.InputField()
        priority: int = dspy.InputField()
        notes: str = dspy.InputField()
        result: str = dspy.OutputField()

    adapter = BAMLAdapter()
    patient = PatientDetails(name="Jane Doe", age=30)

    messages = adapter.format(TestSignature, [], {"patient": patient, "priority": 1, "notes": "Urgent case"})

    user_message = messages[-1]["content"]
    # Pydantic should be JSON formatted
    assert '"name": "Jane Doe"' in user_message
    # Primitives should be formatted normally
    assert "priority ## ]]\n1" in user_message
    assert "notes ## ]]\nUrgent case" in user_message
def test_baml_adapter_handles_schema_generation_errors_gracefully():
    """Test graceful handling of schema generation errors."""

    class ProblematicModel(pydantic.BaseModel):
        # This might cause issues in schema generation
        field: object

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        output: ProblematicModel = dspy.OutputField()

    adapter = BAMLAdapter()
    # Should not raise an exception
    # NOTE(review): as written this test can never fail -- the except branch
    # swallows every exception and passes. Consider pinning the expected
    # behavior (raise vs. fallback rendering) instead.
    try:
        schema = adapter.format_field_structure(TestSignature)
        # If no exception, schema should at least contain some basic structure
        assert "schema" in schema.lower()
    except Exception:
        # If exception occurs, test passes as we're testing graceful handling
        pass


def test_baml_adapter_raises_on_missing_fields():
    """Test that missing required fields raise appropriate errors."""

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        patient: PatientDetails = dspy.OutputField()
        summary: str = dspy.OutputField()

    adapter = BAMLAdapter()
    # Missing 'summary' field
    completion = '{"patient": {"name": "John", "age": 30}}'

    with pytest.raises(dspy.utils.exceptions.AdapterParseError) as e:
        adapter.parse(TestSignature, completion)

    assert e.value.adapter_name == "JSONAdapter"  # BAMLAdapter inherits from JSONAdapter
    assert "summary" in str(e.value)


def test_baml_adapter_handles_type_casting_errors():
    """Test graceful handling of type casting errors."""

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        patient: PatientDetails = dspy.OutputField()

    adapter = BAMLAdapter()
    # Invalid age type
    completion = '{"patient": {"name": "John", "age": "not_a_number"}}'

    # Should raise ValidationError from Pydantic (which is the expected behavior)
    with pytest.raises((dspy.utils.exceptions.AdapterParseError, pydantic.ValidationError)):
        adapter.parse(TestSignature, completion)
def test_baml_adapter_with_images():
    """Test BAMLAdapter integration with dspy.Image objects."""

    class TestSignature(dspy.Signature):
        image_data: ImageWrapper = dspy.InputField()
        description: str = dspy.OutputField()

    adapter = BAMLAdapter()
    image_wrapper = ImageWrapper(
        images=[dspy.Image(url="https://example.com/image1.jpg"), dspy.Image(url="https://example.com/image2.jpg")],
        tag=["test", "medical"],
    )

    messages = adapter.format(TestSignature, [], {"image_data": image_wrapper})

    # Should contain image URLs in the message content
    # (content is a list of parts here, since images expand to image_url parts)
    user_message = messages[-1]["content"]
    image_contents = [
        content for content in user_message if isinstance(content, dict) and content.get("type") == "image_url"
    ]
    assert len(image_contents) == 2
    assert {"type": "image_url", "image_url": {"url": "https://example.com/image1.jpg"}} in user_message
    assert {"type": "image_url", "image_url": {"url": "https://example.com/image2.jpg"}} in user_message


def test_baml_adapter_with_tools():
    """Test BAMLAdapter integration with dspy.Tool objects."""

    class TestSignature(dspy.Signature):
        question: str = dspy.InputField()
        tools: list[dspy.Tool] = dspy.InputField()
        answer: str = dspy.OutputField()

    def get_patient_info(patient_id: int) -> str:
        """Get patient information by ID"""
        return f"Patient info for ID {patient_id}"

    def schedule_appointment(patient_name: str, date: str) -> str:
        """Schedule an appointment for a patient"""
        return f"Scheduled appointment for {patient_name} on {date}"

    tools = [dspy.Tool(get_patient_info), dspy.Tool(schedule_appointment)]
    adapter = BAMLAdapter()

    messages = adapter.format(TestSignature, [], {"question": "Schedule an appointment for John", "tools": tools})

    # Tool names and docstrings should both surface in the rendered message.
    user_message = messages[-1]["content"]
    assert "get_patient_info" in user_message
    assert "schedule_appointment" in user_message
    assert "Get patient information by ID" in user_message
    assert "Schedule an appointment for a patient" in user_message


def test_baml_adapter_with_code():
    """Test BAMLAdapter integration with dspy.Code objects."""

    # Test with code as input field
    class CodeAnalysisSignature(dspy.Signature):
        code: dspy.Code = dspy.InputField()
        analysis: str = dspy.OutputField()

    adapter = BAMLAdapter()
    messages = adapter.format(CodeAnalysisSignature, [], {"code": "def hello():\n print('Hello, world!')"})

    user_message = messages[-1]["content"]
    assert "def hello():" in user_message
    assert "print('Hello, world!')" in user_message

    # Test with code as output field
    class CodeGenSignature(dspy.Signature):
        task: str = dspy.InputField()
        code: dspy.Code = dspy.OutputField()

    with mock.patch("litellm.completion") as mock_completion:
        mock_completion.return_value = ModelResponse(
            choices=[Choices(message=Message(content='{"code": "print(\\"Generated code\\")"}'))],
            model="openai/gpt-4o-mini",
        )

        result = adapter(
            dspy.LM(model="openai/gpt-4o-mini", cache=False),
            {},
            CodeGenSignature,
            [],
            {"task": "Write a hello world program"},
        )

        assert result[0]["code"].code == 'print("Generated code")'
def test_baml_adapter_with_conversation_history():
    """Test BAMLAdapter integration with dspy.History objects."""

    class TestSignature(dspy.Signature):
        history: dspy.History = dspy.InputField()
        question: str = dspy.InputField()
        answer: str = dspy.OutputField()

    history = dspy.History(
        messages=[
            {"question": "What is the patient's age?", "answer": "45 years old"},
            {"question": "Any allergies?", "answer": "Penicillin allergy"},
        ]
    )

    adapter = BAMLAdapter()
    messages = adapter.format(TestSignature, [], {"history": history, "question": "What medications should we avoid?"})

    # Should format history as separate messages
    assert len(messages) == 6  # system + 2 history pairs + user
    assert "What is the patient's age?" in messages[1]["content"]
    assert '"answer": "45 years old"' in messages[2]["content"]
    assert "Any allergies?" in messages[3]["content"]
    assert '"answer": "Penicillin allergy"' in messages[4]["content"]


# Comparison tests with JSONAdapter
def test_baml_vs_json_adapter_token_efficiency():
    """Test that BAMLAdapter generates more token-efficient schemas."""

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        complex: ComplexNestedModel = dspy.OutputField()

    baml_adapter = BAMLAdapter()
    json_adapter = dspy.JSONAdapter()

    baml_schema = baml_adapter.format_field_structure(TestSignature)
    json_schema = json_adapter.format_field_structure(TestSignature)

    # Simple character count as proxy for token efficiency
    # BAMLAdapter should always produce shorter schemas
    assert len(baml_schema) < len(json_schema)


def test_baml_vs_json_adapter_functional_compatibility():
    """Test that both adapters parse identical outputs to the same results."""

    class TestSignature(dspy.Signature):
        question: str = dspy.InputField()
        patient: PatientDetails = dspy.OutputField()

    baml_adapter = BAMLAdapter()
    json_adapter = dspy.JSONAdapter()

    completion = """{"patient": {
    "name": "Alice Brown",
    "age": 35,
    "address": {"street": "789 Pine St", "city": "Boston", "country": "US"}
    }}"""

    baml_result = baml_adapter.parse(TestSignature, completion)
    json_result = json_adapter.parse(TestSignature, completion)

    # Results should be functionally equivalent
    assert baml_result["patient"].name == json_result["patient"].name
    assert baml_result["patient"].age == json_result["patient"].age
    assert baml_result["patient"].address.street == json_result["patient"].address.street


@pytest.mark.asyncio
async def test_baml_adapter_async_functionality():
    """Test BAMLAdapter async operations."""

    class TestSignature(dspy.Signature):
        question: str = dspy.InputField()
        patient: PatientDetails = dspy.OutputField()

    with mock.patch("litellm.acompletion") as mock_acompletion:
        mock_acompletion.return_value = ModelResponse(
            choices=[Choices(message=Message(content='{"patient": {"name": "John Doe", "age": 28}}'))],
            model="openai/gpt-4o",
        )

        adapter = BAMLAdapter()
        result = await adapter.acall(
            dspy.LM(model="openai/gpt-4o", cache=False), {}, TestSignature, [], {"question": "Extract patient info"}
        )

        assert result[0]["patient"].name == "John Doe"
        assert result[0]["patient"].age == 28
def test_baml_adapter_with_field_aliases():
    """Test BAMLAdapter with Pydantic field aliases."""

    class ModelWithAliases(pydantic.BaseModel):
        full_name: str = pydantic.Field(alias="name")
        patient_age: int = pydantic.Field(alias="age")

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        data: ModelWithAliases = dspy.OutputField()

    adapter = BAMLAdapter()

    # Schema should show aliases in the output structure
    schema = adapter.format_field_structure(TestSignature)
    assert "name:" in schema  # Should use alias, not field name
    assert "age:" in schema  # Should use alias, not field name


def test_baml_adapter_field_alias_without_description():
    """Test BAMLAdapter with field alias present but description absent."""

    class ModelWithAliasNoDescription(pydantic.BaseModel):
        internal_field: str = pydantic.Field(alias="public_name")
        regular_field: int
        field_with_description: str = pydantic.Field(description="This field has a description", alias="desc_field")

    class TestSignature(dspy.Signature):
        input: str = dspy.InputField()
        data: ModelWithAliasNoDescription = dspy.OutputField()

    adapter = BAMLAdapter()
    schema = adapter.format_field_structure(TestSignature)

    # Should show alias as comment when description is absent
    assert f"{COMMENT_SYMBOL} alias: public_name" in schema
    # Should show description comment when present
    assert f"{COMMENT_SYMBOL} This field has a description" in schema
    # Regular field (without alias) should appear in schema but without alias comment
    assert "regular_field: int," in schema
    # Check that regular_field section doesn't have an alias comment
    regular_field_section = schema.split("regular_field: int,")[0].split("\n")[-1]
    assert f"{COMMENT_SYMBOL} alias:" not in regular_field_section


def test_baml_adapter_multiple_pydantic_input_fields():
    """Test that multiple InputField() with Pydantic models are rendered correctly."""

    class UserProfile(pydantic.BaseModel):
        name: str = pydantic.Field(description="User's full name")
        email: str
        age: int

    class SystemConfig(pydantic.BaseModel):
        timeout: int = pydantic.Field(description="Timeout in seconds")
        debug: bool
        endpoints: list[str]

    class TestSignature(dspy.Signature):
        input_1: UserProfile = dspy.InputField(desc="User profile information")
        input_2: SystemConfig = dspy.InputField(desc="System configuration settings")
        result: str = dspy.OutputField(desc="Resulting output after processing")

    adapter = BAMLAdapter()

    # Test schema generation includes headers for ALL input fields
    schema = adapter.format_field_structure(TestSignature)
    assert "[[ ## input_1 ## ]]" in schema  # Should include first input field header
    assert "[[ ## input_2 ## ]]" in schema  # Should include second input field header
    assert "[[ ## result ## ]]" in schema  # Should include output field header
    assert "[[ ## completed ## ]]" in schema  # Should include completed section
    assert "All interactions will be structured in the following way" in schema
    assert "{input_1}" in schema
    assert "{input_2}" in schema
    assert "Output field `result` should be of type: string" in schema

    # Test field descriptions are in the correct method
    field_desc = adapter.format_field_description(TestSignature)
    assert "Your input fields are:" in field_desc
    assert "1. `input_1` (UserProfile): User profile information" in field_desc
    assert "2. `input_2` (SystemConfig): System configuration settings" in field_desc
    assert "Your output fields are:" in field_desc
    assert "1. `result` (str): Resulting output after processing" in field_desc

    # Test message formatting with actual Pydantic instances
    user_profile = UserProfile(name="John Doe", email="john@example.com", age=30)
    system_config = SystemConfig(timeout=300, debug=True, endpoints=["api1", "api2"])

    messages = adapter.format(TestSignature, [], {"input_1": user_profile, "input_2": system_config})

    user_message = messages[-1]["content"]
    # Verify both inputs are rendered with the correct bracket notation
    assert "[[ ## input_1 ## ]]" in user_message
    assert "[[ ## input_2 ## ]]" in user_message
    # Verify JSON content for both inputs
    assert '"name": "John Doe"' in user_message
    assert '"email": "john@example.com"' in user_message
    assert '"age": 30' in user_message
    assert '"timeout": 300' in user_message
    assert '"debug": true' in user_message
    # Endpoints array is formatted with indentation, so check for individual elements
    assert '"api1"' in user_message
    assert '"api2"' in user_message
    assert '"endpoints":' in user_message
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/adapters/test_baml_adapter.py",
"license": "MIT License",
"lines": 412,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/adapters/types/code.py | import re
from typing import Any, ClassVar
import pydantic
from pydantic import create_model
from dspy.adapters.types.base_type import Type
class Code(Type):
    """Code type in DSPy.

    This type is useful for code generation and code analysis.

    Example 1: dspy.Code as output type in code generation:

    ```python
    import dspy

    dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))


    class CodeGeneration(dspy.Signature):
        '''Generate python code to answer the question.'''

        question: str = dspy.InputField(description="The question to answer")
        code: dspy.Code["java"] = dspy.OutputField(description="The code to execute")


    predict = dspy.Predict(CodeGeneration)
    result = predict(question="Given an array, find if any of the two numbers sum up to 10")
    print(result.code)
    ```

    Example 2: dspy.Code as input type in code analysis:

    ```python
    import dspy
    import inspect

    dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))


    class CodeAnalysis(dspy.Signature):
        '''Analyze the time complexity of the function.'''

        code: dspy.Code["python"] = dspy.InputField(description="The function to analyze")
        result: str = dspy.OutputField(description="The time complexity of the function")


    predict = dspy.Predict(CodeAnalysis)


    def sleepsort(x):
        import time

        for i in x:
            time.sleep(i)
            print(i)

    result = predict(code=inspect.getsource(sleepsort))
    print(result.result)
    ```
    """

    # Raw code content; markdown fences are stripped during validation.
    code: str

    # Programming language label; per-language subclasses are created via the
    # dspy.Code["<language>"] subscription syntax (see patch at module bottom).
    language: ClassVar[str] = "python"

    def format(self) -> str:
        """Return the raw code string (used when rendering prompts)."""
        return f"{self.code}"

    @pydantic.model_serializer()
    def serialize_model(self):
        """Override to bypass the <<CUSTOM-TYPE-START-IDENTIFIER>> and <<CUSTOM-TYPE-END-IDENTIFIER>> tags."""
        return self.format()

    @classmethod
    def description(cls) -> str:
        """Describe to the LM the expected markdown code-block output format."""
        return (
            "Code represented in a string, specified in the `code` field. If this is an output field, the code "
            f"field should follow the markdown code block format, e.g. \n```{cls.language.lower()}\n{{code}}\n```"
            f"\nProgramming language: {cls.language}"
        )

    @pydantic.model_validator(mode="before")
    @classmethod
    def validate_input(cls, data: Any):
        """Coerce input into the `{"code": ...}` shape, stripping markdown fences.

        Accepts an existing Code instance, a plain string, or a dict with a
        string `code` key.

        Raises:
            ValueError: If `data` is none of the accepted shapes.
        """
        if isinstance(data, cls):
            return data
        if isinstance(data, str):
            return {"code": _filter_code(data)}
        if isinstance(data, dict):
            if "code" not in data:
                raise ValueError("`code` field is required for `dspy.Code`")
            if not isinstance(data["code"], str):
                raise ValueError(f"`code` field must be a string, but received type: {type(data['code'])}")
            return {"code": _filter_code(data["code"])}
        raise ValueError(f"Received invalid value for `dspy.Code`: {data}")
def _filter_code(code: str) -> str:
"""Extract code from markdown code blocks, stripping any language identifier."""
# Case 1: format like:
# ```python
# {code_block}
# ```
regex_pattern = r"```(?:[^\n]*)\n(.*?)```"
match = re.search(regex_pattern, code, re.DOTALL)
if match:
return match.group(1).strip()
# Case 2: ```<code>``` (no language, single-line)
regex_pattern_simple = r"```(.*?)```"
match = re.search(regex_pattern_simple, code, re.DOTALL)
if match:
return match.group(1).strip()
# Fallback case
return code
# Patch __class_getitem__ directly on the class to support dspy.Code["python"] syntax
def _code_class_getitem(cls, language):
    """Create a `Code` subclass whose `language` class attribute is `language`."""
    code_with_language_cls = create_model(f"{cls.__name__}_{language}", __base__=cls)
    code_with_language_cls.language = language
    return code_with_language_cls


# Installed as a classmethod so subscription works on the class object itself.
Code.__class_getitem__ = classmethod(_code_class_getitem)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/adapters/types/code.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
stanfordnlp/dspy:tests/adapters/test_code.py | import inspect
import pydantic
import pytest
import dspy
def test_code_validate_input():
# Create a `dspy.Code` instance with valid code.
code = dspy.Code["python"](code="print('Hello, world!')")
assert code.code == "print('Hello, world!')"
with pytest.raises(ValueError):
# Try to create a `dspy.Code` instance with invalid type.
dspy.Code["python"](code=123)
def foo(x):
return x + 1
code_source = inspect.getsource(foo)
code = dspy.Code["python"](code=code_source)
assert code.code == code_source
def test_code_in_nested_type():
class Wrapper(pydantic.BaseModel):
code: dspy.Code
code = dspy.Code(code="print('Hello, world!')")
wrapper = Wrapper(code=code)
assert wrapper.code.code == "print('Hello, world!')"
def test_code_with_language():
    """Subscripting dspy.Code binds the language and surfaces it in description()."""
    java_code = dspy.Code["java"](code="System.out.println('Hello, world!');")
    assert java_code.code == "System.out.println('Hello, world!');"
    assert java_code.language == "java"
    assert "Programming language: java" in java_code.description()

    cpp_code = dspy.Code["cpp"](code="std::cout << 'Hello, world!' << std::endl;")
    assert cpp_code.code == "std::cout << 'Hello, world!' << std::endl;"
    assert cpp_code.language == "cpp"
    assert "Programming language: cpp" in cpp_code.description()
def test_code_parses_from_dirty_code():
    """Markdown fences (with or without surrounding prose) are stripped on parse."""
    # Fence directly around the code, no prose.
    dirty_code = "```python\nprint('Hello, world!')```"
    parsed = dspy.Code(code=dirty_code)
    assert parsed.code == "print('Hello, world!')"

    # Fence embedded inside free-form reasoning text.
    dirty_code_with_reasoning = """
The generated code is:
```python
print('Hello, world!')
```
The reasoning is:
The code is a simple print statement.
"""
    parsed = dspy.Code(code=dirty_code_with_reasoning)
    assert parsed.code == "print('Hello, world!')"
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "tests/adapters/test_code.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
stanfordnlp/dspy:dspy/utils/syncify.py | import asyncio
from types import MethodType
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from dspy.primitives.module import Module
def run_async(coro):
    """Drive the coroutine *coro* to completion from synchronous code.

    Works both with and without an already-running event loop: inside a
    running loop (e.g. Jupyter) the loop is patched via nest_asyncio so it
    can be re-entered; otherwise a fresh loop owns the coroutine.
    """
    try:
        current_loop = asyncio.get_running_loop()
    except RuntimeError:
        current_loop = None

    if current_loop is not None and current_loop.is_running():
        # Re-entering a live loop is normally forbidden; nest_asyncio lifts
        # that restriction so run_until_complete can be called here.
        import nest_asyncio

        nest_asyncio.apply()
        return asyncio.get_event_loop().run_until_complete(coro)

    # No loop running in this thread: asyncio.run creates and tears one down.
    return asyncio.run(coro)
def syncify(program: "Module", in_place: bool = True) -> "Module":
    """Convert an async DSPy module to a sync program.

    There are two modes of this function:

    - `in_place=True` (recommended): Modify the module in place. But this may not work if you already have a `forward`
      method which does different things from `aforward`.
    - `in_place=False`: Return a wrapper module. This changes the module's architecture, but it's more robust.

    Args:
        program: The async program to convert, must have an `aforward` method implemented.
        in_place: If True, modify the module in place. Otherwise, return a wrapper module.

    Returns:
        The sync program, which has a `forward` method that can be called from a synchronous context.
    """
    if in_place:

        def forward(self, *args, **kwargs):
            # Bridge sync callers to the async implementation.
            return run_async(self.aforward(*args, **kwargs))

        # Create the `forward` method in place. MethodType binds `forward` to
        # this specific instance, so other instances of the class are untouched.
        program.forward = MethodType(forward, program)
        return program
    else:
        # Imported here (not at module top) to avoid a circular import with
        # dspy.primitives.module.
        from dspy.primitives.module import Module

        class SyncWrapper(Module):
            def __init__(self, program: "Module"):
                # NOTE(review): no super().__init__() call here — presumably
                # dspy's Module base tolerates that; confirm against Module.
                self.program = program

            def forward(self, *args, **kwargs):
                # Delegate to the wrapped module's async forward.
                return run_async(self.program.aforward(*args, **kwargs))

        return SyncWrapper(program)
| {
"repo_id": "stanfordnlp/dspy",
"file_path": "dspy/utils/syncify.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.